agent: transfer leadership when establishLeadership fails (#5247)

Hans Hasselberg 2019-06-19 14:50:48 +02:00 committed by GitHub
parent e6419d4c23
commit 0d8d7ae052
184 changed files with 21304 additions and 2515 deletions


@ -1131,6 +1131,7 @@ func (a *Agent) consulConfig() (*consul.Config, error) {
}
// Setup the loggers
base.LogLevel = a.config.LogLevel
base.LogOutput = a.LogOutput
// This will set up the LAN keyring, as well as the WAN and any segments


@ -147,6 +147,9 @@ type Config struct {
// leader election.
ReconcileInterval time.Duration
// LogLevel is the level of the logs to write. Defaults to "INFO".
LogLevel string
// LogOutput is the location to write logs to. If this is not set,
// logs will go to stderr.
LogOutput io.Writer


@ -126,6 +126,21 @@ func (s *Server) monitorLeadership() {
}
}
func (s *Server) leadershipTransfer() error {
retryCount := 3
for i := 0; i < retryCount; i++ {
future := s.raft.LeadershipTransfer()
if err := future.Error(); err != nil {
s.logger.Printf("[ERR] consul: failed to transfer leadership attempt %d/%d: %v", i, retryCount, err)
} else {
s.logger.Printf("[ERR] consul: successfully transferred leadership attempt %d/%d", i, retryCount)
return nil
}
}
return fmt.Errorf("failed to transfer leadership in %d attempts", retryCount)
}
// leaderLoop runs as long as we are the leader to run various
// maintenance activities
func (s *Server) leaderLoop(stopCh chan struct{}) {
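
Note: LeadershipTransfer comes from hashicorp/raft v1.1.0 (bumped in go.mod further down) and returns a Future whose Error() blocks until the transfer completes or fails. A minimal standalone sketch of the same retry pattern against that API; the helper name here is hypothetical, not Consul's:

```go
package example

import (
	"fmt"
	"log"

	"github.com/hashicorp/raft"
)

// transferWithRetry mirrors leadershipTransfer: it retries a raft leadership
// transfer a fixed number of times and gives up with an error if no attempt
// succeeds.
func transferWithRetry(r *raft.Raft, attempts int) error {
	for i := 0; i < attempts; i++ {
		// LeadershipTransfer returns a Future; Error() blocks until the
		// transfer has either completed or failed.
		if err := r.LeadershipTransfer().Error(); err != nil {
			log.Printf("transfer attempt %d/%d failed: %v", i+1, attempts, err)
			continue
		}
		return nil
	}
	return fmt.Errorf("failed to transfer leadership in %d attempts", attempts)
}
```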
@ -142,19 +157,6 @@ func (s *Server) leaderLoop(stopCh chan struct{}) {
var reconcileCh chan serf.Member
establishedLeader := false
reassert := func() error {
if !establishedLeader {
return fmt.Errorf("leadership has not been established")
}
if err := s.revokeLeadership(); err != nil {
return err
}
if err := s.establishLeadership(); err != nil {
return err
}
return nil
}
RECONCILE:
// Setup a reconciliation timer
reconcileCh = nil
@ -175,17 +177,22 @@ RECONCILE:
s.logger.Printf("[ERR] consul: failed to establish leadership: %v", err)
// Immediately revoke leadership since we didn't successfully
// establish leadership.
if err := s.revokeLeadership(); err != nil {
s.logger.Printf("[ERR] consul: failed to revoke leadership: %v", err)
s.revokeLeadership()
// attempt to transfer leadership. If successful, it is
// time to leave the leaderLoop since this node is no
// longer the leader. If leadershipTransfer() fails, we
// will try to acquire leadership again after
// 5 seconds.
if err := s.leadershipTransfer(); err != nil {
s.logger.Printf("[ERR] consul: %v", err)
interval = time.After(5 * time.Second)
goto WAIT
}
goto WAIT
return
}
establishedLeader = true
defer func() {
if err := s.revokeLeadership(); err != nil {
s.logger.Printf("[ERR] consul: failed to revoke leadership: %v", err)
}
}()
defer s.revokeLeadership()
}
// Reconcile any missing data
@ -223,7 +230,47 @@ WAIT:
case index := <-s.tombstoneGC.ExpireCh():
go s.reapTombstones(index)
case errCh := <-s.reassertLeaderCh:
errCh <- reassert()
// we can get into this state when the initial
// establishLeadership has failed as well as the follow-up
// leadershipTransfer. Afterwards we will be waiting
// for the interval to trigger a reconciliation and can
// potentially end up here. There is no point in
// reasserting because this agent was never leader in the
// first place.
if !establishedLeader {
errCh <- fmt.Errorf("leadership has not been established")
continue
}
// continue to reassert only if we previously were the
// leader; reasserting means a revokeLeadership() followed
// by an establishLeadership().
s.revokeLeadership()
err := s.establishLeadership()
errCh <- err
// in case establishLeadership failed, we will try to
// transfer leadership. At this point raft thinks we are
// the leader, but consul disagrees.
if err != nil {
if err := s.leadershipTransfer(); err != nil {
// establishedLeader was true before, but it no
// longer is, since we revoked leadership and the
// leadership transfer also failed. That is why we
// stay in the leaderLoop, but establishedLeader
// now needs to be set to false.
establishedLeader = false
interval = time.After(5 * time.Second)
goto WAIT
}
// leadershipTransfer was successful and it is
// time to leave the leaderLoop.
return
}
}
}
}
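
The reassertLeaderCh branch above is driven by callers that hand the loop a response channel. A minimal sketch of the caller side, assuming the channel is a chan chan error as its use above suggests (helper name is hypothetical):

```go
package example

// requestReassert is a hypothetical caller-side helper: it asks the leader
// loop to re-run revokeLeadership/establishLeadership and waits for the
// result, which the loop sends back on the channel it received (the
// `errCh <- err` lines above).
func requestReassert(reassertLeaderCh chan chan error) error {
	errCh := make(chan error, 1)
	reassertLeaderCh <- errCh
	return <-errCh
}
```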
@ -290,15 +337,13 @@ func (s *Server) establishLeadership() error {
// revokeLeadership is invoked once we step down as leader.
// This is used to clean up any state that may be specific to a leader.
func (s *Server) revokeLeadership() error {
func (s *Server) revokeLeadership() {
// Disable the tombstone GC, since it is only useful as a leader
s.tombstoneGC.SetEnabled(false)
// Clear the session timers on either shutdown or step down, since we
// are no longer responsible for session expirations.
if err := s.clearAllSessionTimers(); err != nil {
return err
}
s.clearAllSessionTimers()
s.stopConfigReplication()
@ -313,8 +358,8 @@ func (s *Server) revokeLeadership() error {
s.stopACLUpgrade()
s.resetConsistentReadReady()
s.autopilot.Stop()
return nil
}
// DEPRECATED (ACL-Legacy-Compat) - Remove once old ACL compatibility is removed


@ -32,6 +32,7 @@ import (
"github.com/hashicorp/consul/sentinel"
"github.com/hashicorp/consul/tlsutil"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/raft"
raftboltdb "github.com/hashicorp/raft-boltdb"
"github.com/hashicorp/serf/serf"
@ -548,7 +549,13 @@ func (s *Server) setupRaft() error {
// Make sure we set the LogOutput.
s.config.RaftConfig.LogOutput = s.config.LogOutput
s.config.RaftConfig.Logger = s.logger
raftLogger := hclog.New(&hclog.LoggerOptions{
Name: "raft",
Level: hclog.LevelFromString(s.config.LogLevel),
Output: s.config.LogOutput,
TimeFormat: `2006/01/02 15:04:05`,
})
s.config.RaftConfig.Logger = raftLogger
// Versions of the Raft protocol below 3 require the LocalID to match the network
// address of the transport.
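
raft v1.1.0 (see the go.mod change below) expects an hclog.Logger on RaftConfig.Logger, hence the new wiring from the agent's LogLevel/LogOutput. A self-contained sketch of the same construction; the helper name is made up, and it assumes hclog.LevelFromString returns NoLevel for unrecognized strings:

```go
package example

import (
	"io"
	"os"

	"github.com/hashicorp/go-hclog"
)

// newRaftLogger builds an hclog.Logger the same way the server does above.
// An unrecognized level string yields hclog.NoLevel; a nil writer is mapped
// to stderr here so the logger always has an output.
func newRaftLogger(level string, output io.Writer) hclog.Logger {
	if output == nil {
		output = os.Stderr
	}
	return hclog.New(&hclog.LoggerOptions{
		Name:       "raft",
		Level:      hclog.LevelFromString(level),
		Output:     output,
		TimeFormat: `2006/01/02 15:04:05`,
	})
}
```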


@ -963,14 +963,8 @@ func TestServer_RevokeLeadershipIdempotent(t *testing.T) {
testrpc.WaitForLeader(t, s1.RPC, "dc1")
err := s1.revokeLeadership()
if err != nil {
t.Fatal(err)
}
err = s1.revokeLeadership()
if err != nil {
t.Fatal(err)
}
s1.revokeLeadership()
s1.revokeLeadership()
}
func TestServer_Reload(t *testing.T) {


@ -122,9 +122,8 @@ func (s *Server) clearSessionTimer(id string) error {
// clearAllSessionTimers is used when a leader is stepping
// down and we no longer need to track any session timers.
func (s *Server) clearAllSessionTimers() error {
func (s *Server) clearAllSessionTimers() {
s.sessionTimers.StopAll()
return nil
}
// sessionStats is a long running routine used to capture


@ -281,10 +281,7 @@ func TestClearAllSessionTimers(t *testing.T) {
s1.createSessionTimer("bar", 10*time.Millisecond)
s1.createSessionTimer("baz", 10*time.Millisecond)
err := s1.clearAllSessionTimers()
if err != nil {
t.Fatalf("err: %v", err)
}
s1.clearAllSessionTimers()
// sessionTimers is guarded by the lock
if s1.sessionTimers.Len() != 0 {

go.mod

@ -9,7 +9,6 @@ replace github.com/hashicorp/consul/sdk => ./sdk
require (
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Azure/go-autorest v10.15.3+incompatible // indirect
github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6 // indirect
github.com/Jeffail/gabs v1.1.0 // indirect
github.com/Microsoft/go-winio v0.4.3 // indirect
github.com/NYTimes/gziphandler v1.0.1
@ -18,16 +17,13 @@ require (
github.com/SermoDigital/jose v0.0.0-20180104203859-803625baeddc // indirect
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310
github.com/asaskevich/govalidator v0.0.0-20180319081651-7d2e70ef918f // indirect
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/cenkalti/backoff v2.1.1+incompatible // indirect
github.com/circonus-labs/circonus-gometrics v0.0.0-20161109192337-d17a8420c36e // indirect
github.com/circonus-labs/circonusllhist v0.0.0-20161110002650-365d370cc145 // indirect
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect
github.com/coredns/coredns v1.1.2
github.com/denisenkom/go-mssqldb v0.0.0-20180620032804-94c9c97e8c9f // indirect
@ -58,12 +54,11 @@ require (
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de
github.com/hashicorp/go-cleanhttp v0.5.1
github.com/hashicorp/go-discover v0.0.0-20190403160810-22221edb15cd
github.com/hashicorp/go-hclog v0.0.0-20180402200405-69ff559dc25f // indirect
github.com/hashicorp/go-hclog v0.9.1
github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71
github.com/hashicorp/go-msgpack v0.5.4
github.com/hashicorp/go-msgpack v0.5.5
github.com/hashicorp/go-multierror v1.0.0
github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116
github.com/hashicorp/go-retryablehttp v0.0.0-20180531211321-3b087ef2d313 // indirect
github.com/hashicorp/go-rootcerts v1.0.0
github.com/hashicorp/go-sockaddr v1.0.0
github.com/hashicorp/go-syslog v1.0.0
@ -76,7 +71,7 @@ require (
github.com/hashicorp/mdns v1.0.1 // indirect
github.com/hashicorp/memberlist v0.1.4
github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69
github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472
github.com/hashicorp/raft v1.1.0
github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1
github.com/hashicorp/serf v0.8.2
github.com/hashicorp/vault v0.10.3
@ -89,7 +84,6 @@ require (
github.com/kr/text v0.1.0
github.com/lib/pq v0.0.0-20180523175426-90697d60dd84 // indirect
github.com/lyft/protoc-gen-validate v0.0.0-20180911180927-64fcb82c878e // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/miekg/dns v1.0.14
github.com/mitchellh/cli v1.0.0
github.com/mitchellh/copystructure v0.0.0-20160804032330-cdac8253d00f
@ -103,13 +97,10 @@ require (
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v0.1.1 // indirect
github.com/ory/dockertest v3.3.4+incompatible // indirect
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c
github.com/pascaldekloe/goe v0.1.0
github.com/patrickmn/go-cache v0.0.0-20180527043350-9f6ff22cfff8 // indirect
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v0.0.0-20180328130430-f504d69affe1
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 // indirect
github.com/prometheus/common v0.0.0-20180326160409-38c53a9f4bfc // indirect
github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d // indirect
github.com/prometheus/client_golang v0.9.2
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f
github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 // indirect
github.com/shirou/gopsutil v0.0.0-20181107111621-48177ef5f880

go.sum

@ -10,6 +10,8 @@ github.com/Azure/go-autorest v10.15.3+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxS
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6 h1:veThyuVPIg0cAHly135Y+IW2ymFgZ1pftOyAVkqCoi8=
github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Jeffail/gabs v1.1.0 h1:kw5zCcl9tlJNHTDme7qbi21fDHZmXrnjMoXos3Jw/NI=
github.com/Jeffail/gabs v1.1.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
github.com/Microsoft/go-winio v0.4.3 h1:M3NHMuPgMSUPdE5epwNUHlRPSVzHs8HpRTrVXhR0myo=
@ -30,6 +32,8 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20180319081651-7d2e70ef918f h1:/8NcnxL60YFll4ehCwibKotx0BR9v2ND40fomga8qDs=
@ -50,8 +54,12 @@ github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1q
github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/circonus-labs/circonus-gometrics v0.0.0-20161109192337-d17a8420c36e h1:VhMcRhkS/wJM+XfZxNn+tk5EVmF2k19g6yS6uDXHn0o=
github.com/circonus-labs/circonus-gometrics v0.0.0-20161109192337-d17a8420c36e/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.0.0-20161110002650-365d370cc145 h1:cwLvX6r5EOiVmkUYjY+Ev4ZJrkOhex3r+mfeqT8o+8c=
github.com/circonus-labs/circonusllhist v0.0.0-20161110002650-365d370cc145/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M=
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@ -148,12 +156,15 @@ github.com/hashicorp/go-bexpr v0.1.0 h1:hA/9CWGPsQ6YZXvPvizD+VEEjBG4V6Un0Qcyav5g
github.com/hashicorp/go-bexpr v0.1.0/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU=
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de h1:XDCSythtg8aWSRSO29uwhgh7b127fWr+m5SemqjSUL8=
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1:xIwEieBHERyEvaeKF/TcHh1Hu+lxPM+n2vT1+g9I4m4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-discover v0.0.0-20190403160810-22221edb15cd h1:SynRxs8h2h7lLSA5py5a3WWkYpImhREtju0CuRd97wc=
github.com/hashicorp/go-discover v0.0.0-20190403160810-22221edb15cd/go.mod h1:ueUgD9BeIocT7QNuvxSyJyPAM9dfifBcaWmeybb67OY=
github.com/hashicorp/go-hclog v0.0.0-20180402200405-69ff559dc25f h1:t34t/ySFIGsPOLQ/dCcKeCoErlqhXlNLYvPn7mVogzo=
github.com/hashicorp/go-hclog v0.0.0-20180402200405-69ff559dc25f/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM=
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 h1:yxxFgVz31vFoKKTtRUNbXLNe4GFnbLKqg+0N7yG42L8=
@ -161,12 +172,16 @@ github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71/go.mod h1:kbfIt
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.4 h1:SFT72YqIkOcLdWJUYcriVX7hbrZpwc/f7h8aW2NUqrA=
github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116 h1:Y4V/yReWjQo/Ngyc0w6C3EKXKincp4YgvXeo8lI4LrI=
github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116/go.mod h1:JSqWYsict+jzcj0+xElxyrBQRPNoiWQuddnxArJ7XHQ=
github.com/hashicorp/go-retryablehttp v0.0.0-20180531211321-3b087ef2d313 h1:8YjGfJRRXO9DA6RG0wNt3kEkvvnxIDao5us1PG+S0wc=
github.com/hashicorp/go-retryablehttp v0.0.0-20180531211321-3b087ef2d313/go.mod h1:fXcdFsQoipQa7mwORhKad5jmDCeSy/RCGzWA08PO0lM=
github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
@ -203,6 +218,8 @@ github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc
github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q=
github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472 h1:9EPzHJ1bJFaFbGOz3UV3DDFmGYANr+SF+eapmiK5zV4=
github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI=
github.com/hashicorp/raft v1.1.0 h1:qPMePEczgbkiQsqCsRfuHRqvDUO+zmAInDaD5ptXlq0=
github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1 h1:LHTrLUnNkk+2YkO5EMG49q0lHdR9AZhDbCpu0+M3e0E=
github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
@ -296,6 +313,8 @@ github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8D
github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v0.0.0-20180527043350-9f6ff22cfff8 h1:BR6MM54q4W9pn0SySwg6yctZtBKlTdUq6a+b0kArBnE=
github.com/patrickmn/go-cache v0.0.0-20180527043350-9f6ff22cfff8/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
@ -310,12 +329,20 @@ github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.0.0-20180328130430-f504d69affe1 h1:jtnwVoXwppTtQ4ApMgCb+G5CcW8OUvLlprWpB+x3e+8=
github.com/prometheus/client_golang v0.0.0-20180328130430-f504d69affe1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 h1:cLL6NowurKLMfCeQy4tIeph12XNQWgANCNvdyrOYKV4=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180326160409-38c53a9f4bfc h1:tyg3EcZAmwCUe90Jzl4Qw6Af+ajuW8S9b1VFitMNOQs=
github.com/prometheus/common v0.0.0-20180326160409-38c53a9f4bfc/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d h1:RCcsxyRr6+/pLg6wr0cUjPovhEhSNOtPh0SOz6u3hGU=
github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o=
github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f h1:UFr9zpz4xgTnIE5yIMtWAMngCdZ9p/+q6lTbgelo80M=
@ -349,6 +376,8 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9 h1:/Bsw4C+DEdqPjt8vAqaC9LAqpAQnaCQQqmolqq3S1T4=
github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9/go.mod h1:RHkNRtSLfOK7qBTHaeSX1D6BNpI3qw7NTxsmNr4RvN8=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo=
github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
@ -372,6 +401,7 @@ golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAG
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=


@ -3,43 +3,3 @@
Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags
and histograms.
## Get the code
$ go get github.com/DataDog/datadog-go/statsd
## Usage
```go
// Create the client
c, err := statsd.New("127.0.0.1:8125")
if err != nil {
log.Fatal(err)
}
// Prefix every metric with the app name
c.Namespace = "flubber."
// Send the EC2 availability zone as a tag with every metric
c.Tags = append(c.Tags, "us-east-1a")
err = c.Gauge("request.duration", 1.2, nil, 1)
```
## Buffering Client
Dogstatsd accepts packets with multiple statsd payloads in them. Using the BufferingClient via `NewBufferingClient` will buffer up commands and send them when the buffer is reached or after 100msec.
## Development
Run the tests with:
$ go test
## Documentation
Please see: http://godoc.org/github.com/DataDog/datadog-go/statsd
## License
go-dogstatsd is released under the [MIT license](http://www.opensource.org/licenses/mit-license.php).
## Credits
Original code by [ooyala](https://github.com/ooyala/go-dogstatsd).

vendor/github.com/DataDog/datadog-go/statsd/options.go (new vendored file)

@ -0,0 +1,109 @@
package statsd
import "time"
var (
// DefaultNamespace is the default value for the Namespace option
DefaultNamespace = ""
// DefaultTags is the default value for the Tags option
DefaultTags = []string{}
// DefaultBuffered is the default value for the Buffered option
DefaultBuffered = false
// DefaultMaxMessagesPerPayload is the default value for the MaxMessagesPerPayload option
DefaultMaxMessagesPerPayload = 16
// DefaultAsyncUDS is the default value for the AsyncUDS option
DefaultAsyncUDS = false
// DefaultWriteTimeoutUDS is the default value for the WriteTimeoutUDS option
DefaultWriteTimeoutUDS = 1 * time.Millisecond
)
// Options contains the configuration options for a client.
type Options struct {
// Namespace to prepend to all metrics, events and service checks name.
Namespace string
// Tags are global tags to be applied to every metrics, events and service checks.
Tags []string
// Buffered allows packing multiple DogStatsD messages into one payload. Messages will be buffered
// until the payload contains MaxMessagesPerPayload metrics, events and/or service
// checks, or 100ms after the payload started to be built.
Buffered bool
// MaxMessagesPerPayload is the maximum number of metrics, events and/or service checks a single payload will contain.
// Note that this option only takes effect when the client is buffered.
MaxMessagesPerPayload int
// AsyncUDS allows switching between async and blocking mode for UDS.
// Blocking mode allows for error checking but does not guarantee that calls won't block the execution.
AsyncUDS bool
// WriteTimeoutUDS is the timeout after which a UDS packet is dropped.
WriteTimeoutUDS time.Duration
}
func resolveOptions(options []Option) (*Options, error) {
o := &Options{
Namespace: DefaultNamespace,
Tags: DefaultTags,
Buffered: DefaultBuffered,
MaxMessagesPerPayload: DefaultMaxMessagesPerPayload,
AsyncUDS: DefaultAsyncUDS,
WriteTimeoutUDS: DefaultWriteTimeoutUDS,
}
for _, option := range options {
err := option(o)
if err != nil {
return nil, err
}
}
return o, nil
}
// Option is a client option. Can return an error if validation fails.
type Option func(*Options) error
// WithNamespace sets the Namespace option.
func WithNamespace(namespace string) Option {
return func(o *Options) error {
o.Namespace = namespace
return nil
}
}
// WithTags sets the Tags option.
func WithTags(tags []string) Option {
return func(o *Options) error {
o.Tags = tags
return nil
}
}
// Buffered sets the Buffered option.
func Buffered() Option {
return func(o *Options) error {
o.Buffered = true
return nil
}
}
// WithMaxMessagesPerPayload sets the MaxMessagesPerPayload option.
func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option {
return func(o *Options) error {
o.MaxMessagesPerPayload = maxMessagesPerPayload
return nil
}
}
// WithAsyncUDS sets the AsyncUDS option.
func WithAsyncUDS() Option {
return func(o *Options) error {
o.AsyncUDS = true
return nil
}
}
// WithWriteTimeoutUDS sets the WriteTimeoutUDS option.
func WithWriteTimeoutUDS(writeTimeoutUDS time.Duration) Option {
return func(o *Options) error {
o.WriteTimeoutUDS = writeTimeoutUDS
return nil
}
}
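
This file introduces a functional-options constructor consumed by the updated statsd.New further down. A short usage sketch; the address and option values are arbitrary examples:

```go
package example

import (
	"log"

	"github.com/DataDog/datadog-go/statsd"
)

func newMetricsClient() *statsd.Client {
	// Buffer up to 16 messages per payload, prefix metric names, and attach
	// a global tag to everything this client sends.
	c, err := statsd.New("127.0.0.1:8125",
		statsd.Buffered(),
		statsd.WithMaxMessagesPerPayload(16),
		statsd.WithNamespace("consul."),
		statsd.WithTags([]string{"env:dev"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	return c
}
```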


@ -27,8 +27,9 @@ import (
"bytes"
"errors"
"fmt"
"io"
"math/rand"
"net"
"os"
"strconv"
"strings"
"sync"
@ -54,104 +55,209 @@ any number greater than that will see frames being cut out.
*/
const MaxUDPPayloadSize = 65467
// A Client is a handle for sending udp messages to dogstatsd. It is safe to
/*
UnixAddressPrefix holds the prefix to use to enable Unix Domain Socket
traffic instead of UDP.
*/
const UnixAddressPrefix = "unix://"
// Client-side entity ID injection for container tagging
const (
entityIDEnvName = "DD_ENTITY_ID"
entityIDTagName = "dd.internal.entity_id"
)
/*
Stat suffixes
*/
var (
gaugeSuffix = []byte("|g")
countSuffix = []byte("|c")
histogramSuffix = []byte("|h")
distributionSuffix = []byte("|d")
decrSuffix = []byte("-1|c")
incrSuffix = []byte("1|c")
setSuffix = []byte("|s")
timingSuffix = []byte("|ms")
)
// A statsdWriter offers a standard interface regardless of the underlying
// protocol. For now UDS and UDP writers are available.
type statsdWriter interface {
Write(data []byte) (n int, err error)
SetWriteTimeout(time.Duration) error
Close() error
}
// A Client is a handle for sending messages to dogstatsd. It is safe to
// use one Client from multiple goroutines simultaneously.
type Client struct {
conn net.Conn
// Writer handles the underlying networking protocol
writer statsdWriter
// Namespace to prepend to all statsd calls
Namespace string
// Tags are global tags to be added to every statsd call
Tags []string
// skipErrors turns off error passing and allows UDS to emulate UDP behaviour
SkipErrors bool
// BufferLength is the length of the buffer in commands.
bufferLength int
flushTime time.Duration
commands []string
commands [][]byte
buffer bytes.Buffer
stop bool
stop chan struct{}
sync.Mutex
}
// New returns a pointer to a new Client given an addr in the format "hostname:port".
func New(addr string) (*Client, error) {
udpAddr, err := net.ResolveUDPAddr("udp", addr)
// New returns a pointer to a new Client given an addr in the format "hostname:port" or
// "unix:///path/to/socket".
func New(addr string, options ...Option) (*Client, error) {
o, err := resolveOptions(options)
if err != nil {
return nil, err
}
conn, err := net.DialUDP("udp", nil, udpAddr)
var w statsdWriter
if !strings.HasPrefix(addr, UnixAddressPrefix) {
w, err = newUDPWriter(addr)
} else if o.AsyncUDS {
w, err = newAsyncUdsWriter(addr[len(UnixAddressPrefix)-1:])
} else {
w, err = newBlockingUdsWriter(addr[len(UnixAddressPrefix)-1:])
}
if err != nil {
return nil, err
}
client := &Client{conn: conn}
w.SetWriteTimeout(o.WriteTimeoutUDS)
c := Client{
Namespace: o.Namespace,
Tags: o.Tags,
writer: w,
}
// Inject DD_ENTITY_ID as a constant tag if found
entityID := os.Getenv(entityIDEnvName)
if entityID != "" {
entityTag := fmt.Sprintf("%s:%s", entityIDTagName, entityID)
c.Tags = append(c.Tags, entityTag)
}
if o.Buffered {
c.bufferLength = o.MaxMessagesPerPayload
c.commands = make([][]byte, 0, o.MaxMessagesPerPayload)
c.flushTime = time.Millisecond * 100
c.stop = make(chan struct{}, 1)
go c.watch()
}
return &c, nil
}
// NewWithWriter creates a new Client with the given writer. The writer must be an
// io.WriteCloser that also implements SetWriteTimeout(time.Duration) error.
func NewWithWriter(w statsdWriter) (*Client, error) {
client := &Client{writer: w, SkipErrors: false}
// Inject DD_ENTITY_ID as a constant tag if found
entityID := os.Getenv(entityIDEnvName)
if entityID != "" {
entityTag := fmt.Sprintf("%s:%s", entityIDTagName, entityID)
client.Tags = append(client.Tags, entityTag)
}
return client, nil
}
// NewBuffered returns a Client that buffers its output and sends it in chunks.
// Buflen is the length of the buffer in number of commands.
//
// When addr is empty, the client will default to a UDP client and use the DD_AGENT_HOST
// and (optionally) the DD_DOGSTATSD_PORT environment variables to build the target address.
func NewBuffered(addr string, buflen int) (*Client, error) {
client, err := New(addr)
if err != nil {
return nil, err
}
client.bufferLength = buflen
client.commands = make([]string, 0, buflen)
client.flushTime = time.Millisecond * 100
go client.watch()
return client, nil
return New(addr, Buffered(), WithMaxMessagesPerPayload(buflen))
}
// format a message from its name, value, tags and rate. Also adds global
// namespace and tags.
func (c *Client) format(name, value string, tags []string, rate float64) string {
var buf bytes.Buffer
func (c *Client) format(name string, value interface{}, suffix []byte, tags []string, rate float64) []byte {
// preallocated buffer, stack allocated as long as it doesn't escape
buf := make([]byte, 0, 200)
if c.Namespace != "" {
buf.WriteString(c.Namespace)
buf = append(buf, c.Namespace...)
}
buf.WriteString(name)
buf.WriteString(":")
buf.WriteString(value)
buf = append(buf, name...)
buf = append(buf, ':')
switch val := value.(type) {
case float64:
buf = strconv.AppendFloat(buf, val, 'f', 6, 64)
case int64:
buf = strconv.AppendInt(buf, val, 10)
case string:
buf = append(buf, val...)
default:
// do nothing
}
buf = append(buf, suffix...)
if rate < 1 {
buf.WriteString(`|@`)
buf.WriteString(strconv.FormatFloat(rate, 'f', -1, 64))
buf = append(buf, "|@"...)
buf = strconv.AppendFloat(buf, rate, 'f', -1, 64)
}
tags = append(c.Tags, tags...)
if len(tags) > 0 {
buf.WriteString("|#")
buf.WriteString(tags[0])
for _, tag := range tags[1:] {
buf.WriteString(",")
buf.WriteString(tag)
}
buf = appendTagString(buf, c.Tags, tags)
// non-zeroing copy to avoid referencing a larger than necessary underlying array
return append([]byte(nil), buf...)
}
// SetWriteTimeout allows the user to set a custom UDS write timeout. Not supported for UDP.
func (c *Client) SetWriteTimeout(d time.Duration) error {
if c == nil {
return fmt.Errorf("Client is nil")
}
return buf.String()
return c.writer.SetWriteTimeout(d)
}
func (c *Client) watch() {
for _ = range time.Tick(c.flushTime) {
if c.stop {
ticker := time.NewTicker(c.flushTime)
for {
select {
case <-ticker.C:
c.Lock()
if len(c.commands) > 0 {
// FIXME: eating error here
c.flushLocked()
}
c.Unlock()
case <-c.stop:
ticker.Stop()
return
}
c.Lock()
if len(c.commands) > 0 {
// FIXME: eating error here
c.flush()
}
c.Unlock()
}
}
func (c *Client) append(cmd string) error {
func (c *Client) append(cmd []byte) error {
c.Lock()
defer c.Unlock()
c.commands = append(c.commands, cmd)
// if we should flush, lets do it
if len(c.commands) == c.bufferLength {
if err := c.flush(); err != nil {
if err := c.flushLocked(); err != nil {
return err
}
}
return nil
}
func (c *Client) joinMaxSize(cmds []string, sep string, maxSize int) ([][]byte, []int) {
func (c *Client) joinMaxSize(cmds [][]byte, sep string, maxSize int) ([][]byte, []int) {
c.buffer.Reset() //clear buffer
var frames [][]byte
@ -171,13 +277,13 @@ func (c *Client) joinMaxSize(cmds []string, sep string, maxSize int) ([][]byte,
if elem != 0 {
c.buffer.Write(sepBytes)
}
c.buffer.WriteString(cmd)
c.buffer.Write(cmd)
elem++
} else {
frames = append(frames, copyAndResetBuffer(&c.buffer))
ncmds = append(ncmds, elem)
// if cmd is bigger than maxSize it will get flushed on next loop
c.buffer.WriteString(cmd)
c.buffer.Write(cmd)
elem = 1
}
}
@ -198,13 +304,23 @@ func copyAndResetBuffer(buf *bytes.Buffer) []byte {
return tmpBuf
}
// Flush forces a flush of the pending commands in the buffer
func (c *Client) Flush() error {
if c == nil {
return fmt.Errorf("Client is nil")
}
c.Lock()
defer c.Unlock()
return c.flushLocked()
}
// flush the commands in the buffer. Lock must be held by caller.
func (c *Client) flush() error {
func (c *Client) flushLocked() error {
frames, flushable := c.joinMaxSize(c.commands, "\n", OptimalPayloadSize)
var err error
cmdsFlushed := 0
for i, data := range frames {
_, e := c.conn.Write(data)
_, e := c.writer.Write(data)
if e != nil {
err = e
break
@ -223,71 +339,93 @@ func (c *Client) flush() error {
return err
}
func (c *Client) sendMsg(msg string) error {
func (c *Client) sendMsg(msg []byte) error {
// return an error if message is bigger than MaxUDPPayloadSize
if len(msg) > MaxUDPPayloadSize {
return errors.New("message size exceeds MaxUDPPayloadSize")
}
// if this client is buffered, then we'll just append this
c.Lock()
defer c.Unlock()
if c.bufferLength > 0 {
// return an error if message is bigger than OptimalPayloadSize
if len(msg) > MaxUDPPayloadSize {
return errors.New("message size exceeds MaxUDPPayloadSize")
}
return c.append(msg)
}
_, err := c.conn.Write([]byte(msg))
_, err := c.writer.Write(msg)
if c.SkipErrors {
return nil
}
return err
}
// send handles sampling and sends the message over UDP. It also adds global namespace prefixes and tags.
func (c *Client) send(name, value string, tags []string, rate float64) error {
func (c *Client) send(name string, value interface{}, suffix []byte, tags []string, rate float64) error {
if c == nil {
return nil
return fmt.Errorf("Client is nil")
}
if rate < 1 && rand.Float64() > rate {
return nil
}
data := c.format(name, value, tags, rate)
data := c.format(name, value, suffix, tags, rate)
return c.sendMsg(data)
}
// Gauge measures the value of a metric at a particular time.
func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error {
stat := fmt.Sprintf("%f|g", value)
return c.send(name, stat, tags, rate)
return c.send(name, value, gaugeSuffix, tags, rate)
}
// Count tracks how many times something happened per second.
func (c *Client) Count(name string, value int64, tags []string, rate float64) error {
stat := fmt.Sprintf("%d|c", value)
return c.send(name, stat, tags, rate)
return c.send(name, value, countSuffix, tags, rate)
}
// Histogram tracks the statistical distribution of a set of values.
// Histogram tracks the statistical distribution of a set of values on each host.
func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error {
stat := fmt.Sprintf("%f|h", value)
return c.send(name, stat, tags, rate)
return c.send(name, value, histogramSuffix, tags, rate)
}
// Distribution tracks the statistical distribution of a set of values across your infrastructure.
func (c *Client) Distribution(name string, value float64, tags []string, rate float64) error {
return c.send(name, value, distributionSuffix, tags, rate)
}
// Decr is just Count of -1
func (c *Client) Decr(name string, tags []string, rate float64) error {
return c.send(name, nil, decrSuffix, tags, rate)
}
// Incr is just Count of 1
func (c *Client) Incr(name string, tags []string, rate float64) error {
return c.send(name, nil, incrSuffix, tags, rate)
}
// Set counts the number of unique elements in a group.
func (c *Client) Set(name string, value string, tags []string, rate float64) error {
stat := fmt.Sprintf("%s|s", value)
return c.send(name, stat, tags, rate)
return c.send(name, value, setSuffix, tags, rate)
}
// Timing sends timing information, it is an alias for TimeInMilliseconds
func (c *Client) Timing(name string, value time.Duration, tags []string, rate float64) error {
return c.TimeInMilliseconds(name, value.Seconds()*1000, tags, rate)
}
// TimeInMilliseconds sends timing information in milliseconds.
// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing)
func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
stat := fmt.Sprintf("%f|ms", value)
return c.send(name, stat, tags, rate)
return c.send(name, value, timingSuffix, tags, rate)
}
// Event sends the provided Event.
func (c *Client) Event(e *Event) error {
if c == nil {
return fmt.Errorf("Client is nil")
}
stat, err := e.Encode(c.Tags...)
if err != nil {
return err
}
return c.sendMsg(stat)
return c.sendMsg([]byte(stat))
}
// SimpleEvent sends an event with the provided title and text.
@ -296,37 +434,70 @@ func (c *Client) SimpleEvent(title, text string) error {
return c.Event(e)
}
// ServiceCheck sends the provided ServiceCheck.
func (c *Client) ServiceCheck(sc *ServiceCheck) error {
if c == nil {
return fmt.Errorf("Client is nil")
}
stat, err := sc.Encode(c.Tags...)
if err != nil {
return err
}
return c.sendMsg([]byte(stat))
}
// SimpleServiceCheck sends a serviceCheck with the provided name and status.
func (c *Client) SimpleServiceCheck(name string, status ServiceCheckStatus) error {
sc := NewServiceCheck(name, status)
return c.ServiceCheck(sc)
}
// Close the client connection.
func (c *Client) Close() error {
if c == nil {
return nil
return fmt.Errorf("Client is nil")
}
c.stop = true
return c.conn.Close()
select {
case c.stop <- struct{}{}:
default:
}
// if this client is buffered, flush before closing the writer
if c.bufferLength > 0 {
if err := c.Flush(); err != nil {
return err
}
}
return c.writer.Close()
}
// Events support
// EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41
// The reason why they got exported is so that client code can directly use the types.
type eventAlertType string
// EventAlertType is the alert type for events
type EventAlertType string
const (
// Info is the "info" AlertType for events
Info eventAlertType = "info"
Info EventAlertType = "info"
// Error is the "error" AlertType for events
Error eventAlertType = "error"
Error EventAlertType = "error"
// Warning is the "warning" AlertType for events
Warning eventAlertType = "warning"
Warning EventAlertType = "warning"
// Success is the "success" AlertType for events
Success eventAlertType = "success"
Success EventAlertType = "success"
)
type eventPriority string
// EventPriority is the event priority for events
type EventPriority string
const (
// Normal is the "normal" Priority for events
Normal eventPriority = "normal"
Normal EventPriority = "normal"
// Low is the "low" Priority for events
Low eventPriority = "low"
Low EventPriority = "low"
)
// An Event is an object that can be posted to your DataDog event stream.
@ -343,12 +514,12 @@ type Event struct {
// AggregationKey groups this event with others of the same key.
AggregationKey string
// Priority of the event. Can be statsd.Low or statsd.Normal.
Priority eventPriority
Priority EventPriority
// SourceTypeName is a source type for the event.
SourceTypeName string
// AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success.
// If absent, the default value applied by the dogstatsd server is Info.
AlertType eventAlertType
AlertType EventAlertType
// Tags for the event.
Tags []string
}
@ -424,16 +595,93 @@ func (e Event) Encode(tags ...string) (string, error) {
buffer.WriteString(string(e.AlertType))
}
if len(tags)+len(e.Tags) > 0 {
all := make([]string, 0, len(tags)+len(e.Tags))
all = append(all, tags...)
all = append(all, e.Tags...)
buffer.WriteString("|#")
buffer.WriteString(all[0])
for _, tag := range all[1:] {
buffer.WriteString(",")
buffer.WriteString(tag)
}
writeTagString(&buffer, tags, e.Tags)
return buffer.String(), nil
}
// ServiceCheckStatus support
type ServiceCheckStatus byte
const (
// Ok is the "ok" ServiceCheck status
Ok ServiceCheckStatus = 0
// Warn is the "warning" ServiceCheck status
Warn ServiceCheckStatus = 1
// Critical is the "critical" ServiceCheck status
Critical ServiceCheckStatus = 2
// Unknown is the "unknown" ServiceCheck status
Unknown ServiceCheckStatus = 3
)
// A ServiceCheck is an object that contains the status of a DataDog service check.
type ServiceCheck struct {
// Name of the service check. Required.
Name string
// Status of service check. Required.
Status ServiceCheckStatus
// Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd
// server will set this to the current time.
Timestamp time.Time
// Hostname for the serviceCheck.
Hostname string
// A message describing the current state of the serviceCheck.
Message string
// Tags for the serviceCheck.
Tags []string
}
// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking
// against these values is done at send-time, or upon running sc.Check.
func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck {
return &ServiceCheck{
Name: name,
Status: status,
}
}
// Check verifies that a service check is valid.
func (sc ServiceCheck) Check() error {
if len(sc.Name) == 0 {
return fmt.Errorf("statsd.ServiceCheck name is required")
}
if byte(sc.Status) < 0 || byte(sc.Status) > 3 {
return fmt.Errorf("statsd.ServiceCheck status has invalid value")
}
return nil
}
// Encode returns the dogstatsd wire protocol representation for a serviceCheck.
// Tags may be passed which will be added to the encoded output but not to
// the ServiceCheck's list of tags, e.g. for default tags.
func (sc ServiceCheck) Encode(tags ...string) (string, error) {
err := sc.Check()
if err != nil {
return "", err
}
message := sc.escapedMessage()
var buffer bytes.Buffer
buffer.WriteString("_sc|")
buffer.WriteString(sc.Name)
buffer.WriteRune('|')
buffer.WriteString(strconv.FormatInt(int64(sc.Status), 10))
if !sc.Timestamp.IsZero() {
buffer.WriteString("|d:")
buffer.WriteString(strconv.FormatInt(int64(sc.Timestamp.Unix()), 10))
}
if len(sc.Hostname) != 0 {
buffer.WriteString("|h:")
buffer.WriteString(sc.Hostname)
}
writeTagString(&buffer, tags, sc.Tags)
if len(message) != 0 {
buffer.WriteString("|m:")
buffer.WriteString(message)
}
return buffer.String(), nil
@ -442,3 +690,68 @@ func (e Event) Encode(tags ...string) (string, error) {
func (e Event) escapedText() string {
return strings.Replace(e.Text, "\n", "\\n", -1)
}
func (sc ServiceCheck) escapedMessage() string {
msg := strings.Replace(sc.Message, "\n", "\\n", -1)
return strings.Replace(msg, "m:", `m\:`, -1)
}
func removeNewlines(str string) string {
return strings.Replace(str, "\n", "", -1)
}
func writeTagString(w io.Writer, tagList1, tagList2 []string) {
// the tag lists may be shared with other callers, so we cannot modify
// them in any way (which means we cannot append to them either)
// therefore we must make an entirely separate copy just for this call
totalLen := len(tagList1) + len(tagList2)
if totalLen == 0 {
return
}
tags := make([]string, 0, totalLen)
tags = append(tags, tagList1...)
tags = append(tags, tagList2...)
io.WriteString(w, "|#")
io.WriteString(w, removeNewlines(tags[0]))
for _, tag := range tags[1:] {
io.WriteString(w, ",")
io.WriteString(w, removeNewlines(tag))
}
}
func appendTagString(buf []byte, tagList1, tagList2 []string) []byte {
if len(tagList1) == 0 {
if len(tagList2) == 0 {
return buf
}
tagList1 = tagList2
tagList2 = nil
}
buf = append(buf, "|#"...)
buf = appendWithoutNewlines(buf, tagList1[0])
for _, tag := range tagList1[1:] {
buf = append(buf, ',')
buf = appendWithoutNewlines(buf, tag)
}
for _, tag := range tagList2 {
buf = append(buf, ',')
buf = appendWithoutNewlines(buf, tag)
}
return buf
}
func appendWithoutNewlines(buf []byte, s string) []byte {
// fastpath for strings without newlines
if strings.IndexByte(s, '\n') == -1 {
return append(buf, s...)
}
for _, b := range []byte(s) {
if b != '\n' {
buf = append(buf, b)
}
}
return buf
}
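
The rewritten client also supports Unix Domain Socket transport (addresses prefixed with unix://) and service checks. An illustrative sketch using the types defined above; the socket path and check name are arbitrary examples:

```go
package example

import "github.com/DataDog/datadog-go/statsd"

func reportHealth() error {
	// Talk to the agent over UDS; WithAsyncUDS() selects the queue-based writer.
	c, err := statsd.New("unix:///var/run/datadog/dsd.socket", statsd.WithAsyncUDS())
	if err != nil {
		return err
	}
	defer c.Close()

	// Ok, Warn, Critical and Unknown are the ServiceCheckStatus values above.
	return c.SimpleServiceCheck("consul.up", statsd.Ok)
}
```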

vendor/github.com/DataDog/datadog-go/statsd/udp.go (new vendored file)

@ -0,0 +1,73 @@
package statsd
import (
"errors"
"fmt"
"net"
"os"
"time"
)
const (
autoHostEnvName = "DD_AGENT_HOST"
autoPortEnvName = "DD_DOGSTATSD_PORT"
defaultUDPPort = "8125"
)
// udpWriter is an internal class wrapping around management of UDP connection
type udpWriter struct {
conn net.Conn
}
// New returns a pointer to a new udpWriter given an addr in the format "hostname:port".
func newUDPWriter(addr string) (*udpWriter, error) {
if addr == "" {
addr = addressFromEnvironment()
}
if addr == "" {
return nil, errors.New("No address passed and autodetection from environment failed")
}
udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, err
}
conn, err := net.DialUDP("udp", nil, udpAddr)
if err != nil {
return nil, err
}
writer := &udpWriter{conn: conn}
return writer, nil
}
// SetWriteTimeout is not needed for UDP, returns error
func (w *udpWriter) SetWriteTimeout(d time.Duration) error {
return errors.New("SetWriteTimeout: not supported for UDP connections")
}
// Write data to the UDP connection with no error handling
func (w *udpWriter) Write(data []byte) (int, error) {
return w.conn.Write(data)
}
func (w *udpWriter) Close() error {
return w.conn.Close()
}
func (w *udpWriter) remoteAddr() net.Addr {
return w.conn.RemoteAddr()
}
func addressFromEnvironment() string {
autoHost := os.Getenv(autoHostEnvName)
if autoHost == "" {
return ""
}
autoPort := os.Getenv(autoPortEnvName)
if autoPort == "" {
autoPort = defaultUDPPort
}
return fmt.Sprintf("%s:%s", autoHost, autoPort)
}
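
Because of addressFromEnvironment, a UDP client may be created with an empty address and have the target assembled from DD_AGENT_HOST plus DD_DOGSTATSD_PORT (default 8125). A small sketch of that path; the host value is an example only:

```go
package example

import (
	"os"

	"github.com/DataDog/datadog-go/statsd"
)

func newClientFromEnv() (*statsd.Client, error) {
	// Normally set by the deployment environment; set here only for the sketch.
	os.Setenv("DD_AGENT_HOST", "10.0.0.5")

	// An empty address makes newUDPWriter fall back to the environment,
	// yielding "10.0.0.5:8125" in this example.
	return statsd.New("")
}
```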

vendor/github.com/DataDog/datadog-go/statsd/uds.go (new vendored file)

@ -0,0 +1,11 @@
package statsd
import (
"time"
)
/*
UDSTimeout holds the default timeout for UDS socket writes, as they can get
blocking when the receiving buffer is full.
*/
const defaultUDSTimeout = 1 * time.Millisecond


@ -0,0 +1,113 @@
package statsd
import (
"fmt"
"net"
"time"
)
// asyncUdsWriter is an internal class wrapping around management of UDS connection
type asyncUdsWriter struct {
// Address to send metrics to, needed to allow reconnection on error
addr net.Addr
// Established connection object, or nil if not connected yet
conn net.Conn
// write timeout
writeTimeout time.Duration
// datagramQueue is the queue of datagrams ready to be sent
datagramQueue chan []byte
stopChan chan struct{}
}
// New returns a pointer to a new asyncUdsWriter given a socket file path as addr.
func newAsyncUdsWriter(addr string) (*asyncUdsWriter, error) {
udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
if err != nil {
return nil, err
}
writer := &asyncUdsWriter{
addr: udsAddr,
conn: nil,
writeTimeout: defaultUDSTimeout,
// 8192 * 8KB = 65.5MB
datagramQueue: make(chan []byte, 8192),
stopChan: make(chan struct{}, 1),
}
go writer.sendLoop()
return writer, nil
}
func (w *asyncUdsWriter) sendLoop() {
for {
select {
case datagram := <-w.datagramQueue:
w.write(datagram)
case <-w.stopChan:
return
}
}
}
// SetWriteTimeout allows the user to set a custom write timeout
func (w *asyncUdsWriter) SetWriteTimeout(d time.Duration) error {
w.writeTimeout = d
return nil
}
// Write queues the data to be sent asynchronously over the UDS connection.
// It returns an error if the datagram queue is full.
func (w *asyncUdsWriter) Write(data []byte) (int, error) {
select {
case w.datagramQueue <- data:
return len(data), nil
default:
return 0, fmt.Errorf("uds datagram queue is full (the agent might not be able to keep up)")
}
}
// write writes the given data to the UDS.
// This function is **not** thread safe.
func (w *asyncUdsWriter) write(data []byte) (int, error) {
conn, err := w.ensureConnection()
if err != nil {
return 0, err
}
conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
n, err := conn.Write(data)
if e, isNetworkErr := err.(net.Error); !isNetworkErr || !e.Temporary() {
// err is not temporary, Statsd server disconnected, retry connecting at next packet
w.unsetConnection()
return 0, e
}
return n, err
}
func (w *asyncUdsWriter) Close() error {
close(w.stopChan)
if w.conn != nil {
return w.conn.Close()
}
return nil
}
func (w *asyncUdsWriter) ensureConnection() (net.Conn, error) {
if w.conn != nil {
return w.conn, nil
}
newConn, err := net.Dial(w.addr.Network(), w.addr.String())
if err != nil {
return nil, err
}
w.conn = newConn
return newConn, nil
}
func (w *asyncUdsWriter) unsetConnection() {
w.conn = nil
}


@ -0,0 +1,92 @@
package statsd
import (
"net"
"sync"
"time"
)
// blockingUdsWriter is an internal class wrapping around management of UDS connection
type blockingUdsWriter struct {
// Address to send metrics to, needed to allow reconnection on error
addr net.Addr
// Established connection object, or nil if not connected yet
conn net.Conn
// write timeout
writeTimeout time.Duration
sync.RWMutex // used to lock conn / writer can replace it
}
// New returns a pointer to a new blockingUdsWriter given a socket file path as addr.
func newBlockingUdsWriter(addr string) (*blockingUdsWriter, error) {
udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
if err != nil {
return nil, err
}
// Defer connection to first Write
writer := &blockingUdsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout}
return writer, nil
}
// SetWriteTimeout allows the user to set a custom write timeout
func (w *blockingUdsWriter) SetWriteTimeout(d time.Duration) error {
w.writeTimeout = d
return nil
}
// Write data to the UDS connection with write timeout and minimal error handling:
// create the connection if nil, and destroy it if the statsd server has disconnected
func (w *blockingUdsWriter) Write(data []byte) (int, error) {
conn, err := w.ensureConnection()
if err != nil {
return 0, err
}
conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
n, e := conn.Write(data)
if err, isNetworkErr := e.(net.Error); !isNetworkErr || !err.Temporary() {
// Statsd server disconnected, retry connecting at next packet
w.unsetConnection()
return 0, e
}
return n, e
}
func (w *blockingUdsWriter) Close() error {
if w.conn != nil {
return w.conn.Close()
}
return nil
}
func (w *blockingUdsWriter) ensureConnection() (net.Conn, error) {
// Check if we've already got a socket we can use
w.RLock()
currentConn := w.conn
w.RUnlock()
if currentConn != nil {
return currentConn, nil
}
// Looks like we might need to connect - try again with write locking.
w.Lock()
defer w.Unlock()
if w.conn != nil {
return w.conn, nil
}
newConn, err := net.Dial(w.addr.Network(), w.addr.String())
if err != nil {
return nil, err
}
w.conn = newConn
return newConn, nil
}
func (w *blockingUdsWriter) unsetConnection() {
w.Lock()
defer w.Unlock()
w.conn = nil
}

vendor/github.com/armon/go-metrics/.travis.yml (new vendored file)

@ -0,0 +1,13 @@
language: go
go:
- "1.x"
env:
- GO111MODULE=on
install:
- go get ./...
script:
- go test ./...

vendor/github.com/armon/go-metrics/go.mod (new vendored file)

@ -0,0 +1,16 @@
module github.com/armon/go-metrics
go 1.12
require (
github.com/DataDog/datadog-go v2.2.0+incompatible
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible
github.com/circonus-labs/circonusllhist v0.1.3 // indirect
github.com/hashicorp/go-immutable-radix v1.0.0
github.com/hashicorp/go-retryablehttp v0.5.3 // indirect
github.com/pascaldekloe/goe v0.1.0
github.com/pkg/errors v0.8.1 // indirect
github.com/prometheus/client_golang v0.9.2
github.com/stretchr/testify v1.3.0 // indirect
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect
)

vendor/github.com/armon/go-metrics/go.sum (new vendored file)

@ -0,0 +1,46 @@
github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

View File

@@ -255,11 +255,11 @@ func (i *InmemSink) Data() []*IntervalMetrics {
}
copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
for k, v := range current.Counters {
copyCurrent.Counters[k] = v
copyCurrent.Counters[k] = v.deepCopy()
}
copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
for k, v := range current.Samples {
copyCurrent.Samples[k] = v
copyCurrent.Samples[k] = v.deepCopy()
}
current.RUnlock()
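// Minimal sketch of why the plain copy above becomes deepCopy(): SampledValue
// embeds *AggregateSample, so a struct copy still shares the pointee. The
// stand-in types below model only that pointer embedding, not the real fields.
package main

import "fmt"

type AggregateSample struct{ Sum float64 }

type SampledValue struct {
	*AggregateSample
}

func main() {
	live := SampledValue{&AggregateSample{Sum: 1}}

	shallow := live // copies the struct, but shares the *AggregateSample
	deep := live
	deep.AggregateSample = &AggregateSample{Sum: live.Sum} // clone the pointee

	live.Sum = 42 // a later update to the live interval

	fmt.Println(shallow.Sum, deep.Sum) // 42 1 — only the deep copy is a stable snapshot
}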

View File

@@ -41,6 +41,16 @@ type SampledValue struct {
DisplayLabels map[string]string `json:"Labels"`
}
// deepCopy allocates a new instance of AggregateSample
func (source *SampledValue) deepCopy() SampledValue {
dest := *source
if source.AggregateSample != nil {
dest.AggregateSample = &AggregateSample{}
*dest.AggregateSample = *source.AggregateSample
}
return dest
}
// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
data := i.Data()
@@ -52,12 +62,15 @@ func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request)
return nil, fmt.Errorf("no metric intervals have been initialized yet")
case n == 1:
// Show the current interval if it's all we have
interval = i.intervals[0]
interval = data[0]
default:
// Show the most recent finished interval if we have one
interval = i.intervals[n-2]
interval = data[n-2]
}
interval.RLock()
defer interval.RUnlock()
summary := MetricsSummary{
Timestamp: interval.Interval.Round(time.Second).UTC().String(),
Gauges: make([]GaugeValue, 0, len(interval.Gauges)),

View File

@@ -197,7 +197,7 @@ func (m *Metrics) filterLabels(labels []Label) []Label {
if labels == nil {
return nil
}
toReturn := labels[:0]
toReturn := []Label{}
for _, label := range labels {
if m.labelIsAllowed(&label) {
toReturn = append(toReturn, label)
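// Minimal sketch of the aliasing the hunk above removes: the old labels[:0]
// filtered into the caller's backing array and overwrote it, while a fresh
// []Label{} leaves the input untouched. The Label stand-in and keep func
// below are illustrative only.
package main

import "fmt"

type Label struct{ Name, Value string }

func filterInPlace(labels []Label, keep func(Label) bool) []Label {
	out := labels[:0] // shares the caller's backing array
	for _, l := range labels {
		if keep(l) {
			out = append(out, l)
		}
	}
	return out
}

func filterFresh(labels []Label, keep func(Label) bool) []Label {
	out := []Label{} // independent allocation; the caller's slice is untouched
	for _, l := range labels {
		if keep(l) {
			out = append(out, l)
		}
	}
	return out
}

func main() {
	keep := func(l Label) bool { return l.Name != "b" }

	in1 := []Label{{"a", "1"}, {"b", "2"}, {"c", "3"}}
	_ = filterInPlace(in1, keep)
	fmt.Println(in1) // [{a 1} {c 3} {c 3}] — the caller's slice was clobbered

	in2 := []Label{{"a", "1"}, {"b", "2"}, {"c", "3"}}
	_ = filterFresh(in2, keep)
	fmt.Println(in2) // [{a 1} {b 2} {c 3}] — left intact
}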

View File

@@ -1,3 +1,11 @@
.DS_Store
env.sh
NOTES.md
# codecov.io
.codecov
coverage.txt
coverage.xml
coverage.html
vendor/

View File

@@ -0,0 +1,72 @@
# v2.3.1
* fix: incorrect attribute types in graph overlays (docs vs what api actually returns)
# v2.3.0
* fix: graph structures incorrectly represented nesting of overlay sets
# v2.2.7
* add: `search` (`*string`) attribute to graph datapoint
* add: `cluster_ip` (`*string`) attribute to broker details
# v2.2.6
* fix: func signature to match go-retryablehttp update
* upd: dependency go-retryablehttp, lock to v0.5.2 to prevent future breaking patch features
# v2.2.5
* upd: switch from tracking master to versions for retryablehttp and circonusllhist now that both repositories are doing releases
# v2.2.4
* fix: worksheet.graphs is a required attribute. worksheet.smart_queries is an optional attribute.
# v2.2.3
* upd: remove go.{mod,dep} as cgm being v2 causes more issues than it solves at this point. will re-add after `go mod` becomes more common and adding `v2` to all internal import statements won't cause additional issues.
# v2.2.2
* upd: add go.mod and go.sum
# v2.2.1
* fix: if submission url host is 'api.circonus.com' do not use private CA in TLSConfig
# v2.2.0
* fix: do not reset counter|gauge|text funcs after each snapshot (only on explicit call to Reset)
* upd: dashboards - optional widget attributes - which are structs - should be pointers for correct omission in json sent to api
* fix: dashboards - remove `omitempty` from required attributes
* fix: graphs - remove `omitempty` from required attributes
* fix: worksheets - correct attribute name, remove `omitempty` from required attributes
* fix: handle case where a broker has no external host or ip set
# v2.1.2
* upd: breaking change in upstream repo
* upd: upstream deps
# v2.1.1
* dep dependencies
* fix two instances of shadowed variables
* fix several documentation typos
* simplify (gofmt -s)
* remove an inefficient use of regexp.MatchString
# v2.1.0
* Add unix socket capability for SubmissionURL `http+unix://...`
* Add `RecordCountForValue` function to histograms
# v2.0.0
* gauges as `interface{}`
* change: `GeTestGauge(string) (string,error)` -> `GeTestGauge(string) (interface{},error)`
* add: `AddGauge(string, interface{})` to add a delta value to an existing gauge
* prom output candidate
* Add `CHANGELOG.md` to repository

View File

@@ -0,0 +1,39 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/circonus-labs/circonusllhist"
packages = ["."]
revision = "87d4d00b35adeefe4911ece727838749e0fab113"
version = "v0.1.3"
[[projects]]
name = "github.com/hashicorp/go-cleanhttp"
packages = ["."]
revision = "e8ab9daed8d1ddd2d3c4efba338fe2eeae2e4f18"
version = "v0.5.0"
[[projects]]
name = "github.com/hashicorp/go-retryablehttp"
packages = ["."]
revision = "73489d0a1476f0c9e6fb03f9c39241523a496dfd"
version = "v0.5.2"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
version = "v0.8.1"
[[projects]]
branch = "master"
name = "github.com/tv42/httpunix"
packages = ["."]
revision = "b75d8614f926c077e48d85f1f8f7885b758c6225"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "ff81639f2f1513555846304ee903af4d13a0f0f181e140e1ebb1d71aa18fb5fb"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -0,0 +1,15 @@
[[constraint]]
name = "github.com/circonus-labs/circonusllhist"
version = "0.1.3"
[[constraint]]
name = "github.com/hashicorp/go-retryablehttp"
version = "=0.5.2"
[[constraint]]
name = "github.com/pkg/errors"
version = "0.8.1"
[[constraint]]
branch = "master"
name = "github.com/tv42/httpunix"

View File

@@ -0,0 +1,113 @@
## Circonus gometrics options
### Example defaults
```go
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"path"
cgm "github.com/circonus-labs/circonus-gometrics"
)
func main() {
cfg := &cgm.Config{}
// Defaults
// General
cfg.Debug = false
cfg.Log = log.New(ioutil.Discard, "", log.LstdFlags)
cfg.Interval = "10s"
cfg.ResetCounters = "true"
cfg.ResetGauges = "true"
cfg.ResetHistograms = "true"
cfg.ResetText = "true"
// API
cfg.CheckManager.API.TokenKey = ""
cfg.CheckManager.API.TokenApp = "circonus-gometrics"
cfg.CheckManager.API.URL = "https://api.circonus.com/v2"
cfg.CheckManager.API.CACert = nil
cfg.CheckManager.API.TLSConfig = nil
// Check
_, an := path.Split(os.Args[0])
hn, _ := os.Hostname()
cfg.CheckManager.Check.ID = ""
cfg.CheckManager.Check.SubmissionURL = ""
cfg.CheckManager.Check.InstanceID = fmt.Sprintf("%s:%s", hn, an)
cfg.CheckManager.Check.TargetHost = cfg.CheckManager.Check.InstanceID
cfg.CheckManager.Check.DisplayName = cfg.CheckManager.Check.InstanceID
cfg.CheckManager.Check.SearchTag = fmt.Sprintf("service:%s", an)
cfg.CheckManager.Check.Tags = ""
cfg.CheckManager.Check.Secret = "" // randomly generated sha256 hash
cfg.CheckManager.Check.MaxURLAge = "5m"
cfg.CheckManager.Check.ForceMetricActivation = "false"
// Broker
cfg.CheckManager.Broker.ID = ""
cfg.CheckManager.Broker.SelectTag = ""
cfg.CheckManager.Broker.MaxResponseTime = "500ms"
cfg.CheckManager.Broker.TLSConfig = nil
// create a new cgm instance and start sending metrics...
// see the complete example in the main README.
}
```
## Options
| Option | Default | Description |
| ------ | ------- | ----------- |
| General ||
| `cfg.Log` | none | log.Logger instance to send logging messages. Default is to discard messages. If Debug is turned on and no instance is specified, messages will go to stderr. |
| `cfg.Debug` | false | Turn on debugging messages. |
| `cfg.Interval` | "10s" | Interval at which metrics are flushed and sent to Circonus. Set to "0s" to disable automatic flush (note, if disabled, `cgm.Flush()` must be called manually to send metrics to Circonus).|
| `cfg.ResetCounters` | "true" | Reset counter metrics after each submission. Change to "false" to retain (and continue submitting) the last value.|
| `cfg.ResetGauges` | "true" | Reset gauge metrics after each submission. Change to "false" to retain (and continue submitting) the last value.|
| `cfg.ResetHistograms` | "true" | Reset histogram metrics after each submission. Change to "false" to retain (and continue submitting) the last value.|
| `cfg.ResetText` | "true" | Reset text metrics after each submission. Change to "false" to retain (and continue submitting) the last value.|
|API||
| `cfg.CheckManager.API.TokenKey` | "" | [Circonus API Token key](https://login.circonus.com/user/tokens) |
| `cfg.CheckManager.API.TokenApp` | "circonus-gometrics" | App associated with API token |
| `cfg.CheckManager.API.URL` | "https://api.circonus.com/v2" | Circonus API URL |
| `cfg.CheckManager.API.TLSConfig` | nil | Custom tls.Config to use when communicating with Circonus API |
| `cfg.CheckManager.API.CACert` | nil | DEPRECATED - use TLSConfig ~~[*x509.CertPool](https://golang.org/pkg/crypto/x509/#CertPool) with CA Cert to validate API endpoint using internal CA or self-signed certificates~~ |
|Check||
| `cfg.CheckManager.Check.ID` | "" | Check ID of previously created check. (*Note: **check id** not **check bundle id**.*) |
| `cfg.CheckManager.Check.SubmissionURL` | "" | Submission URL of previously created check. Metrics can also be sent to a local [circonus-agent](https://github.com/circonus-labs/circonus-agent) by using the agent's URL (e.g. `http://127.0.0.1:2609/write/appid` where `appid` is a unique identifier for the application which will prefix all metrics. Additionally, the circonus-agent can optionally listen for requests to `/write` on a unix socket - to leverage this feature, use a URL such as `http+unix:///path/to/socket_file/write/appid`). |
| `cfg.CheckManager.Check.InstanceID` | hostname:program name | An identifier for the 'group of metrics emitted by this process or service'. |
| `cfg.CheckManager.Check.TargetHost` | InstanceID | Explicit setting of `check.target`. |
| `cfg.CheckManager.Check.DisplayName` | InstanceID | Custom `check.display_name`. Shows in UI check list. |
| `cfg.CheckManager.Check.SearchTag` | service:program name | Specific tag used to search for an existing check when neither SubmissionURL nor ID are provided. |
| `cfg.CheckManager.Check.Tags` | "" | List (comma separated) of tags to add to check when it is being created. The SearchTag will be added to the list. |
| `cfg.CheckManager.Check.Secret` | randomly generated | A secret to use when creating an httptrap check. |
| `cfg.CheckManager.Check.MaxURLAge` | "5m" | Maximum amount of time to retry a [failing] submission URL before refreshing it. |
| `cfg.CheckManager.Check.ForceMetricActivation` | "false" | If a metric has been disabled via the UI the default behavior is to *not* re-activate the metric; this setting overrides the behavior and will re-activate the metric when it is encountered. |
|Broker||
| `cfg.CheckManager.Broker.ID` | "" | ID of a specific broker to use when creating a check. Default is to use a random enterprise broker or the public Circonus default broker. |
| `cfg.CheckManager.Broker.SelectTag` | "" | Used to select a broker with the same tag(s). If more than one broker has the tag(s), one will be selected randomly from the resulting list. (e.g. could be used to select one from a list of brokers serving a specific colo/region. "dc:sfo", "loc:nyc,dc:nyc01", "zone:us-west") |
| `cfg.CheckManager.Broker.MaxResponseTime` | "500ms" | Maximum amount of time to wait for a broker connection test to be considered valid (if latency exceeds this, the broker is considered invalid and not available for selection). |
| `cfg.CheckManager.Broker.TLSConfig` | nil | Custom tls.Config to use when communicating with Circonus Broker |
## Notes:
* All options are *strings* with the following exceptions:
* `cfg.Log` - an instance of [`log.Logger`](https://golang.org/pkg/log/#Logger) or something else (e.g. [logrus](https://github.com/Sirupsen/logrus)) which can be used to satisfy the interface requirements.
* `cfg.Debug` - a boolean true|false.
* At a minimum, one of either `API.TokenKey` or `Check.SubmissionURL` is **required** for cgm to function.
* Check management can be disabled by providing a `Check.SubmissionURL` without an `API.TokenKey`. Note: the supplied URL needs to be http or the broker needs to be running with a cert which can be verified. Otherwise, the `API.TokenKey` will be required to retrieve the correct CA certificate to validate the broker's cert for the SSL connection. A short sketch of this setup follows these notes.
* A note on `Check.InstanceID`: the instance id is used to consistently identify a check; the display name can be changed in the UI and the hostname may be ephemeral, so for metric continuity the instance id is what locates existing checks. Since check.target is never actually used by an httptrap check, it is more decorative than functional and a valid FQDN is not required for an httptrap check.target. However, using the instance id as the target can pollute the Host list in the UI with host:application specific entries.
* Check identification precedence
1. Check SubmissionURL
2. Check ID
3. Search
1. Search for an active httptrap check for TargetHost which has the SearchTag
2. Search for an active httptrap check which has the SearchTag and the InstanceID in the notes field
3. Create a new check
* Broker selection
1. If Broker.ID or Broker.SelectTag are not specified, a broker will be selected randomly from the list of brokers available to the API token. Enterprise brokers take precedence. A viable broker is "active", has the "httptrap" module enabled, and responds within Broker.MaxResponseTime.
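For reference, a minimal sketch of the check-management-disabled case described above: only `Check.SubmissionURL` is set and no `API.TokenKey` is supplied. The submission URL and metric name are placeholders.
```go
package main

import (
	"log"
	"os"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

func main() {
	cfg := &cgm.Config{}
	cfg.Log = log.New(os.Stderr, "cgm ", log.LstdFlags)

	// No API.TokenKey: check management is disabled and metrics are sent
	// straight to the submission URL of an existing httptrap check (or a
	// local circonus-agent endpoint). The URL below is a placeholder.
	cfg.CheckManager.Check.SubmissionURL = "http://127.0.0.1:2609/write/myapp"

	metrics, err := cgm.NewCirconusMetrics(cfg)
	if err != nil {
		log.Fatal(err)
	}

	metrics.Increment("requests") // placeholder metric name
	metrics.Flush()               // flush outstanding metrics before exit
}
```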

View File

@@ -1,189 +1,186 @@
# Circonus metrics tracking for Go applications
This library supports named counters, gauges and histograms.
It also provides convenience wrappers for registering latency
instrumented functions with Go's builtin http server.
This library supports named counters, gauges and histograms. It also provides convenience wrappers for registering latency instrumented functions with Go's builtin http server.
Initializing only requires setting an ApiToken.
Initializing only requires setting an [API Token](https://login.circonus.com/user/tokens) at a minimum.
## Options
See [OPTIONS.md](OPTIONS.md) for information on all of the available cgm options.
## Example
**rough and simple**
### Bare bones minimum
A working cut-n-paste example. Simply set the required environment variable `CIRCONUS_API_TOKEN` and run.
```go
package main
import (
"log"
"math/rand"
"os"
"time"
"log"
"math/rand"
"os"
"os/signal"
"syscall"
"time"
cgm "github.com/circonus-labs/circonus-gometrics"
cgm "github.com/circonus-labs/circonus-gometrics"
)
func main() {
logger := log.New(os.Stdout, "", log.LstdFlags)
logger.Println("Configuring cgm")
logger.Println("Configuring cgm")
cmc := &cgm.Config{}
cmc := &cgm.Config{}
cmc.Debug = false // set to true for debug messages
cmc.Log = logger
// Interval at which metrics are submitted to Circonus, default: 10 seconds
// cmc.Interval = "10s" // 10 seconds
// Circonus API Token key (https://login.circonus.com/user/tokens)
cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")
// Enable debug messages, default: false
cmc.Debug = true
logger.Println("Creating new cgm instance")
// Send debug messages to specific log.Logger instance
// default: if debug stderr, else, discard
cmc.Log = logger
metrics, err := cgm.NewCirconusMetrics(cmc)
if err != nil {
logger.Println(err)
os.Exit(1)
}
// Reset counter metrics after each submission, default: "true"
// Change to "false" to retain (and continue submitting) the last value.
// cmc.ResetCounters = "true"
// Reset gauge metrics after each submission, default: "true"
// Change to "false" to retain (and continue submitting) the last value.
// cmc.ResetGauges = "true"
// Reset histogram metrics after each submission, default: "true"
// Change to "false" to retain (and continue submitting) the last value.
// cmc.ResetHistograms = "true"
// Reset text metrics after each submission, default: "true"
// Change to "false" to retain (and continue submitting) the last value.
// cmc.ResetText = "true"
// Circonus API configuration options
//
// Token, no default (blank disables check manager)
cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")
// App name, default: circonus-gometrics
cmc.CheckManager.API.TokenApp = os.Getenv("CIRCONUS_API_APP")
// URL, default: https://api.circonus.com/v2
cmc.CheckManager.API.URL = os.Getenv("CIRCONUS_API_URL")
// Check configuration options
//
// precedence 1 - explicit submission_url
// precedence 2 - specific check id (note: not a check bundle id)
// precedence 3 - search using instanceId and searchTag
// otherwise: if an applicable check is NOT specified or found, an
// attempt will be made to automatically create one
//
// Submission URL for an existing [httptrap] check
cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISION_URL")
// ID of an existing [httptrap] check (note: check id not check bundle id)
cmc.CheckManager.Check.ID = os.Getenv("CIRCONUS_CHECK_ID")
// if neither a submission url nor check id are provided, an attempt will be made to find an existing
// httptrap check by using the circonus api to search for a check matching the following criteria:
// an active check,
// of type httptrap,
// where the target/host is equal to InstanceId - see below
// and the check has a tag equal to SearchTag - see below
// Instance ID - an identifier for the 'group of metrics emitted by this process or service'
// this is used as the value for check.target (aka host)
// default: 'hostname':'program name'
// note: for a persistent instance that is ephemeral or transient where metric continuity is
// desired set this explicitly so that the current hostname will not be used.
// cmc.CheckManager.Check.InstanceID = ""
// Search tag - specific tag(s) used in conjunction with isntanceId to search for an
// existing check. comma separated string of tags (spaces will be removed, no commas
// in tag elements).
// default: service:application name (e.g. service:consul service:nomad etc.)
// cmc.CheckManager.Check.SearchTag = ""
// Check secret, default: generated when a check needs to be created
// cmc.CheckManager.Check.Secret = ""
// Additional tag(s) to add when *creating* a check. comma separated string
// of tags (spaces will be removed, no commas in tag elements).
// (e.g. group:abc or service_role:agent,group:xyz).
// default: none
// cmc.CheckManager.Check.Tags = ""
// max amount of time to to hold on to a submission url
// when a given submission fails (due to retries) if the
// time the url was last updated is > than this, the trap
// url will be refreshed (e.g. if the broker is changed
// in the UI) default 5 minutes
// cmc.CheckManager.Check.MaxURLAge = "5m"
// custom display name for check, default: "InstanceId /cgm"
// cmc.CheckManager.Check.DisplayName = ""
// force metric activation - if a metric has been disabled via the UI
// the default behavior is to *not* re-activate the metric; this setting
// overrides the behavior and will re-activate the metric when it is
// encountered. "(true|false)", default "false"
// cmc.CheckManager.Check.ForceMetricActivation = "false"
// Broker configuration options
//
// Broker ID of specific broker to use, default: random enterprise broker or
// Circonus default if no enterprise brokers are available.
// default: only used if set
// cmc.CheckManager.Broker.ID = ""
// used to select a broker with the same tag(s) (e.g. can be used to dictate that a broker
// serving a specific location should be used. "dc:sfo", "loc:nyc,dc:nyc01", "zone:us-west")
// if more than one broker has the tag(s), one will be selected randomly from the resulting
// list. comma separated string of tags (spaces will be removed, no commas in tag elements).
// default: none
// cmc.CheckManager.Broker.SelectTag = ""
// longest time to wait for a broker connection (if latency is > the broker will
// be considered invalid and not available for selection.), default: 500 milliseconds
// cmc.CheckManager.Broker.MaxResponseTime = "500ms"
// note: if broker Id or SelectTag are not specified, a broker will be selected randomly
// from the list of brokers available to the api token. enterprise brokers take precedence
// viable brokers are "active", have the "httptrap" module enabled, are reachable and respond
// within MaxResponseTime.
logger.Println("Creating new cgm instance")
metrics, err := cgm.NewCirconusMetrics(cmc)
if err != nil {
panic(err)
}
src := rand.NewSource(time.Now().UnixNano())
rnd := rand.New(src)
logger.Println("Starting cgm internal auto-flush timer")
metrics.Start()
src := rand.NewSource(time.Now().UnixNano())
rnd := rand.New(src)
logger.Println("Adding ctrl-c trap")
c := make(chan os.Signal, 2)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
logger.Println("Received CTRL-C, flushing outstanding metrics before exit")
metrics.Flush()
os.Exit(0)
}()
c := make(chan os.Signal, 2)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
logger.Println("Received CTRL-C, flushing outstanding metrics before exit")
metrics.Flush()
os.Exit(0)
}()
logger.Println("Starting to send metrics")
// number of "sets" of metrics to send
max := 60
for i := 1; i < max; i++ {
logger.Printf("\tmetric set %d of %d", i, 60)
metrics.Timing("foo", rnd.Float64()*10)
metrics.Increment("bar")
metrics.Gauge("baz", 10)
time.Sleep(time.Second)
}
metrics.SetText("fini", "complete")
logger.Println("Flushing any outstanding metrics manually")
metrics.Flush()
}
```
### A more complete example
A working, cut-n-paste example with all options available for modification; it also demonstrates metric tagging.
```go
package main
import (
"log"
"math/rand"
"os"
"os/signal"
"syscall"
"time"
cgm "github.com/circonus-labs/circonus-gometrics"
)
func main() {
logger := log.New(os.Stdout, "", log.LstdFlags)
logger.Println("Configuring cgm")
cmc := &cgm.Config{}
// General
cmc.Interval = "10s"
cmc.Log = logger
cmc.Debug = false
cmc.ResetCounters = "true"
cmc.ResetGauges = "true"
cmc.ResetHistograms = "true"
cmc.ResetText = "true"
// Circonus API configuration options
cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")
cmc.CheckManager.API.TokenApp = os.Getenv("CIRCONUS_API_APP")
cmc.CheckManager.API.URL = os.Getenv("CIRCONUS_API_URL")
cmc.CheckManager.API.TLSConfig = nil
// Check configuration options
cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISSION_URL")
cmc.CheckManager.Check.ID = os.Getenv("CIRCONUS_CHECK_ID")
cmc.CheckManager.Check.InstanceID = ""
cmc.CheckManager.Check.DisplayName = ""
cmc.CheckManager.Check.TargetHost = ""
// if hn, err := os.Hostname(); err == nil {
// cmc.CheckManager.Check.TargetHost = hn
// }
cmc.CheckManager.Check.SearchTag = ""
cmc.CheckManager.Check.Secret = ""
cmc.CheckManager.Check.Tags = ""
cmc.CheckManager.Check.MaxURLAge = "5m"
cmc.CheckManager.Check.ForceMetricActivation = "false"
// Broker configuration options
cmc.CheckManager.Broker.ID = ""
cmc.CheckManager.Broker.SelectTag = ""
cmc.CheckManager.Broker.MaxResponseTime = "500ms"
cmc.CheckManager.Broker.TLSConfig = nil
logger.Println("Creating new cgm instance")
metrics, err := cgm.NewCirconusMetrics(cmc)
if err != nil {
logger.Println(err)
os.Exit(1)
}
src := rand.NewSource(time.Now().UnixNano())
rnd := rand.New(src)
logger.Println("Adding ctrl-c trap")
c := make(chan os.Signal, 2)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
logger.Println("Received CTRL-C, flushing outstanding metrics before exit")
metrics.Flush()
os.Exit(0)
}()
// Add metric tags (append to any existing tags on specified metric)
metrics.AddMetricTags("foo", []string{"cgm:test"})
metrics.AddMetricTags("baz", []string{"cgm:test"})
logger.Println("Starting to send metrics")
logger.Println("Starting to send metrics")
// number of "sets" of metrics to send
max := 60
// number of "sets" of metrics to send
max := 60
for i := 1; i < max; i++ {
logger.Printf("\tmetric set %d of %d", i, 60)
for i := 1; i < max; i++ {
logger.Printf("\tmetric set %d of %d", i, 60)
metrics.Timing("foo", rnd.Float64()*10)
metrics.Increment("bar")
metrics.Gauge("baz", 10)
metrics.Timing("foo", rnd.Float64()*10)
metrics.Increment("bar")
metrics.Gauge("baz", 10)
if i == 35 {
// Set metric tags (overwrite current tags on specified metric)
@@ -191,23 +188,23 @@ func main() {
}
time.Sleep(time.Second)
}
}
logger.Println("Flushing any outstanding metrics manually")
metrics.Flush()
logger.Println("Flushing any outstanding metrics manually")
metrics.Flush()
}
```
### HTTP Handler wrapping
```
```go
http.HandleFunc("/", metrics.TrackHTTPLatency("/", handler_func))
```
### HTTP latency example
```
```go
package main
import (
@@ -225,7 +222,6 @@ func main() {
if err != nil {
panic(err)
}
metrics.Start()
http.HandleFunc("/", metrics.TrackHTTPLatency("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello, %s!", r.URL.Path[1:])
@@ -235,4 +231,4 @@
```
Unless otherwise noted, the source files are distributed under the BSD-style license found in the LICENSE file.
Unless otherwise noted, the source files are distributed under the BSD-style license found in the [LICENSE](LICENSE) file.

View File

@@ -0,0 +1,163 @@
## Circonus API package
Full api documentation (for using *this* package) is available at [godoc.org](https://godoc.org/github.com/circonus-labs/circonus-gometrics/api). Links in the lists below go directly to the generic Circonus API documentation for the endpoint.
### Straight [raw] API access
* Get
* Post (for creates)
* Put (for updates)
* Delete
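A minimal sketch of raw access (the token source and request path are placeholders; `New`, `Config`, and `Get` are as shown for this package elsewhere in this diff):
```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/circonus-labs/circonus-gometrics/api"
)

func main() {
	client, err := api.New(&api.Config{
		TokenKey: os.Getenv("CIRCONUS_API_TOKEN"), // placeholder token source
		Log:      log.New(os.Stderr, "api ", log.LstdFlags),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Raw GET against an endpoint; the response comes back as raw JSON bytes.
	raw, err := client.Get("/check_bundle")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", raw)
}
```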
### Helpers for currently supported API endpoints
> Note: these interfaces are still being actively developed. For example, many of the `New*` methods currently return only an empty struct; sensible defaults will be added going forward. Other common helper methods for the various endpoints may be added as use cases emerge, and the organization of the API may change if common use contexts would benefit significantly. A short usage sketch follows the endpoint list below.
* [Account](https://login.circonus.com/resources/api/calls/account)
* FetchAccount
* FetchAccounts
* UpdateAccount
* SearchAccounts
* [Acknowledgement](https://login.circonus.com/resources/api/calls/acknowledgement)
* NewAcknowledgement
* FetchAcknowledgement
* FetchAcknowledgements
* UpdateAcknowledgement
* CreateAcknowledgement
* DeleteAcknowledgement
* DeleteAcknowledgementByCID
* SearchAcknowledgements
* [Alert](https://login.circonus.com/resources/api/calls/alert)
* FetchAlert
* FetchAlerts
* SearchAlerts
* [Annotation](https://login.circonus.com/resources/api/calls/annotation)
* NewAnnotation
* FetchAnnotation
* FetchAnnotations
* UpdateAnnotation
* CreateAnnotation
* DeleteAnnotation
* DeleteAnnotationByCID
* SearchAnnotations
* [Broker](https://login.circonus.com/resources/api/calls/broker)
* FetchBroker
* FetchBrokers
* SearchBrokers
* [Check Bundle](https://login.circonus.com/resources/api/calls/check_bundle)
* NewCheckBundle
* FetchCheckBundle
* FetchCheckBundles
* UpdateCheckBundle
* CreateCheckBundle
* DeleteCheckBundle
* DeleteCheckBundleByCID
* SearchCheckBundles
* [Check Bundle Metrics](https://login.circonus.com/resources/api/calls/check_bundle_metrics)
* FetchCheckBundleMetrics
* UpdateCheckBundleMetrics
* [Check](https://login.circonus.com/resources/api/calls/check)
* FetchCheck
* FetchChecks
* SearchChecks
* [Contact Group](https://login.circonus.com/resources/api/calls/contact_group)
* NewContactGroup
* FetchContactGroup
* FetchContactGroups
* UpdateContactGroup
* CreateContactGroup
* DeleteContactGroup
* DeleteContactGroupByCID
* SearchContactGroups
* [Dashboard](https://login.circonus.com/resources/api/calls/dashboard) -- note, this is a work in progress, the methods/types may still change
* NewDashboard
* FetchDashboard
* FetchDashboards
* UpdateDashboard
* CreateDashboard
* DeleteDashboard
* DeleteDashboardByCID
* SearchDashboards
* [Graph](https://login.circonus.com/resources/api/calls/graph)
* NewGraph
* FetchGraph
* FetchGraphs
* UpdateGraph
* CreateGraph
* DeleteGraph
* DeleteGraphByCID
* SearchGraphs
* [Metric Cluster](https://login.circonus.com/resources/api/calls/metric_cluster)
* NewMetricCluster
* FetchMetricCluster
* FetchMetricClusters
* UpdateMetricCluster
* CreateMetricCluster
* DeleteMetricCluster
* DeleteMetricClusterByCID
* SearchMetricClusters
* [Metric](https://login.circonus.com/resources/api/calls/metric)
* FetchMetric
* FetchMetrics
* UpdateMetric
* SearchMetrics
* [Maintenance window](https://login.circonus.com/resources/api/calls/maintenance)
* NewMaintenanceWindow
* FetchMaintenanceWindow
* FetchMaintenanceWindows
* UpdateMaintenanceWindow
* CreateMaintenanceWindow
* DeleteMaintenanceWindow
* DeleteMaintenanceWindowByCID
* SearchMaintenanceWindows
* [Outlier Report](https://login.circonus.com/resources/api/calls/outlier_report)
* NewOutlierReport
* FetchOutlierReport
* FetchOutlierReports
* UpdateOutlierReport
* CreateOutlierReport
* DeleteOutlierReport
* DeleteOutlierReportByCID
* SearchOutlierReports
* [Provision Broker](https://login.circonus.com/resources/api/calls/provision_broker)
* NewProvisionBroker
* FetchProvisionBroker
* UpdateProvisionBroker
* CreateProvisionBroker
* [Rule Set](https://login.circonus.com/resources/api/calls/rule_set)
* NewRuleset
* FetchRuleset
* FetchRulesets
* UpdateRuleset
* CreateRuleset
* DeleteRuleset
* DeleteRulesetByCID
* SearchRulesets
* [Rule Set Group](https://login.circonus.com/resources/api/calls/rule_set_group)
* NewRulesetGroup
* FetchRulesetGroup
* FetchRulesetGroups
* UpdateRulesetGroup
* CreateRulesetGroup
* DeleteRulesetGroup
* DeleteRulesetGroupByCID
* SearchRulesetGroups
* [User](https://login.circonus.com/resources/api/calls/user)
* FetchUser
* FetchUsers
* UpdateUser
* SearchUsers
* [Worksheet](https://login.circonus.com/resources/api/calls/worksheet)
* NewWorksheet
* FetchWorksheet
* FetchWorksheets
* UpdateWorksheet
* CreateWorksheet
* DeleteWorksheet
* DeleteWorksheetByCID
* SearchWorksheets
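As noted above, a short sketch of the typed helpers, here `FetchAccount` (passing nil fetches `/account/current`; the token source is a placeholder):
```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/circonus-labs/circonus-gometrics/api"
)

func main() {
	client, err := api.New(&api.Config{
		TokenKey: os.Getenv("CIRCONUS_API_TOKEN"), // placeholder token source
	})
	if err != nil {
		log.Fatal(err)
	}

	// nil CID means "the account this token belongs to" (/account/current).
	acct, err := client.FetchAccount(nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(acct.Name, acct.Timezone)
}
```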
---
Unless otherwise noted, the source files are distributed under the BSD-style license found in the LICENSE file.

View File

@@ -0,0 +1,181 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Account API support - Fetch and Update
// See: https://login.circonus.com/resources/api/calls/account
// Note: Create and Delete are not supported for Accounts via the API
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// AccountLimit defines a usage limit imposed on an account
type AccountLimit struct {
Limit uint `json:"_limit,omitempty"` // uint >=0
Type string `json:"_type,omitempty"` // string
Used uint `json:"_used,omitempty"` // uint >=0
}
// AccountInvite defines outstanding invites
type AccountInvite struct {
Email string `json:"email"` // string
Role string `json:"role"` // string
}
// AccountUser defines current users
type AccountUser struct {
Role string `json:"role"` // string
UserCID string `json:"user"` // string
}
// Account defines an account. See https://login.circonus.com/resources/api/calls/account for more information.
type Account struct {
Address1 *string `json:"address1,omitempty"` // string or null
Address2 *string `json:"address2,omitempty"` // string or null
CCEmail *string `json:"cc_email,omitempty"` // string or null
CID string `json:"_cid,omitempty"` // string
City *string `json:"city,omitempty"` // string or null
ContactGroups []string `json:"_contact_groups,omitempty"` // [] len >= 0
Country string `json:"country_code,omitempty"` // string
Description *string `json:"description,omitempty"` // string or null
Invites []AccountInvite `json:"invites,omitempty"` // [] len >= 0
Name string `json:"name,omitempty"` // string
OwnerCID string `json:"_owner,omitempty"` // string
StateProv *string `json:"state_prov,omitempty"` // string or null
Timezone string `json:"timezone,omitempty"` // string
UIBaseURL string `json:"_ui_base_url,omitempty"` // string
Usage []AccountLimit `json:"_usage,omitempty"` // [] len >= 0
Users []AccountUser `json:"users,omitempty"` // [] len >= 0
}
// FetchAccount retrieves account with passed cid. Pass nil for '/account/current'.
func (a *API) FetchAccount(cid CIDType) (*Account, error) {
var accountCID string
if cid == nil || *cid == "" {
accountCID = config.AccountPrefix + "/current"
} else {
accountCID = string(*cid)
}
matched, err := regexp.MatchString(config.AccountCIDRegex, accountCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid account CID [%s]", accountCID)
}
result, err := a.Get(accountCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] account fetch, received JSON: %s", string(result))
}
account := new(Account)
if err := json.Unmarshal(result, account); err != nil {
return nil, err
}
return account, nil
}
// FetchAccounts retrieves all accounts available to the API Token.
func (a *API) FetchAccounts() (*[]Account, error) {
result, err := a.Get(config.AccountPrefix)
if err != nil {
return nil, err
}
var accounts []Account
if err := json.Unmarshal(result, &accounts); err != nil {
return nil, err
}
return &accounts, nil
}
// UpdateAccount updates passed account.
func (a *API) UpdateAccount(cfg *Account) (*Account, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid account config [nil]")
}
accountCID := string(cfg.CID)
matched, err := regexp.MatchString(config.AccountCIDRegex, accountCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid account CID [%s]", accountCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] account update, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(accountCID, jsonCfg)
if err != nil {
return nil, err
}
account := &Account{}
if err := json.Unmarshal(result, account); err != nil {
return nil, err
}
return account, nil
}
// SearchAccounts returns accounts matching a filter (search queries are not
// supported by the account endpoint). Pass nil as filter for all accounts the
// API Token can access.
func (a *API) SearchAccounts(filterCriteria *SearchFilterType) (*[]Account, error) {
q := url.Values{}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchAccounts()
}
reqURL := url.URL{
Path: config.AccountPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var accounts []Account
if err := json.Unmarshal(result, &accounts); err != nil {
return nil, err
}
return &accounts, nil
}

View File

@@ -0,0 +1,190 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Acknowledgement API support - Fetch, Create, Update, Delete*, and Search
// See: https://login.circonus.com/resources/api/calls/acknowledgement
// * : delete (cancel) by updating with AcknowledgedUntil set to 0
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// Acknowledgement defines an acknowledgement. See https://login.circonus.com/resources/api/calls/acknowledgement for more information.
type Acknowledgement struct {
AcknowledgedBy string `json:"_acknowledged_by,omitempty"` // string
AcknowledgedOn uint `json:"_acknowledged_on,omitempty"` // uint
AcknowledgedUntil interface{} `json:"acknowledged_until,omitempty"` // NOTE received as uint; can be set using string or uint
Active bool `json:"_active,omitempty"` // bool
AlertCID string `json:"alert,omitempty"` // string
CID string `json:"_cid,omitempty"` // string
LastModified uint `json:"_last_modified,omitempty"` // uint
LastModifiedBy string `json:"_last_modified_by,omitempty"` // string
Notes string `json:"notes,omitempty"` // string
}
// NewAcknowledgement returns new Acknowledgement (with defaults, if applicable).
func NewAcknowledgement() *Acknowledgement {
return &Acknowledgement{}
}
// FetchAcknowledgement retrieves acknowledgement with passed cid.
func (a *API) FetchAcknowledgement(cid CIDType) (*Acknowledgement, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid acknowledgement CID [none]")
}
acknowledgementCID := string(*cid)
matched, err := regexp.MatchString(config.AcknowledgementCIDRegex, acknowledgementCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid acknowledgement CID [%s]", acknowledgementCID)
}
result, err := a.Get(acknowledgementCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] acknowledgement fetch, received JSON: %s", string(result))
}
acknowledgement := &Acknowledgement{}
if err := json.Unmarshal(result, acknowledgement); err != nil {
return nil, err
}
return acknowledgement, nil
}
// FetchAcknowledgements retrieves all acknowledgements available to the API Token.
func (a *API) FetchAcknowledgements() (*[]Acknowledgement, error) {
result, err := a.Get(config.AcknowledgementPrefix)
if err != nil {
return nil, err
}
var acknowledgements []Acknowledgement
if err := json.Unmarshal(result, &acknowledgements); err != nil {
return nil, err
}
return &acknowledgements, nil
}
// UpdateAcknowledgement updates passed acknowledgement.
func (a *API) UpdateAcknowledgement(cfg *Acknowledgement) (*Acknowledgement, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid acknowledgement config [nil]")
}
acknowledgementCID := string(cfg.CID)
matched, err := regexp.MatchString(config.AcknowledgementCIDRegex, acknowledgementCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid acknowledgement CID [%s]", acknowledgementCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] acknowledgement update, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(acknowledgementCID, jsonCfg)
if err != nil {
return nil, err
}
acknowledgement := &Acknowledgement{}
if err := json.Unmarshal(result, acknowledgement); err != nil {
return nil, err
}
return acknowledgement, nil
}
// CreateAcknowledgement creates a new acknowledgement.
func (a *API) CreateAcknowledgement(cfg *Acknowledgement) (*Acknowledgement, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid acknowledgement config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
result, err := a.Post(config.AcknowledgementPrefix, jsonCfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] acknowledgement create, sending JSON: %s", string(jsonCfg))
}
acknowledgement := &Acknowledgement{}
if err := json.Unmarshal(result, acknowledgement); err != nil {
return nil, err
}
return acknowledgement, nil
}
// SearchAcknowledgements returns acknowledgements matching
// the specified search query and/or filter. If nil is passed for
// both parameters all acknowledgements will be returned.
func (a *API) SearchAcknowledgements(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Acknowledgement, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchAcknowledgements()
}
reqURL := url.URL{
Path: config.AcknowledgementPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var acknowledgements []Acknowledgement
if err := json.Unmarshal(result, &acknowledgements); err != nil {
return nil, err
}
return &acknowledgements, nil
}

View File

@@ -0,0 +1,131 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Alert API support - Fetch and Search
// See: https://login.circonus.com/resources/api/calls/alert
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// Alert defines an alert. See https://login.circonus.com/resources/api/calls/alert for more information.
type Alert struct {
AcknowledgementCID *string `json:"_acknowledgement,omitempty"` // string or null
AlertURL string `json:"_alert_url,omitempty"` // string
BrokerCID string `json:"_broker,omitempty"` // string
CheckCID string `json:"_check,omitempty"` // string
CheckName string `json:"_check_name,omitempty"` // string
CID string `json:"_cid,omitempty"` // string
ClearedOn *uint `json:"_cleared_on,omitempty"` // uint or null
ClearedValue *string `json:"_cleared_value,omitempty"` // string or null
Maintenance []string `json:"_maintenance,omitempty"` // [] len >= 0
MetricLinkURL *string `json:"_metric_link,omitempty"` // string or null
MetricName string `json:"_metric_name,omitempty"` // string
MetricNotes *string `json:"_metric_notes,omitempty"` // string or null
OccurredOn uint `json:"_occurred_on,omitempty"` // uint
RuleSetCID string `json:"_rule_set,omitempty"` // string
Severity uint `json:"_severity,omitempty"` // uint
Tags []string `json:"_tags,omitempty"` // [] len >= 0
Value string `json:"_value,omitempty"` // string
}
// NewAlert returns a new alert (with defaults, if applicable)
func NewAlert() *Alert {
return &Alert{}
}
// FetchAlert retrieves alert with passed cid.
func (a *API) FetchAlert(cid CIDType) (*Alert, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid alert CID [none]")
}
alertCID := string(*cid)
matched, err := regexp.MatchString(config.AlertCIDRegex, alertCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid alert CID [%s]", alertCID)
}
result, err := a.Get(alertCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch alert, received JSON: %s", string(result))
}
alert := &Alert{}
if err := json.Unmarshal(result, alert); err != nil {
return nil, err
}
return alert, nil
}
// FetchAlerts retrieves all alerts available to the API Token.
func (a *API) FetchAlerts() (*[]Alert, error) {
result, err := a.Get(config.AlertPrefix)
if err != nil {
return nil, err
}
var alerts []Alert
if err := json.Unmarshal(result, &alerts); err != nil {
return nil, err
}
return &alerts, nil
}
// SearchAlerts returns alerts matching the specified search query
// and/or filter. If nil is passed for both parameters all alerts
// will be returned.
func (a *API) SearchAlerts(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Alert, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchAlerts()
}
reqURL := url.URL{
Path: config.AlertPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var alerts []Alert
if err := json.Unmarshal(result, &alerts); err != nil {
return nil, err
}
return &alerts, nil
}

View File

@@ -0,0 +1,223 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Annotation API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/annotation
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// Annotation defines an annotation. See https://login.circonus.com/resources/api/calls/annotation for more information.
type Annotation struct {
Category string `json:"category"` // string
CID string `json:"_cid,omitempty"` // string
Created uint `json:"_created,omitempty"` // uint
Description string `json:"description"` // string
LastModified uint `json:"_last_modified,omitempty"` // uint
LastModifiedBy string `json:"_last_modified_by,omitempty"` // string
RelatedMetrics []string `json:"rel_metrics"` // [] len >= 0
Start uint `json:"start"` // uint
Stop uint `json:"stop"` // uint
Title string `json:"title"` // string
}
// NewAnnotation returns a new Annotation (with defaults, if applicable)
func NewAnnotation() *Annotation {
return &Annotation{}
}
// FetchAnnotation retrieves annotation with passed cid.
func (a *API) FetchAnnotation(cid CIDType) (*Annotation, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid annotation CID [none]")
}
annotationCID := string(*cid)
matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid annotation CID [%s]", annotationCID)
}
result, err := a.Get(annotationCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch annotation, received JSON: %s", string(result))
}
annotation := &Annotation{}
if err := json.Unmarshal(result, annotation); err != nil {
return nil, err
}
return annotation, nil
}
// FetchAnnotations retrieves all annotations available to the API Token.
func (a *API) FetchAnnotations() (*[]Annotation, error) {
result, err := a.Get(config.AnnotationPrefix)
if err != nil {
return nil, err
}
var annotations []Annotation
if err := json.Unmarshal(result, &annotations); err != nil {
return nil, err
}
return &annotations, nil
}
// UpdateAnnotation updates passed annotation.
func (a *API) UpdateAnnotation(cfg *Annotation) (*Annotation, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid annotation config [nil]")
}
annotationCID := string(cfg.CID)
matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid annotation CID [%s]", annotationCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update annotation, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(annotationCID, jsonCfg)
if err != nil {
return nil, err
}
annotation := &Annotation{}
if err := json.Unmarshal(result, annotation); err != nil {
return nil, err
}
return annotation, nil
}
// CreateAnnotation creates a new annotation.
func (a *API) CreateAnnotation(cfg *Annotation) (*Annotation, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid annotation config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create annotation, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.AnnotationPrefix, jsonCfg)
if err != nil {
return nil, err
}
annotation := &Annotation{}
if err := json.Unmarshal(result, annotation); err != nil {
return nil, err
}
return annotation, nil
}
// DeleteAnnotation deletes passed annotation.
func (a *API) DeleteAnnotation(cfg *Annotation) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid annotation config [nil]")
}
return a.DeleteAnnotationByCID(CIDType(&cfg.CID))
}
// DeleteAnnotationByCID deletes annotation with passed cid.
func (a *API) DeleteAnnotationByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid annotation CID [none]")
}
annotationCID := string(*cid)
matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid annotation CID [%s]", annotationCID)
}
_, err = a.Delete(annotationCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchAnnotations returns annotations matching the specified
// search query and/or filter. If nil is passed for both parameters
// all annotations will be returned.
func (a *API) SearchAnnotations(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Annotation, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchAnnotations()
}
reqURL := url.URL{
Path: config.AnnotationPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var annotations []Annotation
if err := json.Unmarshal(result, &annotations); err != nil {
return nil, err
}
return &annotations, nil
}

View File

@@ -2,24 +2,41 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package api provides methods for interacting with the Circonus API
package api
import (
"bytes"
"context"
crand "crypto/rand"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"math/big"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/hashicorp/go-retryablehttp"
)
func init() {
n, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
rand.Seed(time.Now().UTC().UnixNano())
return
}
rand.Seed(n.Int64())
}
const (
// a few sensible defaults
defaultAPIURL = "https://api.circonus.com/v2"
@@ -35,44 +52,76 @@ type TokenKeyType string
// TokenAppType - Circonus API Token app name
type TokenAppType string
// IDType Circonus object id (numeric portion of cid)
type IDType int
// TokenAccountIDType - Circonus API Token account id
type TokenAccountIDType string
// CIDType Circonus object cid
type CIDType string
type CIDType *string
// IDType Circonus object id
type IDType int
// URLType submission url type
type URLType string
// SearchQueryType search query
// SearchQueryType search query (see: https://login.circonus.com/resources/api#searching)
type SearchQueryType string
// SearchFilterType search filter
type SearchFilterType string
// SearchFilterType search filter (see: https://login.circonus.com/resources/api#filtering)
type SearchFilterType map[string][]string
// TagType search/select/custom tag(s) type
type TagType []string
// Config options for Circonus API
type Config struct {
URL string
// URL defines the API URL - default https://api.circonus.com/v2/
URL string
// TokenKey defines the key to use when communicating with the API
TokenKey string
// TokenApp defines the app to use when communicating with the API
TokenApp string
Log *log.Logger
Debug bool
TokenAccountID string
// CACert deprecating, use TLSConfig instead
CACert *x509.CertPool
// TLSConfig defines a custom tls configuration to use when communicating with the API
TLSConfig *tls.Config
Log *log.Logger
Debug bool
}
// API Circonus API
type API struct {
apiURL *url.URL
key TokenKeyType
app TokenAppType
Debug bool
Log *log.Logger
apiURL *url.URL
key TokenKeyType
app TokenAppType
accountID TokenAccountIDType
caCert *x509.CertPool
tlsConfig *tls.Config
Debug bool
Log *log.Logger
useExponentialBackoff bool
useExponentialBackoffmu sync.Mutex
}
// NewAPI returns a new Circonus API
// NewClient returns a new Circonus API (alias for New)
func NewClient(ac *Config) (*API, error) {
return New(ac)
}
// NewAPI returns a new Circonus API (alias for New)
func NewAPI(ac *Config) (*API, error) {
return New(ac)
}
// New returns a new Circonus API
func New(ac *Config) (*API, error) {
if ac == nil {
return nil, errors.New("Invalid API configuration (nil)")
@@ -88,6 +137,8 @@ func NewAPI(ac *Config) (*API, error) {
app = defaultAPIApp
}
acctID := TokenAccountIDType(ac.TokenAccountID)
au := string(ac.URL)
if au == "" {
au = defaultAPIURL
@@ -97,6 +148,7 @@ func NewAPI(ac *Config) (*API, error) {
au = fmt.Sprintf("https://%s/v2", ac.URL)
}
if last := len(au) - 1; last >= 0 && au[last] == '/' {
// strip off trailing '/'
au = au[:last]
}
apiURL, err := url.Parse(au)
@@ -104,7 +156,17 @@ func NewAPI(ac *Config) (*API, error) {
return nil, err
}
a := &API{apiURL, key, app, ac.Debug, ac.Log}
a := &API{
apiURL: apiURL,
key: key,
app: app,
accountID: acctID,
caCert: ac.CACert,
tlsConfig: ac.TLSConfig,
Debug: ac.Debug,
Log: ac.Log,
useExponentialBackoff: false,
}
a.Debug = ac.Debug
a.Log = ac.Log
@@ -118,51 +180,111 @@ func NewAPI(ac *Config) (*API, error) {
return a, nil
}
// EnableExponentialBackoff enables exponential backoff for the next API call(s)
// and keeps it enabled for all subsequent API calls until it is explicitly disabled.
func (a *API) EnableExponentialBackoff() {
a.useExponentialBackoffmu.Lock()
a.useExponentialBackoff = true
a.useExponentialBackoffmu.Unlock()
}
// DisableExponentialBackoff disables use of exponential backoff. If a request using
// exponential backoff is currently running, it will stop using exponential backoff
// on its next iteration (if needed).
func (a *API) DisableExponentialBackoff() {
a.useExponentialBackoffmu.Lock()
a.useExponentialBackoff = false
a.useExponentialBackoffmu.Unlock()
}
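// Usage sketch (illustrative): wrap a burst of calls so transient errors,
// 5xx responses and rate limits are retried with exponential backoff:
//
//	a.EnableExponentialBackoff()
//	defer a.DisableExponentialBackoff()
//	data, err := a.Get("/check_bundle")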
// Get API request
func (a *API) Get(reqPath string) ([]byte, error) {
return a.apiCall("GET", reqPath, nil)
return a.apiRequest("GET", reqPath, nil)
}
// Delete API request
func (a *API) Delete(reqPath string) ([]byte, error) {
return a.apiCall("DELETE", reqPath, nil)
return a.apiRequest("DELETE", reqPath, nil)
}
// Post API request
func (a *API) Post(reqPath string, data []byte) ([]byte, error) {
return a.apiCall("POST", reqPath, data)
return a.apiRequest("POST", reqPath, data)
}
// Put API request
func (a *API) Put(reqPath string, data []byte) ([]byte, error) {
return a.apiCall("PUT", reqPath, data)
return a.apiRequest("PUT", reqPath, data)
}
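// backoff returns a jittered wait of roughly interval/2 to interval seconds:
// the interval is scaled by a random factor in [1,2), halved, then rounded.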
func backoff(interval uint) float64 {
return math.Floor(((float64(interval) * (1 + rand.Float64())) / 2) + .5)
}
// apiRequest manages retry strategy for exponential backoffs
func (a *API) apiRequest(reqMethod string, reqPath string, data []byte) ([]byte, error) {
backoffs := []uint{2, 4, 8, 16, 32}
attempts := 0
success := false
var result []byte
var err error
for !success {
result, err = a.apiCall(reqMethod, reqPath, data)
if err == nil {
success = true
}
// break and return error if not using exponential backoff
if err != nil {
if !a.useExponentialBackoff {
break
}
if strings.Contains(err.Error(), "code 403") {
break
}
}
if !success {
var wait float64
if attempts >= len(backoffs) {
wait = backoff(backoffs[len(backoffs)-1])
} else {
wait = backoff(backoffs[attempts])
}
attempts++
a.Log.Printf("[WARN] API call failed %s, retrying in %d seconds.\n", err.Error(), uint(wait))
time.Sleep(time.Duration(wait) * time.Second)
}
}
return result, err
}
// apiCall call Circonus API
func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, error) {
dataReader := bytes.NewReader(data)
reqURL := a.apiURL.String()
if reqPath == "" {
return nil, errors.New("Invalid URL path")
}
if reqPath[:1] != "/" {
reqURL += "/"
}
if reqPath[:3] == "/v2" {
reqURL += reqPath[3:len(reqPath)]
if len(reqPath) >= 3 && reqPath[:3] == "/v2" {
reqURL += reqPath[3:]
} else {
reqURL += reqPath
}
req, err := retryablehttp.NewRequest(reqMethod, reqURL, dataReader)
if err != nil {
return nil, fmt.Errorf("[ERROR] creating API request: %s %+v", reqURL, err)
}
req.Header.Add("Accept", "application/json")
req.Header.Add("X-Circonus-Auth-Token", string(a.key))
req.Header.Add("X-Circonus-App-Name", string(a.app))
// keep last HTTP error in the event of retry failure
var lastHTTPError error
retryPolicy := func(resp *http.Response, err error) (bool, error) {
retryPolicy := func(ctx context.Context, resp *http.Response, err error) (bool, error) {
if ctxErr := ctx.Err(); ctxErr != nil {
return false, ctxErr
}
if err != nil {
lastHTTPError = err
return true, err
@ -172,24 +294,83 @@ func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, er
// errors and may relate to outages on the server side. This will catch
// invalid response codes as well, like 0 and 999.
// Retry on 429 (rate limit) as well.
if resp.StatusCode == 0 || resp.StatusCode >= 500 || resp.StatusCode == 429 {
if resp.StatusCode == 0 || // invalid response, no status code
resp.StatusCode >= 500 || // server-side error
resp.StatusCode == 429 { // rate limit
body, readErr := ioutil.ReadAll(resp.Body)
if readErr != nil {
lastHTTPError = fmt.Errorf("- last HTTP error: %d %+v", resp.StatusCode, readErr)
lastHTTPError = fmt.Errorf("- response: %d %s", resp.StatusCode, readErr.Error())
} else {
lastHTTPError = fmt.Errorf("- last HTTP error: %d %s", resp.StatusCode, string(body))
lastHTTPError = fmt.Errorf("- response: %d %s", resp.StatusCode, strings.TrimSpace(string(body)))
}
return true, nil
}
return false, nil
}
dataReader := bytes.NewReader(data)
req, err := retryablehttp.NewRequest(reqMethod, reqURL, dataReader)
if err != nil {
return nil, fmt.Errorf("[ERROR] creating API request: %s %+v", reqURL, err)
}
req.Header.Add("Accept", "application/json")
req.Header.Add("X-Circonus-Auth-Token", string(a.key))
req.Header.Add("X-Circonus-App-Name", string(a.app))
if string(a.accountID) != "" {
req.Header.Add("X-Circonus-Account-ID", string(a.accountID))
}
client := retryablehttp.NewClient()
client.RetryWaitMin = minRetryWait
client.RetryWaitMax = maxRetryWait
client.RetryMax = maxRetries
if a.apiURL.Scheme == "https" {
var tlscfg *tls.Config
if a.tlsConfig != nil { // preference full custom tls config
tlscfg = a.tlsConfig
} else if a.caCert != nil {
tlscfg = &tls.Config{RootCAs: a.caCert}
}
client.HTTPClient.Transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlscfg,
DisableKeepAlives: true,
MaxIdleConnsPerHost: -1,
DisableCompression: true,
}
} else {
client.HTTPClient.Transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
DisableKeepAlives: true,
MaxIdleConnsPerHost: -1,
DisableCompression: true,
}
}
a.useExponentialBackoffmu.Lock()
eb := a.useExponentialBackoff
a.useExponentialBackoffmu.Unlock()
if eb {
// limit to one request if using exponential backoff
client.RetryWaitMin = 1
client.RetryWaitMax = 2
client.RetryMax = 0
} else {
client.RetryWaitMin = minRetryWait
client.RetryWaitMax = maxRetryWait
client.RetryMax = maxRetries
}
// retryablehttp only groks log or no log
// but, outputs everything as [DEBUG] messages
if a.Debug {
client.Logger = a.Log
} else {


@ -2,51 +2,70 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Broker API support - Fetch and Search
// See: https://login.circonus.com/resources/api/calls/broker
package api
import (
"encoding/json"
"fmt"
"strings"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// BrokerDetail instance attributes
// BrokerDetail defines instance attributes
type BrokerDetail struct {
CN string `json:"cn"`
ExternalHost string `json:"external_host"`
ExternalPort int `json:"external_port"`
IP string `json:"ipaddress"`
MinVer int `json:"minimum_version_required"`
Modules []string `json:"modules"`
Port int `json:"port"`
Skew string `json:"skew"`
Status string `json:"status"`
Version int `json:"version"`
ClusterIP *string `json:"cluster_ip"` // string or null
CN string `json:"cn"` // string
ExternalHost *string `json:"external_host"` // string or null
ExternalPort uint16 `json:"external_port"` // uint16
IP *string `json:"ipaddress"` // string or null
MinVer uint `json:"minimum_version_required"` // uint
Modules []string `json:"modules"` // [] len >= 0
Port *uint16 `json:"port"` // uint16 or null
Skew *string `json:"skew"` // BUG doc: floating point number, api object: string or null
Status string `json:"status"` // string
Version *uint `json:"version"` // uint or null
}
// Broker definition
// Broker defines a broker. See https://login.circonus.com/resources/api/calls/broker for more information.
type Broker struct {
Cid string `json:"_cid"`
Details []BrokerDetail `json:"_details"`
Latitude string `json:"_latitude"`
Longitude string `json:"_longitude"`
Name string `json:"_name"`
Tags []string `json:"_tags"`
Type string `json:"_type"`
CID string `json:"_cid"` // string
Details []BrokerDetail `json:"_details"` // [] len >= 1
Latitude *string `json:"_latitude"` // string or null
Longitude *string `json:"_longitude"` // string or null
Name string `json:"_name"` // string
Tags []string `json:"_tags"` // [] len >= 0
Type string `json:"_type"` // string
}
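Because nullable attributes are now pointers (ExternalHost, IP, Port, and so on), callers need nil checks before dereferencing. A small illustrative helper, not part of the library; the fallback order shown is an assumption.
package main

import (
    "fmt"

    "github.com/circonus-labs/circonus-gometrics/api"
)

// brokerHost is a hypothetical helper: prefer the external host name,
// fall back to the IP address, and tolerate null (nil) values from the API.
func brokerHost(d api.BrokerDetail) string {
    if d.ExternalHost != nil && *d.ExternalHost != "" {
        return *d.ExternalHost
    }
    if d.IP != nil {
        return *d.IP
    }
    return d.CN
}

func main() {
    host := "broker.example.net"
    d := api.BrokerDetail{CN: "broker01", ExternalHost: &host}
    fmt.Println(brokerHost(d)) // broker.example.net
}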
// FetchBrokerByID fetch a broker configuration by [group]id
func (a *API) FetchBrokerByID(id IDType) (*Broker, error) {
cid := CIDType(fmt.Sprintf("/broker/%d", id))
return a.FetchBrokerByCID(cid)
}
// FetchBroker retrieves broker with passed cid.
func (a *API) FetchBroker(cid CIDType) (*Broker, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid broker CID [none]")
}
// FetchBrokerByCID fetch a broker configuration by cid
func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) {
result, err := a.Get(string(cid))
brokerCID := string(*cid)
matched, err := regexp.MatchString(config.BrokerCIDRegex, brokerCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid broker CID [%s]", brokerCID)
}
result, err := a.Get(brokerCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch broker, received JSON: %s", string(result))
}
response := new(Broker)
if err := json.Unmarshal(result, &response); err != nil {
@ -57,32 +76,9 @@ func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) {
}
// FetchBrokerListByTag return list of brokers with a specific tag
func (a *API) FetchBrokerListByTag(searchTag TagType) ([]Broker, error) {
query := SearchQueryType(fmt.Sprintf("f__tags_has=%s", strings.Replace(strings.Join(searchTag, ","), ",", "&f__tags_has=", -1)))
return a.BrokerSearch(query)
}
// BrokerSearch return a list of brokers matching a query/filter
func (a *API) BrokerSearch(query SearchQueryType) ([]Broker, error) {
queryURL := fmt.Sprintf("/broker?%s", string(query))
result, err := a.Get(queryURL)
if err != nil {
return nil, err
}
var brokers []Broker
if err := json.Unmarshal(result, &brokers); err != nil {
return nil, err
}
return brokers, nil
}
// FetchBrokerList return list of all brokers available to the api token/app
func (a *API) FetchBrokerList() ([]Broker, error) {
result, err := a.Get("/broker")
// FetchBrokers returns all brokers available to the API Token.
func (a *API) FetchBrokers() (*[]Broker, error) {
result, err := a.Get(config.BrokerPrefix)
if err != nil {
return nil, err
}
@ -92,5 +88,45 @@ func (a *API) FetchBrokerList() ([]Broker, error) {
return nil, err
}
return response, nil
return &response, nil
}
// SearchBrokers returns brokers matching the specified search
// query and/or filter. If nil is passed for both parameters
// all brokers will be returned.
func (a *API) SearchBrokers(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Broker, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchBrokers()
}
reqURL := url.URL{
Path: config.BrokerPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var brokers []Broker
if err := json.Unmarshal(result, &brokers); err != nil {
return nil, err
}
return &brokers, nil
}
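A brief usage sketch for the new search/filter signature. The query text, tag filter, and credentials are made-up examples, and it assumes SearchFilterType is the map[string][]string alias defined elsewhere in this package.
package main

import (
    "fmt"
    "log"

    "github.com/circonus-labs/circonus-gometrics/api"
)

func main() {
    client, err := api.New(&api.Config{TokenKey: "REPLACE-ME", TokenApp: "example-app"})
    if err != nil {
        log.Fatal(err)
    }

    // Illustrative search text and filter values only.
    search := api.SearchQueryType("httptrap")
    filter := api.SearchFilterType{"f__tags_has": {"datacenter:east"}}

    brokers, err := client.SearchBrokers(&search, &filter)
    if err != nil {
        log.Fatal(err)
    }
    for _, b := range *brokers {
        fmt.Println(b.CID, b.Name)
    }
}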


@ -2,42 +2,58 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Check API support - Fetch and Search
// See: https://login.circonus.com/resources/api/calls/check
// Notes: checks do not directly support create, update, and delete - see check bundle.
package api
import (
"encoding/json"
"fmt"
"net/url"
"strings"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// CheckDetails is an arbitrary json structure, we would only care about submission_url
type CheckDetails struct {
SubmissionURL string `json:"submission_url"`
}
// CheckDetails contains [undocumented] check type specific information
type CheckDetails map[config.Key]string
// Check definition
// Check defines a check. See https://login.circonus.com/resources/api/calls/check for more information.
type Check struct {
Cid string `json:"_cid"`
Active bool `json:"_active"`
BrokerCid string `json:"_broker"`
CheckBundleCid string `json:"_check_bundle"`
CheckUUID string `json:"_check_uuid"`
Details CheckDetails `json:"_details"`
Active bool `json:"_active"` // bool
BrokerCID string `json:"_broker"` // string
CheckBundleCID string `json:"_check_bundle"` // string
CheckUUID string `json:"_check_uuid"` // string
CID string `json:"_cid"` // string
Details CheckDetails `json:"_details"` // NOTE contents of details are check type specific, map len >= 0
}
// FetchCheckByID fetch a check configuration by id
func (a *API) FetchCheckByID(id IDType) (*Check, error) {
cid := CIDType(fmt.Sprintf("/check/%d", int(id)))
return a.FetchCheckByCID(cid)
}
// FetchCheck retrieves check with passed cid.
func (a *API) FetchCheck(cid CIDType) (*Check, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid check CID [none]")
}
// FetchCheckByCID fetch a check configuration by cid
func (a *API) FetchCheckByCID(cid CIDType) (*Check, error) {
result, err := a.Get(string(cid))
checkCID := string(*cid)
matched, err := regexp.MatchString(config.CheckCIDRegex, checkCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid check CID [%s]", checkCID)
}
result, err := a.Get(checkCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch check, received JSON: %s", string(result))
}
check := new(Check)
if err := json.Unmarshal(result, check); err != nil {
@ -47,62 +63,49 @@ func (a *API) FetchCheckByCID(cid CIDType) (*Check, error) {
return check, nil
}
// FetchCheckBySubmissionURL fetch a check configuration by submission_url
func (a *API) FetchCheckBySubmissionURL(submissionURL URLType) (*Check, error) {
u, err := url.Parse(string(submissionURL))
// FetchChecks retrieves all checks available to the API Token.
func (a *API) FetchChecks() (*[]Check, error) {
result, err := a.Get(config.CheckPrefix)
if err != nil {
return nil, err
}
// valid trap url: scheme://host[:port]/module/httptrap/UUID/secret
// does it smell like a valid trap url path
if !strings.Contains(u.Path, "/module/httptrap/") {
return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', unrecognized path", submissionURL)
}
// extract uuid
pathParts := strings.Split(strings.Replace(u.Path, "/module/httptrap/", "", 1), "/")
if len(pathParts) != 2 {
return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', UUID not where expected", submissionURL)
}
uuid := pathParts[0]
filter := SearchFilterType(fmt.Sprintf("f__check_uuid=%s", uuid))
checks, err := a.CheckFilterSearch(filter)
if err != nil {
var checks []Check
if err := json.Unmarshal(result, &checks); err != nil {
return nil, err
}
if len(checks) == 0 {
return nil, fmt.Errorf("[ERROR] No checks found with UUID %s", uuid)
return &checks, nil
}
// SearchChecks returns checks matching the specified search query
// and/or filter. If nil is passed for both parameters all checks
// will be returned.
func (a *API) SearchChecks(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Check, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
numActive := 0
checkID := -1
for idx, check := range checks {
if check.Active {
numActive++
checkID = idx
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if numActive > 1 {
return nil, fmt.Errorf("[ERROR] Multiple checks with same UUID %s", uuid)
if q.Encode() == "" {
return a.FetchChecks()
}
return &checks[checkID], nil
reqURL := url.URL{
Path: config.CheckPrefix,
RawQuery: q.Encode(),
}
}
// CheckSearch returns a list of checks matching a search query
func (a *API) CheckSearch(query SearchQueryType) ([]Check, error) {
queryURL := fmt.Sprintf("/check?search=%s", string(query))
result, err := a.Get(queryURL)
result, err := a.Get(reqURL.String())
if err != nil {
return nil, err
}
@ -112,22 +115,5 @@ func (a *API) CheckSearch(query SearchQueryType) ([]Check, error) {
return nil, err
}
return checks, nil
}
// CheckFilterSearch returns a list of checks matching a filter
func (a *API) CheckFilterSearch(filter SearchFilterType) ([]Check, error) {
filterURL := fmt.Sprintf("/check?%s", string(filter))
result, err := a.Get(filterURL)
if err != nil {
return nil, err
}
var checks []Check
if err := json.Unmarshal(result, &checks); err != nil {
return nil, err
}
return checks, nil
return &checks, nil
}


@ -0,0 +1,255 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Check bundle API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/check_bundle
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// CheckBundleMetric individual metric configuration
type CheckBundleMetric struct {
Name string `json:"name"` // string
Result *string `json:"result,omitempty"` // string or null, NOTE not settable - return/information value only
Status string `json:"status,omitempty"` // string
Tags []string `json:"tags"` // [] len >= 0
Type string `json:"type"` // string
Units *string `json:"units,omitempty"` // string or null
}
// CheckBundleConfig contains the check type specific configuration settings
// as k/v pairs (see https://login.circonus.com/resources/api/calls/check_bundle
// for the specific settings available for each distinct check type)
type CheckBundleConfig map[config.Key]string
// CheckBundle defines a check bundle. See https://login.circonus.com/resources/api/calls/check_bundle for more information.
type CheckBundle struct {
Brokers []string `json:"brokers"` // [] len >= 0
Checks []string `json:"_checks,omitempty"` // [] len >= 0
CheckUUIDs []string `json:"_check_uuids,omitempty"` // [] len >= 0
CID string `json:"_cid,omitempty"` // string
Config CheckBundleConfig `json:"config"` // NOTE contents of config are check type specific, map len >= 0
Created uint `json:"_created,omitempty"` // uint
DisplayName string `json:"display_name"` // string
LastModifedBy string `json:"_last_modifed_by,omitempty"` // string
LastModified uint `json:"_last_modified,omitempty"` // uint
MetricLimit int `json:"metric_limit,omitempty"` // int
Metrics []CheckBundleMetric `json:"metrics"` // [] >= 0
Notes *string `json:"notes,omitempty"` // string or null
Period uint `json:"period,omitempty"` // uint
ReverseConnectURLs []string `json:"_reverse_connection_urls,omitempty"` // [] len >= 0
Status string `json:"status,omitempty"` // string
Tags []string `json:"tags,omitempty"` // [] len >= 0
Target string `json:"target"` // string
Timeout float32 `json:"timeout,omitempty"` // float32
Type string `json:"type"` // string
}
// NewCheckBundle returns new CheckBundle (with defaults, if applicable)
func NewCheckBundle() *CheckBundle {
return &CheckBundle{
Config: make(CheckBundleConfig, config.DefaultConfigOptionsSize),
MetricLimit: config.DefaultCheckBundleMetricLimit,
Period: config.DefaultCheckBundlePeriod,
Timeout: config.DefaultCheckBundleTimeout,
Status: config.DefaultCheckBundleStatus,
}
}
// FetchCheckBundle retrieves check bundle with passed cid.
func (a *API) FetchCheckBundle(cid CIDType) (*CheckBundle, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid check bundle CID [none]")
}
bundleCID := string(*cid)
matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid check bundle CID [%v]", bundleCID)
}
result, err := a.Get(bundleCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch check bundle, received JSON: %s", string(result))
}
checkBundle := &CheckBundle{}
if err := json.Unmarshal(result, checkBundle); err != nil {
return nil, err
}
return checkBundle, nil
}
// FetchCheckBundles retrieves all check bundles available to the API Token.
func (a *API) FetchCheckBundles() (*[]CheckBundle, error) {
result, err := a.Get(config.CheckBundlePrefix)
if err != nil {
return nil, err
}
var checkBundles []CheckBundle
if err := json.Unmarshal(result, &checkBundles); err != nil {
return nil, err
}
return &checkBundles, nil
}
// UpdateCheckBundle updates passed check bundle.
func (a *API) UpdateCheckBundle(cfg *CheckBundle) (*CheckBundle, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid check bundle config [nil]")
}
bundleCID := string(cfg.CID)
matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid check bundle CID [%s]", bundleCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update check bundle, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(bundleCID, jsonCfg)
if err != nil {
return nil, err
}
checkBundle := &CheckBundle{}
if err := json.Unmarshal(result, checkBundle); err != nil {
return nil, err
}
return checkBundle, nil
}
// CreateCheckBundle creates a new check bundle (check).
func (a *API) CreateCheckBundle(cfg *CheckBundle) (*CheckBundle, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid check bundle config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create check bundle, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.CheckBundlePrefix, jsonCfg)
if err != nil {
return nil, err
}
checkBundle := &CheckBundle{}
if err := json.Unmarshal(result, checkBundle); err != nil {
return nil, err
}
return checkBundle, nil
}
// DeleteCheckBundle deletes passed check bundle.
func (a *API) DeleteCheckBundle(cfg *CheckBundle) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid check bundle config [nil]")
}
return a.DeleteCheckBundleByCID(CIDType(&cfg.CID))
}
// DeleteCheckBundleByCID deletes check bundle with passed cid.
func (a *API) DeleteCheckBundleByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid check bundle CID [none]")
}
bundleCID := string(*cid)
matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid check bundle CID [%v]", bundleCID)
}
_, err = a.Delete(bundleCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchCheckBundles returns check bundles matching the specified
// search query and/or filter. If nil is passed for both parameters
// all check bundles will be returned.
func (a *API) SearchCheckBundles(searchCriteria *SearchQueryType, filterCriteria *map[string][]string) (*[]CheckBundle, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchCheckBundles()
}
reqURL := url.URL{
Path: config.CheckBundlePrefix,
RawQuery: q.Encode(),
}
resp, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var results []CheckBundle
if err := json.Unmarshal(resp, &results); err != nil {
return nil, err
}
return &results, nil
}
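To illustrate the intended lifecycle of these helpers, a hedged sketch that builds a bundle with NewCheckBundle, creates it, and then deletes it again. Broker CID, target, secret, and metric names are placeholders.
package main

import (
    "fmt"
    "log"

    "github.com/circonus-labs/circonus-gometrics/api"
    "github.com/circonus-labs/circonus-gometrics/api/config"
)

func main() {
    client, err := api.New(&api.Config{TokenKey: "REPLACE-ME", TokenApp: "example-app"})
    if err != nil {
        log.Fatal(err)
    }

    // Start from the defaults and fill in the required fields.
    cfg := api.NewCheckBundle()
    cfg.DisplayName = "example httptrap"
    cfg.Type = "httptrap"
    cfg.Target = "host.example.com"
    cfg.Brokers = []string{"/broker/1234"} // placeholder broker CID
    cfg.Config[config.AsyncMetrics] = "true"
    cfg.Config[config.Secret] = "REPLACE-ME"
    cfg.Metrics = []api.CheckBundleMetric{
        {Name: "requests", Type: "numeric", Status: "active", Tags: []string{}},
    }

    bundle, err := client.CreateCheckBundle(cfg)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("created", bundle.CID)

    // Clean up the example bundle.
    if _, err := client.DeleteCheckBundle(bundle); err != nil {
        log.Fatal(err)
    }
}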


@ -0,0 +1,95 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// CheckBundleMetrics API support - Fetch, Create*, Update, and Delete**
// See: https://login.circonus.com/resources/api/calls/check_bundle_metrics
// * : create metrics by adding them to the metrics array with a status of 'active'
// ** : delete (disable collection of) metrics by changing status from 'active' to 'available'
package api
import (
"encoding/json"
"fmt"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// CheckBundleMetrics defines metrics for a specific check bundle. See https://login.circonus.com/resources/api/calls/check_bundle_metrics for more information.
type CheckBundleMetrics struct {
CID string `json:"_cid,omitempty"` // string
Metrics []CheckBundleMetric `json:"metrics"` // See check_bundle.go for CheckBundleMetric definition
}
// FetchCheckBundleMetrics retrieves metrics for the check bundle with passed cid.
func (a *API) FetchCheckBundleMetrics(cid CIDType) (*CheckBundleMetrics, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid check bundle metrics CID [none]")
}
metricsCID := string(*cid)
matched, err := regexp.MatchString(config.CheckBundleMetricsCIDRegex, metricsCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid check bundle metrics CID [%s]", metricsCID)
}
result, err := a.Get(metricsCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch check bundle metrics, received JSON: %s", string(result))
}
metrics := &CheckBundleMetrics{}
if err := json.Unmarshal(result, metrics); err != nil {
return nil, err
}
return metrics, nil
}
// UpdateCheckBundleMetrics updates passed metrics.
func (a *API) UpdateCheckBundleMetrics(cfg *CheckBundleMetrics) (*CheckBundleMetrics, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid check bundle metrics config [nil]")
}
metricsCID := string(cfg.CID)
matched, err := regexp.MatchString(config.CheckBundleMetricsCIDRegex, metricsCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid check bundle metrics CID [%s]", metricsCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update check bundle metrics, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(metricsCID, jsonCfg)
if err != nil {
return nil, err
}
metrics := &CheckBundleMetrics{}
if err := json.Unmarshal(result, metrics); err != nil {
return nil, err
}
return metrics, nil
}
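A short sketch of the "delete by status change" note in the header comment: fetch the metrics for a bundle, demote one metric from 'active' to 'available', and push the update. The CID and credentials are placeholders, and it assumes CIDType is the *string alias used by the CID-based helpers above.
package main

import (
    "log"

    "github.com/circonus-labs/circonus-gometrics/api"
)

func main() {
    client, err := api.New(&api.Config{TokenKey: "REPLACE-ME", TokenApp: "example-app"})
    if err != nil {
        log.Fatal(err)
    }

    // Placeholder CID for an existing check bundle's metrics resource.
    cid := "/check_bundle_metrics/1234"

    metrics, err := client.FetchCheckBundleMetrics(api.CIDType(&cid))
    if err != nil {
        log.Fatal(err)
    }

    // Disable collection of the first metric by demoting its status.
    if len(metrics.Metrics) > 0 {
        metrics.Metrics[0].Status = "available"
    }

    if _, err := client.UpdateCheckBundleMetrics(metrics); err != nil {
        log.Fatal(err)
    }
}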


@ -1,139 +0,0 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api
import (
"encoding/json"
"fmt"
)
// CheckBundleConfig configuration specific to check type
type CheckBundleConfig struct {
AsyncMetrics bool `json:"async_metrics"`
Secret string `json:"secret"`
SubmissionURL string `json:"submission_url"`
ReverseSecret string `json:"reverse:secret_key"`
HTTPVersion string `json:"http_version,omitempty"`
Method string `json:"method,omitempty"`
Payload string `json:"payload,omitempty"`
Port string `json:"port,omitempty"`
ReadLimit string `json:"read_limit,omitempty"`
URL string `json:"url,omitempty"`
}
// CheckBundleMetric individual metric configuration
type CheckBundleMetric struct {
Name string `json:"name"`
Type string `json:"type"`
Units string `json:"units"`
Status string `json:"status"`
Tags []string `json:"tags"`
}
// CheckBundle definition
type CheckBundle struct {
CheckUUIDs []string `json:"_check_uuids,omitempty"`
Checks []string `json:"_checks,omitempty"`
Cid string `json:"_cid,omitempty"`
Created int `json:"_created,omitempty"`
LastModified int `json:"_last_modified,omitempty"`
LastModifedBy string `json:"_last_modifed_by,omitempty"`
ReverseConnectURLs []string `json:"_reverse_connection_urls"`
Brokers []string `json:"brokers"`
Config CheckBundleConfig `json:"config"`
DisplayName string `json:"display_name"`
Metrics []CheckBundleMetric `json:"metrics"`
MetricLimit int `json:"metric_limit"`
Notes string `json:"notes"`
Period int `json:"period"`
Status string `json:"status"`
Tags []string `json:"tags"`
Target string `json:"target"`
Timeout int `json:"timeout"`
Type string `json:"type"`
}
// FetchCheckBundleByID fetch a check bundle configuration by id
func (a *API) FetchCheckBundleByID(id IDType) (*CheckBundle, error) {
cid := CIDType(fmt.Sprintf("/check_bundle/%d", id))
return a.FetchCheckBundleByCID(cid)
}
// FetchCheckBundleByCID fetch a check bundle configuration by id
func (a *API) FetchCheckBundleByCID(cid CIDType) (*CheckBundle, error) {
result, err := a.Get(string(cid))
if err != nil {
return nil, err
}
checkBundle := &CheckBundle{}
if err := json.Unmarshal(result, checkBundle); err != nil {
return nil, err
}
return checkBundle, nil
}
// CheckBundleSearch returns list of check bundles matching a search query
// - a search query not a filter (see: https://login.circonus.com/resources/api#searching)
func (a *API) CheckBundleSearch(searchCriteria SearchQueryType) ([]CheckBundle, error) {
apiPath := fmt.Sprintf("/check_bundle?search=%s", searchCriteria)
response, err := a.Get(apiPath)
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var results []CheckBundle
if err := json.Unmarshal(response, &results); err != nil {
return nil, err
}
return results, nil
}
// CreateCheckBundle create a new check bundle (check)
func (a *API) CreateCheckBundle(config CheckBundle) (*CheckBundle, error) {
cfgJSON, err := json.Marshal(config)
if err != nil {
return nil, err
}
response, err := a.Post("/check_bundle", cfgJSON)
if err != nil {
return nil, err
}
checkBundle := &CheckBundle{}
if err := json.Unmarshal(response, checkBundle); err != nil {
return nil, err
}
return checkBundle, nil
}
// UpdateCheckBundle updates a check bundle configuration
func (a *API) UpdateCheckBundle(config *CheckBundle) (*CheckBundle, error) {
if a.Debug {
a.Log.Printf("[DEBUG] Updating check bundle.")
}
cfgJSON, err := json.Marshal(config)
if err != nil {
return nil, err
}
response, err := a.Put(config.Cid, cfgJSON)
if err != nil {
return nil, err
}
checkBundle := &CheckBundle{}
if err := json.Unmarshal(response, checkBundle); err != nil {
return nil, err
}
return checkBundle, nil
}


@ -0,0 +1,538 @@
package config
// Key for CheckBundleConfig options and CheckDetails info
type Key string
// Constants per type as defined in
// https://login.circonus.com/resources/api/calls/check_bundle
const (
//
// default settings for api.NewCheckBundle()
//
DefaultCheckBundleMetricLimit = -1 // unlimited
DefaultCheckBundleStatus = "active"
DefaultCheckBundlePeriod = 60
DefaultCheckBundleTimeout = 10
DefaultConfigOptionsSize = 20
//
// common (apply to more than one check type)
//
AsyncMetrics = Key("asynch_metrics")
AuthMethod = Key("auth_method")
AuthPassword = Key("auth_password")
AuthUser = Key("auth_user")
BaseURL = Key("base_url")
CAChain = Key("ca_chain")
CertFile = Key("certificate_file")
Ciphers = Key("ciphers")
Command = Key("command")
DSN = Key("dsn")
HeaderPrefix = Key("header_")
HTTPVersion = Key("http_version")
KeyFile = Key("key_file")
Method = Key("method")
Password = Key("password")
Payload = Key("payload")
Port = Key("port")
Query = Key("query")
ReadLimit = Key("read_limit")
Secret = Key("secret")
SQL = Key("sql")
URI = Key("uri")
URL = Key("url")
Username = Key("username")
UseSSL = Key("use_ssl")
User = Key("user")
SASLAuthentication = Key("sasl_authentication")
SASLUser = Key("sasl_user")
SecurityLevel = Key("security_level")
Version = Key("version")
AppendColumnName = Key("append_column_name")
Database = Key("database")
JDBCPrefix = Key("jdbc_")
//
// CAQL check
//
// Common items:
// Query
//
// Circonus Windows Agent
//
// Common items:
// AuthPassword
// AuthUser
// Port
// URL
Calculated = Key("calculated")
Category = Key("category")
//
// Cloudwatch
//
// Notes:
// DimPrefix is special because the actual key is dynamic and matches: `dim_(.+)`
// Common items:
// URL
// Version
APIKey = Key("api_key")
APISecret = Key("api_secret")
CloudwatchMetrics = Key("cloudwatch_metrics")
DimPrefix = Key("dim_")
Granularity = Key("granularity")
Namespace = Key("namespace")
Statistics = Key("statistics")
//
// Collectd
//
// Common items:
// AsyncMetrics
// Username
// Secret
// SecurityLevel
//
// Composite
//
CompositeMetricName = Key("composite_metric_name")
Formula = Key("formula")
//
// DHCP
//
HardwareAddress = Key("hardware_addr")
HostIP = Key("host_ip")
RequestType = Key("request_type")
SendPort = Key("send_port")
//
// DNS
//
// Common items:
// Query
CType = Key("ctype")
Nameserver = Key("nameserver")
RType = Key("rtype")
//
// EC Console
//
// Common items:
// Command
// Port
// SASLAuthentication
// SASLUser
Objects = Key("objects")
XPath = Key("xpath")
//
// Elastic Search
//
// Common items:
// Port
// URL
//
// Ganglia
//
// Common items:
// AsyncMetrics
//
// Google Analytics
//
// Common items:
// Password
// Username
OAuthToken = Key("oauth_token")
OAuthTokenSecret = Key("oauth_token_secret")
OAuthVersion = Key("oauth_version")
TableID = Key("table_id")
UseOAuth = Key("use_oauth")
//
// HA Proxy
//
// Common items:
// AuthPassword
// AuthUser
// Port
// UseSSL
Host = Key("host")
Select = Key("select")
//
// HTTP
//
// Notes:
// HeaderPrefix is special because the actual key is dynamic and matches: `header_(\S+)`
// Common items:
// AuthMethod
// AuthPassword
// AuthUser
// CAChain
// CertFile
// Ciphers
// KeyFile
// URL
// HeaderPrefix
// HTTPVersion
// Method
// Payload
// ReadLimit
Body = Key("body")
Code = Key("code")
Extract = Key("extract")
Redirects = Key("redirects")
//
// HTTPTRAP
//
// Common items:
// AsyncMetrics
// Secret
//
// IMAP
//
// Common items:
// AuthPassword
// AuthUser
// CAChain
// CertFile
// Ciphers
// KeyFile
// Port
// UseSSL
Fetch = Key("fetch")
Folder = Key("folder")
HeaderHost = Key("header_Host")
Search = Key("search")
//
// JMX
//
// Common items:
// Password
// Port
// URI
// Username
MbeanDomains = Key("mbean_domains")
//
// JSON
//
// Common items:
// AuthMethod
// AuthPassword
// AuthUser
// CAChain
// CertFile
// Ciphers
// HeaderPrefix
// HTTPVersion
// KeyFile
// Method
// Payload
// Port
// ReadLimit
// URL
//
// Keynote
//
// Notes:
// SlotAliasPrefix is special because the actual key is dynamic and matches: `slot_alias_(\d+)`
// Common items:
// APIKey
// BaseURL
PageComponent = Key("pagecomponent")
SlotAliasPrefix = Key("slot_alias_")
SlotIDList = Key("slot_id_list")
TransPageList = Key("transpagelist")
//
// Keynote Pulse
//
// Common items:
// BaseURL
// Password
// User
AgreementID = Key("agreement_id")
//
// LDAP
//
// Common items:
// Password
// Port
AuthType = Key("authtype")
DN = Key("dn")
SecurityPrincipal = Key("security_principal")
//
// Memcached
//
// Common items:
// Port
//
// MongoDB
//
// Common items:
// Command
// Password
// Port
// Username
DBName = Key("dbname")
//
// Munin
//
// Note: no configuration options
//
// MySQL
//
// Common items:
// DSN
// SQL
//
// Newrelic rpm
//
// Common items:
// APIKey
AccountID = Key("acct_id")
ApplicationID = Key("application_id")
LicenseKey = Key("license_key")
//
// Nginx
//
// Common items:
// CAChain
// CertFile
// Ciphers
// KeyFile
// URL
//
// NRPE
//
// Common items:
// Command
// Port
// UseSSL
AppendUnits = Key("append_uom")
//
// NTP
//
// Common items:
// Port
Control = Key("control")
//
// Oracle
//
// Notes:
// JDBCPrefix is special because the actual key is dynamic and matches: `jdbc_(\S+)`
// Common items:
// AppendColumnName
// Database
// JDBCPrefix
// Password
// Port
// SQL
// User
//
// Ping ICMP
//
AvailNeeded = Key("avail_needed")
Count = Key("count")
Interval = Key("interval")
//
// PostgreSQL
//
// Common items:
// DSN
// SQL
//
// Redis
//
// Common items:
// Command
// Password
// Port
DBIndex = Key("dbindex")
//
// Resmon
//
// Notes:
// HeaderPrefix is special because the actual key is dynamic and matches: `header_(\S+)`
// Common items:
// AuthMethod
// AuthPassword
// AuthUser
// CAChain
// CertFile
// Ciphers
// HeaderPrefix
// HTTPVersion
// KeyFile
// Method
// Payload
// Port
// ReadLimit
// URL
//
// SMTP
//
// Common items:
// Payload
// Port
// SASLAuthentication
// SASLUser
EHLO = Key("ehlo")
From = Key("from")
SASLAuthID = Key("sasl_auth_id")
SASLPassword = Key("sasl_password")
StartTLS = Key("starttls")
To = Key("to")
//
// SNMP
//
// Notes:
// OIDPrefix is special because the actual key is dynamic and matches: `oid_(.+)`
// TypePrefix is special because the actual key is dynamic and matches: `type_(.+)`
// Common items:
// Port
// SecurityLevel
// Version
AuthPassphrase = Key("auth_passphrase")
AuthProtocol = Key("auth_protocol")
Community = Key("community")
ContextEngine = Key("context_engine")
ContextName = Key("context_name")
OIDPrefix = Key("oid_")
PrivacyPassphrase = Key("privacy_passphrase")
PrivacyProtocol = Key("privacy_protocol")
SecurityEngine = Key("security_engine")
SecurityName = Key("security_name")
SeparateQueries = Key("separate_queries")
TypePrefix = Key("type_")
//
// SQLServer
//
// Notes:
// JDBCPrefix is special because the actual key is dynamic and matches: `jdbc_(\S+)`
// Common items:
// AppendColumnName
// Database
// JDBCPrefix
// Password
// Port
// SQL
// User
//
// SSH v2
//
// Common items:
// Port
MethodCompCS = Key("method_comp_cs")
MethodCompSC = Key("method_comp_sc")
MethodCryptCS = Key("method_crypt_cs")
MethodCryptSC = Key("method_crypt_sc")
MethodHostKey = Key("method_hostkey")
MethodKeyExchange = Key("method_kex")
MethodMacCS = Key("method_mac_cs")
MethodMacSC = Key("method_mac_sc")
//
// StatsD
//
// Note: no configuration options
//
// TCP
//
// Common items:
// CAChain
// CertFile
// Ciphers
// KeyFile
// Port
// UseSSL
BannerMatch = Key("banner_match")
//
// Varnish
//
// Note: no configuration options
//
// reserved - config option(s) can't actually be set - here for r/o access
//
ReverseSecretKey = Key("reverse:secret_key")
SubmissionURL = Key("submission_url")
//
// Endpoint prefix & cid regex
//
DefaultCIDRegex = "[0-9]+"
DefaultUUIDRegex = "[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}"
AccountPrefix = "/account"
AccountCIDRegex = "^(" + AccountPrefix + "/(" + DefaultCIDRegex + "|current))$"
AcknowledgementPrefix = "/acknowledgement"
AcknowledgementCIDRegex = "^(" + AcknowledgementPrefix + "/(" + DefaultCIDRegex + "))$"
AlertPrefix = "/alert"
AlertCIDRegex = "^(" + AlertPrefix + "/(" + DefaultCIDRegex + "))$"
AnnotationPrefix = "/annotation"
AnnotationCIDRegex = "^(" + AnnotationPrefix + "/(" + DefaultCIDRegex + "))$"
BrokerPrefix = "/broker"
BrokerCIDRegex = "^(" + BrokerPrefix + "/(" + DefaultCIDRegex + "))$"
CheckBundleMetricsPrefix = "/check_bundle_metrics"
CheckBundleMetricsCIDRegex = "^(" + CheckBundleMetricsPrefix + "/(" + DefaultCIDRegex + "))$"
CheckBundlePrefix = "/check_bundle"
CheckBundleCIDRegex = "^(" + CheckBundlePrefix + "/(" + DefaultCIDRegex + "))$"
CheckPrefix = "/check"
CheckCIDRegex = "^(" + CheckPrefix + "/(" + DefaultCIDRegex + "))$"
ContactGroupPrefix = "/contact_group"
ContactGroupCIDRegex = "^(" + ContactGroupPrefix + "/(" + DefaultCIDRegex + "))$"
DashboardPrefix = "/dashboard"
DashboardCIDRegex = "^(" + DashboardPrefix + "/(" + DefaultCIDRegex + "))$"
GraphPrefix = "/graph"
GraphCIDRegex = "^(" + GraphPrefix + "/(" + DefaultUUIDRegex + "))$"
MaintenancePrefix = "/maintenance"
MaintenanceCIDRegex = "^(" + MaintenancePrefix + "/(" + DefaultCIDRegex + "))$"
MetricClusterPrefix = "/metric_cluster"
MetricClusterCIDRegex = "^(" + MetricClusterPrefix + "/(" + DefaultCIDRegex + "))$"
MetricPrefix = "/metric"
MetricCIDRegex = "^(" + MetricPrefix + "/((" + DefaultCIDRegex + ")_([^[:space:]]+)))$"
OutlierReportPrefix = "/outlier_report"
OutlierReportCIDRegex = "^(" + OutlierReportPrefix + "/(" + DefaultCIDRegex + "))$"
ProvisionBrokerPrefix = "/provision_broker"
ProvisionBrokerCIDRegex = "^(" + ProvisionBrokerPrefix + "/([a-z0-9]+-[a-z0-9]+))$"
RuleSetGroupPrefix = "/rule_set_group"
RuleSetGroupCIDRegex = "^(" + RuleSetGroupPrefix + "/(" + DefaultCIDRegex + "))$"
RuleSetPrefix = "/rule_set"
RuleSetCIDRegex = "^(" + RuleSetPrefix + "/((" + DefaultCIDRegex + ")_([^[:space:]]+)))$"
UserPrefix = "/user"
UserCIDRegex = "^(" + UserPrefix + "/(" + DefaultCIDRegex + "|current))$"
WorksheetPrefix = "/worksheet"
WorksheetCIDRegex = "^(" + WorksheetPrefix + "/(" + DefaultUUIDRegex + "))$"
// contact group severity levels
NumSeverityLevels = 5
)
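A quick sketch of how these constants are meant to be used: the prefixes build request paths, the CID regexes validate them, and the Key constants index check-type specific config maps. The CID and secret shown are made up.
package main

import (
    "fmt"
    "regexp"

    "github.com/circonus-labs/circonus-gometrics/api/config"
)

func main() {
    // Validate a (made-up) check bundle CID against the exported regex.
    cid := config.CheckBundlePrefix + "/1234"
    matched, err := regexp.MatchString(config.CheckBundleCIDRegex, cid)
    if err != nil {
        panic(err)
    }
    fmt.Println(cid, "valid:", matched) // true

    // Keys index check-type specific settings in a config map.
    settings := map[config.Key]string{
        config.AsyncMetrics: "true",
        config.Secret:       "s3cr3t", // placeholder value
    }
    fmt.Println(settings)
}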


@ -0,0 +1,263 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Contact Group API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/contact_group
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// ContactGroupAlertFormats define alert formats
type ContactGroupAlertFormats struct {
LongMessage *string `json:"long_message"` // string or null
LongSubject *string `json:"long_subject"` // string or null
LongSummary *string `json:"long_summary"` // string or null
ShortMessage *string `json:"short_message"` // string or null
ShortSummary *string `json:"short_summary"` // string or null
}
// ContactGroupContactsExternal external contacts
type ContactGroupContactsExternal struct {
Info string `json:"contact_info"` // string
Method string `json:"method"` // string
}
// ContactGroupContactsUser user contacts
type ContactGroupContactsUser struct {
Info string `json:"_contact_info,omitempty"` // string
Method string `json:"method"` // string
UserCID string `json:"user"` // string
}
// ContactGroupContacts list of contacts
type ContactGroupContacts struct {
External []ContactGroupContactsExternal `json:"external"` // [] len >= 0
Users []ContactGroupContactsUser `json:"users"` // [] len >= 0
}
// ContactGroupEscalation defines escalations for severity levels
type ContactGroupEscalation struct {
After uint `json:"after"` // uint
ContactGroupCID string `json:"contact_group"` // string
}
// ContactGroup defines a contact group. See https://login.circonus.com/resources/api/calls/contact_group for more information.
type ContactGroup struct {
AggregationWindow uint `json:"aggregation_window,omitempty"` // uint
AlertFormats ContactGroupAlertFormats `json:"alert_formats,omitempty"` // ContactGroupAlertFormats
CID string `json:"_cid,omitempty"` // string
Contacts ContactGroupContacts `json:"contacts,omitempty"` // ContactGroupContacts
Escalations []*ContactGroupEscalation `json:"escalations,omitempty"` // [] len == 5, elements: ContactGroupEscalation or null
LastModified uint `json:"_last_modified,omitempty"` // uint
LastModifiedBy string `json:"_last_modified_by,omitempty"` // string
Name string `json:"name,omitempty"` // string
Reminders []uint `json:"reminders,omitempty"` // [] len == 5
Tags []string `json:"tags,omitempty"` // [] len >= 0
}
// NewContactGroup returns a ContactGroup (with defaults, if applicable)
func NewContactGroup() *ContactGroup {
return &ContactGroup{
Escalations: make([]*ContactGroupEscalation, config.NumSeverityLevels),
Reminders: make([]uint, config.NumSeverityLevels),
Contacts: ContactGroupContacts{
External: []ContactGroupContactsExternal{},
Users: []ContactGroupContactsUser{},
},
}
}
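For illustration, NewContactGroup pre-sizes Escalations and Reminders to the five severity levels; the sketch below fills in one slot. The index-to-severity mapping, the timings, and the contact group CID are assumptions for the example.
package main

import (
    "fmt"

    "github.com/circonus-labs/circonus-gometrics/api"
)

func main() {
    cg := api.NewContactGroup()
    cg.Name = "example on-call"

    // Assuming severity 1 occupies index 0: escalate to another
    // (placeholder) group after 300 seconds and remind every 600 seconds.
    cg.Escalations[0] = &api.ContactGroupEscalation{
        After:           300,
        ContactGroupCID: "/contact_group/1234",
    }
    cg.Reminders[0] = 600

    fmt.Printf("%+v\n", cg)
}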
// FetchContactGroup retrieves contact group with passed cid.
func (a *API) FetchContactGroup(cid CIDType) (*ContactGroup, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid contact group CID [none]")
}
groupCID := string(*cid)
matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid contact group CID [%s]", groupCID)
}
result, err := a.Get(groupCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch contact group, received JSON: %s", string(result))
}
group := new(ContactGroup)
if err := json.Unmarshal(result, group); err != nil {
return nil, err
}
return group, nil
}
// FetchContactGroups retrieves all contact groups available to the API Token.
func (a *API) FetchContactGroups() (*[]ContactGroup, error) {
result, err := a.Get(config.ContactGroupPrefix)
if err != nil {
return nil, err
}
var groups []ContactGroup
if err := json.Unmarshal(result, &groups); err != nil {
return nil, err
}
return &groups, nil
}
// UpdateContactGroup updates passed contact group.
func (a *API) UpdateContactGroup(cfg *ContactGroup) (*ContactGroup, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid contact group config [nil]")
}
groupCID := string(cfg.CID)
matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid contact group CID [%s]", groupCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update contact group, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(groupCID, jsonCfg)
if err != nil {
return nil, err
}
group := &ContactGroup{}
if err := json.Unmarshal(result, group); err != nil {
return nil, err
}
return group, nil
}
// CreateContactGroup creates a new contact group.
func (a *API) CreateContactGroup(cfg *ContactGroup) (*ContactGroup, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid contact group config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create contact group, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.ContactGroupPrefix, jsonCfg)
if err != nil {
return nil, err
}
group := &ContactGroup{}
if err := json.Unmarshal(result, group); err != nil {
return nil, err
}
return group, nil
}
// DeleteContactGroup deletes passed contact group.
func (a *API) DeleteContactGroup(cfg *ContactGroup) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid contact group config [nil]")
}
return a.DeleteContactGroupByCID(CIDType(&cfg.CID))
}
// DeleteContactGroupByCID deletes contact group with passed cid.
func (a *API) DeleteContactGroupByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid contact group CID [none]")
}
groupCID := string(*cid)
matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid contact group CID [%s]", groupCID)
}
_, err = a.Delete(groupCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchContactGroups returns contact groups matching the specified
// search query and/or filter. If nil is passed for both parameters
// all contact groups will be returned.
func (a *API) SearchContactGroups(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]ContactGroup, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchContactGroups()
}
reqURL := url.URL{
Path: config.ContactGroupPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var groups []ContactGroup
if err := json.Unmarshal(result, &groups); err != nil {
return nil, err
}
return &groups, nil
}


@ -0,0 +1,400 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Dashboard API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/dashboard
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// DashboardGridLayout defines layout
type DashboardGridLayout struct {
Height uint `json:"height"`
Width uint `json:"width"`
}
// DashboardAccessConfig defines access config
type DashboardAccessConfig struct {
BlackDash bool `json:"black_dash"`
Enabled bool `json:"enabled"`
Fullscreen bool `json:"fullscreen"`
FullscreenHideTitle bool `json:"fullscreen_hide_title"`
Nickname string `json:"nickname"`
ScaleText bool `json:"scale_text"`
SharedID string `json:"shared_id"`
TextSize uint `json:"text_size"`
}
// DashboardOptions defines options
type DashboardOptions struct {
AccessConfigs []DashboardAccessConfig `json:"access_configs"`
FullscreenHideTitle bool `json:"fullscreen_hide_title"`
HideGrid bool `json:"hide_grid"`
Linkages [][]string `json:"linkages"`
ScaleText bool `json:"scale_text"`
TextSize uint `json:"text_size"`
}
// ChartTextWidgetDatapoint defines datapoints for charts
type ChartTextWidgetDatapoint struct {
AccountID string `json:"account_id,omitempty"` // metric cluster, metric
CheckID uint `json:"_check_id,omitempty"` // metric
ClusterID uint `json:"cluster_id,omitempty"` // metric cluster
ClusterTitle string `json:"_cluster_title,omitempty"` // metric cluster
Label string `json:"label,omitempty"` // metric
Label2 string `json:"_label,omitempty"` // metric cluster
Metric string `json:"metric,omitempty"` // metric
MetricType string `json:"_metric_type,omitempty"` // metric
NumericOnly bool `json:"numeric_only,omitempty"` // metric cluster
}
// ChartWidgetDefinitionLegend defines chart widget definition legend
type ChartWidgetDefinitionLegend struct {
Show bool `json:"show,omitempty"`
Type string `json:"type,omitempty"`
}
// ChartWidgetWedgeLabels defines chart widget wedge labels
type ChartWidgetWedgeLabels struct {
OnChart bool `json:"on_chart,omitempty"`
ToolTips bool `json:"tooltips,omitempty"`
}
// ChartWidgetWedgeValues defines chart widget wedge values
type ChartWidgetWedgeValues struct {
Angle string `json:"angle,omitempty"`
Color string `json:"color,omitempty"`
Show bool `json:"show,omitempty"`
}
// ChartWidgtDefinition defines chart widget definition
type ChartWidgtDefinition struct {
Datasource string `json:"datasource,omitempty"`
Derive string `json:"derive,omitempty"`
DisableAutoformat bool `json:"disable_autoformat,omitempty"`
Formula string `json:"formula,omitempty"`
Legend ChartWidgetDefinitionLegend `json:"legend,omitempty"`
Period uint `json:"period,omitempty"`
PopOnHover bool `json:"pop_onhover,omitempty"`
WedgeLabels ChartWidgetWedgeLabels `json:"wedge_labels,omitempty"`
WedgeValues ChartWidgetWedgeValues `json:"wedge_values,omitempty"`
}
// ForecastGaugeWidgetThresholds defines forecast widget thresholds
type ForecastGaugeWidgetThresholds struct {
Colors []string `json:"colors,omitempty"` // forecasts, gauges
Flip bool `json:"flip,omitempty"` // gauges
Values []string `json:"values,omitempty"` // forecasts, gauges
}
// StatusWidgetAgentStatusSettings defines agent status settings
type StatusWidgetAgentStatusSettings struct {
Search string `json:"search,omitempty"`
ShowAgentTypes string `json:"show_agent_types,omitempty"`
ShowContact bool `json:"show_contact,omitempty"`
ShowFeeds bool `json:"show_feeds,omitempty"`
ShowSetup bool `json:"show_setup,omitempty"`
ShowSkew bool `json:"show_skew,omitempty"`
ShowUpdates bool `json:"show_updates,omitempty"`
}
// StatusWidgetHostStatusSettings defines host status settings
type StatusWidgetHostStatusSettings struct {
LayoutStyle string `json:"layout_style,omitempty"`
Search string `json:"search,omitempty"`
SortBy string `json:"sort_by,omitempty"`
TagFilterSet []string `json:"tag_filter_set,omitempty"`
}
// DashboardWidgetSettings defines settings specific to widget
// Note: optional attributes which are structs need to be pointers so they will be omitted
type DashboardWidgetSettings struct {
AccountID string `json:"account_id,omitempty"` // alerts, clusters, gauges, graphs, lists, status
Acknowledged string `json:"acknowledged,omitempty"` // alerts
AgentStatusSettings *StatusWidgetAgentStatusSettings `json:"agent_status_settings,omitempty"` // status
Algorithm string `json:"algorithm,omitempty"` // clusters
Autoformat bool `json:"autoformat,omitempty"` // text
BodyFormat string `json:"body_format,omitempty"` // text
ChartType string `json:"chart_type,omitempty"` // charts
CheckUUID string `json:"check_uuid,omitempty"` // gauges
Cleared string `json:"cleared,omitempty"` // alerts
ClusterID uint `json:"cluster_id,omitempty"` // clusters
ClusterName string `json:"cluster_name,omitempty"` // clusters
ContactGroups []uint `json:"contact_groups,omitempty"` // alerts
ContentType string `json:"content_type,omitempty"` // status
Datapoints []ChartTextWidgetDatapoint `json:"datapoints,omitempty"` // charts, text
DateWindow string `json:"date_window,omitempty"` // graphs
Definition *ChartWidgtDefinition `json:"definition,omitempty"` // charts
Dependents string `json:"dependents,omitempty"` // alerts
DisableAutoformat bool `json:"disable_autoformat,omitempty"` // gauges
Display string `json:"display,omitempty"` // alerts
Format string `json:"format,omitempty"` // forecasts
Formula string `json:"formula,omitempty"` // gauges
GraphUUID string `json:"graph_id,omitempty"` // graphs
HideXAxis bool `json:"hide_xaxis,omitempty"` // graphs
HideYAxis bool `json:"hide_yaxis,omitempty"` // graphs
HostStatusSettings *StatusWidgetHostStatusSettings `json:"host_status_settings,omitempty"` // status
KeyInline bool `json:"key_inline,omitempty"` // graphs
KeyLoc string `json:"key_loc,omitempty"` // graphs
KeySize uint `json:"key_size,omitempty"` // graphs
KeyWrap bool `json:"key_wrap,omitempty"` // graphs
Label string `json:"label,omitempty"` // graphs
Layout string `json:"layout,omitempty"` // clusters
Limit uint `json:"limit,omitempty"` // lists
Maintenance string `json:"maintenance,omitempty"` // alerts
Markup string `json:"markup,omitempty"` // html
MetricDisplayName string `json:"metric_display_name,omitempty"` // gauges
MetricName string `json:"metric_name,omitempty"` // gauges
MinAge string `json:"min_age,omitempty"` // alerts
OffHours []uint `json:"off_hours,omitempty"` // alerts
OverlaySetID string `json:"overlay_set_id,omitempty"` // graphs
Period uint `json:"period,omitempty"` // gauges, text, graphs
RangeHigh int `json:"range_high,omitempty"` // gauges
RangeLow int `json:"range_low,omitempty"` // gauges
Realtime bool `json:"realtime,omitempty"` // graphs
ResourceLimit string `json:"resource_limit,omitempty"` // forecasts
ResourceUsage string `json:"resource_usage,omitempty"` // forecasts
Search string `json:"search,omitempty"` // alerts, lists
Severity string `json:"severity,omitempty"` // alerts
ShowFlags bool `json:"show_flags,omitempty"` // graphs
Size string `json:"size,omitempty"` // clusters
TagFilterSet []string `json:"tag_filter_set,omitempty"` // alerts
Threshold float32 `json:"threshold,omitempty"` // clusters
Thresholds *ForecastGaugeWidgetThresholds `json:"thresholds,omitempty"` // forecasts, gauges
TimeWindow string `json:"time_window,omitempty"` // alerts
Title string `json:"title,omitempty"` // alerts, charts, forecasts, gauges, html
TitleFormat string `json:"title_format,omitempty"` // text
Trend string `json:"trend,omitempty"` // forecasts
Type string `json:"type,omitempty"` // gauges, lists
UseDefault bool `json:"use_default,omitempty"` // text
ValueType string `json:"value_type,omitempty"` // gauges, text
WeekDays []string `json:"weekdays,omitempty"` // alerts
}
// DashboardWidget defines widget
type DashboardWidget struct {
Active bool `json:"active"`
Height uint `json:"height"`
Name string `json:"name"`
Origin string `json:"origin"`
Settings DashboardWidgetSettings `json:"settings"`
Type string `json:"type"`
WidgetID string `json:"widget_id"`
Width uint `json:"width"`
}
// Dashboard defines a dashboard. See https://login.circonus.com/resources/api/calls/dashboard for more information.
type Dashboard struct {
AccountDefault bool `json:"account_default"`
Active bool `json:"_active,omitempty"`
CID string `json:"_cid,omitempty"`
Created uint `json:"_created,omitempty"`
CreatedBy string `json:"_created_by,omitempty"`
GridLayout DashboardGridLayout `json:"grid_layout"`
LastModified uint `json:"_last_modified,omitempty"`
Options DashboardOptions `json:"options"`
Shared bool `json:"shared"`
Title string `json:"title"`
UUID string `json:"_dashboard_uuid,omitempty"`
Widgets []DashboardWidget `json:"widgets"`
}
// NewDashboard returns a new Dashboard (with defaults, if applicable)
func NewDashboard() *Dashboard {
return &Dashboard{}
}
// FetchDashboard retrieves dashboard with passed cid.
func (a *API) FetchDashboard(cid CIDType) (*Dashboard, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid dashboard CID [none]")
}
dashboardCID := string(*cid)
matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID)
}
result, err := a.Get(string(*cid))
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch dashboard, received JSON: %s", string(result))
}
dashboard := new(Dashboard)
if err := json.Unmarshal(result, dashboard); err != nil {
return nil, err
}
return dashboard, nil
}
// FetchDashboards retrieves all dashboards available to the API Token.
func (a *API) FetchDashboards() (*[]Dashboard, error) {
result, err := a.Get(config.DashboardPrefix)
if err != nil {
return nil, err
}
var dashboards []Dashboard
if err := json.Unmarshal(result, &dashboards); err != nil {
return nil, err
}
return &dashboards, nil
}
// UpdateDashboard updates passed dashboard.
func (a *API) UpdateDashboard(cfg *Dashboard) (*Dashboard, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid dashboard config [nil]")
}
dashboardCID := string(cfg.CID)
matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update dashboard, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(dashboardCID, jsonCfg)
if err != nil {
return nil, err
}
dashboard := &Dashboard{}
if err := json.Unmarshal(result, dashboard); err != nil {
return nil, err
}
return dashboard, nil
}
// CreateDashboard creates a new dashboard.
func (a *API) CreateDashboard(cfg *Dashboard) (*Dashboard, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid dashboard config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create dashboard, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.DashboardPrefix, jsonCfg)
if err != nil {
return nil, err
}
dashboard := &Dashboard{}
if err := json.Unmarshal(result, dashboard); err != nil {
return nil, err
}
return dashboard, nil
}
// DeleteDashboard deletes passed dashboard.
func (a *API) DeleteDashboard(cfg *Dashboard) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid dashboard config [nil]")
}
return a.DeleteDashboardByCID(CIDType(&cfg.CID))
}
// DeleteDashboardByCID deletes dashboard with passed cid.
func (a *API) DeleteDashboardByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid dashboard CID [none]")
}
dashboardCID := string(*cid)
matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID)
}
_, err = a.Delete(dashboardCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchDashboards returns dashboards matching the specified
// search query and/or filter. If nil is passed for both parameters
// all dashboards will be returned.
func (a *API) SearchDashboards(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Dashboard, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchDashboards()
}
reqURL := url.URL{
Path: config.DashboardPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var dashboards []Dashboard
if err := json.Unmarshal(result, &dashboards); err != nil {
return nil, err
}
return &dashboards, nil
}
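A minimal usage sketch of the dashboard calls above, assuming an already-configured *api.API client; the CID literal is a placeholder.
package example

import (
	"github.com/circonus-labs/circonus-gometrics/api"
)

// fetchDashboardTitle retrieves a single dashboard by CID and returns its title.
func fetchDashboardTitle(client *api.API) (string, error) {
	cid := "/dashboard/1234" // placeholder CID
	d, err := client.FetchDashboard(api.CIDType(&cid))
	if err != nil {
		return "", err
	}
	return d.Title, nil
}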

View File

@ -0,0 +1,63 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package api provides methods for interacting with the Circonus API. See the full Circonus API
Documentation at https://login.circonus.com/resources/api for more information.
Raw REST methods
Get - retrieve existing item(s)
Put - update an existing item
Post - create a new item
Delete - remove an existing item
Endpoints (supported)
Account https://login.circonus.com/resources/api/calls/account
Acknowledgement https://login.circonus.com/resources/api/calls/acknowledgement
Alert https://login.circonus.com/resources/api/calls/alert
Annotation https://login.circonus.com/resources/api/calls/annotation
Broker https://login.circonus.com/resources/api/calls/broker
Check https://login.circonus.com/resources/api/calls/check
Check Bundle https://login.circonus.com/resources/api/calls/check_bundle
Check Bundle Metrics https://login.circonus.com/resources/api/calls/check_bundle_metrics
Contact Group https://login.circonus.com/resources/api/calls/contact_group
Dashboard https://login.circonus.com/resources/api/calls/dashboard
Graph https://login.circonus.com/resources/api/calls/graph
Maintenance [window] https://login.circonus.com/resources/api/calls/maintenance
Metric https://login.circonus.com/resources/api/calls/metric
Metric Cluster https://login.circonus.com/resources/api/calls/metric_cluster
Outlier Report https://login.circonus.com/resources/api/calls/outlier_report
Provision Broker https://login.circonus.com/resources/api/calls/provision_broker
Rule Set https://login.circonus.com/resources/api/calls/rule_set
Rule Set Group https://login.circonus.com/resources/api/calls/rule_set_group
User https://login.circonus.com/resources/api/calls/user
Worksheet https://login.circonus.com/resources/api/calls/worksheet
Endpoints (not supported)
Support may be added for these endpoints in the future. These endpoints may currently be used
directly with the Raw REST methods above.
CAQL https://login.circonus.com/resources/api/calls/caql
Check Move https://login.circonus.com/resources/api/calls/check_move
Data https://login.circonus.com/resources/api/calls/data
Snapshot https://login.circonus.com/resources/api/calls/snapshot
Tag https://login.circonus.com/resources/api/calls/tag
Template https://login.circonus.com/resources/api/calls/template
Verbs
Fetch singular/plural item(s) - e.g. FetchAnnotation, FetchAnnotations
Create create new item - e.g. CreateAnnotation
Update update an item - e.g. UpdateAnnotation
Delete remove an item - e.g. DeleteAnnotation, DeleteAnnotationByCID
Search search for item(s) - e.g. SearchAnnotations
New new item config - e.g. NewAnnotation (returns an empty item,
any applicable defaults defined)
Not all endpoints support all verbs.
*/
package api
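A minimal sketch of the raw REST access described above, assuming an already-configured *api.API client; the '/tag' path is one of the endpoints listed as not yet typed and is used here only as an illustration.
package example

import (
	"encoding/json"

	"github.com/circonus-labs/circonus-gometrics/api"
)

// rawTags uses the raw Get method for an endpoint that has no typed helper yet.
func rawTags(client *api.API) ([]map[string]interface{}, error) {
	result, err := client.Get("/tag") // placeholder raw endpoint path
	if err != nil {
		return nil, err
	}
	var tags []map[string]interface{}
	if err := json.Unmarshal(result, &tags); err != nil {
		return nil, err
	}
	return tags, nil
}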

View File

@ -0,0 +1,356 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Graph API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/graph
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// GraphAccessKey defines an access key for a graph
type GraphAccessKey struct {
Active bool `json:"active,omitempty"` // boolean
Height uint `json:"height,omitempty"` // uint
Key string `json:"key,omitempty"` // string
Legend bool `json:"legend,omitempty"` // boolean
LockDate bool `json:"lock_date,omitempty"` // boolean
LockMode string `json:"lock_mode,omitempty"` // string
LockRangeEnd uint `json:"lock_range_end,omitempty"` // uint
LockRangeStart uint `json:"lock_range_start,omitempty"` // uint
LockShowTimes bool `json:"lock_show_times,omitempty"` // boolean
LockZoom string `json:"lock_zoom,omitempty"` // string
Nickname string `json:"nickname,omitempty"` // string
Title bool `json:"title,omitempty"` // boolean
Width uint `json:"width,omitempty"` // uint
XLabels bool `json:"x_labels,omitempty"` // boolean
YLabels bool `json:"y_labels,omitempty"` // boolean
}
// GraphComposite defines a composite
type GraphComposite struct {
Axis string `json:"axis"` // string
Color string `json:"color"` // string
DataFormula *string `json:"data_formula"` // string or null
Hidden bool `json:"hidden"` // boolean
LegendFormula *string `json:"legend_formula"` // string or null
Name string `json:"name"` // string
Stack *uint `json:"stack"` // uint or null
}
// GraphDatapoint defines a datapoint
type GraphDatapoint struct {
Alpha *float64 `json:"alpha,string,omitempty"` // float64
Axis string `json:"axis,omitempty"` // string
CAQL *string `json:"caql,omitempty"` // string or null
CheckID uint `json:"check_id,omitempty"` // uint
Color *string `json:"color,omitempty"` // string
DataFormula *string `json:"data_formula"` // string or null
Derive interface{} `json:"derive,omitempty"` // BUG doc: string, api: string or boolean(for caql statements)
Hidden bool `json:"hidden"` // boolean
LegendFormula *string `json:"legend_formula"` // string or null
MetricName string `json:"metric_name,omitempty"` // string
MetricType string `json:"metric_type,omitempty"` // string
Name string `json:"name"` // string
Search *string `json:"search"` // string or null
Stack *uint `json:"stack"` // uint or null
}
// GraphGuide defines a guide
type GraphGuide struct {
Color string `json:"color"` // string
DataFormula *string `json:"data_formula"` // string or null
Hidden bool `json:"hidden"` // boolean
LegendFormula *string `json:"legend_formula"` // string or null
Name string `json:"name"` // string
}
// GraphMetricCluster defines a metric cluster
type GraphMetricCluster struct {
AggregateFunc string `json:"aggregate_function,omitempty"` // string
Axis string `json:"axis,omitempty"` // string
Color *string `json:"color,omitempty"` // string
DataFormula *string `json:"data_formula"` // string or null
Hidden bool `json:"hidden"` // boolean
LegendFormula *string `json:"legend_formula"` // string or null
MetricCluster string `json:"metric_cluster,omitempty"` // string
Name string `json:"name,omitempty"` // string
Stack *uint `json:"stack"` // uint or null
}
// GraphOverlaySet defines an overlay set for a graph
type GraphOverlaySet struct {
Overlays map[string]GraphOverlay `json:"overlays"`
Title string `json:"title"`
}
// GraphOverlay defines a single overlay in an overlay set
type GraphOverlay struct {
DataOpts OverlayDataOptions `json:"data_opts,omitempty"` // OverlayDataOptions
ID string `json:"id,omitempty"` // string
Title string `json:"title,omitempty"` // string
UISpecs OverlayUISpecs `json:"ui_specs,omitempty"` // OverlayUISpecs
}
// OverlayUISpecs defines UI specs for overlay
type OverlayUISpecs struct {
Decouple bool `json:"decouple,omitempty"` // boolean
ID string `json:"id,omitempty"` // string
Label string `json:"label,omitempty"` // string
Type string `json:"type,omitempty"` // string
Z string `json:"z,omitempty"` // int encoded as string BUG doc: numeric, api: string
}
// OverlayDataOptions defines overlay options for data. Note, each overlay type requires
// a _subset_ of the options. See Graph API documentation (URL above) for details.
type OverlayDataOptions struct {
Alerts string `json:"alerts,omitempty"` // int encoded as string BUG doc: numeric, api: string
ArrayOutput string `json:"array_output,omitempty"` // int encoded as string BUG doc: numeric, api: string
BasePeriod string `json:"base_period,omitempty"` // int encoded as string BUG doc: numeric, api: string
Delay string `json:"delay,omitempty"` // int encoded as string BUG doc: numeric, api: string
Extension string `json:"extension,omitempty"` // string
GraphTitle string `json:"graph_title,omitempty"` // string
GraphUUID string `json:"graph_id,omitempty"` // string
InPercent string `json:"in_percent,omitempty"` // boolean encoded as string BUG doc: boolean, api: string
Inverse string `json:"inverse,omitempty"` // int encoded as string BUG doc: numeric, api: string
Method string `json:"method,omitempty"` // string
Model string `json:"model,omitempty"` // string
ModelEnd string `json:"model_end,omitempty"` // string
ModelPeriod string `json:"model_period,omitempty"` // string
ModelRelative string `json:"model_relative,omitempty"` // int encoded as string BUG doc: numeric, api: string
Out string `json:"out,omitempty"` // string
Prequel string `json:"prequel,omitempty"` // int
Presets string `json:"presets,omitempty"` // string
Quantiles string `json:"quantiles,omitempty"` // string
SeasonLength string `json:"season_length,omitempty"` // int encoded as string BUG doc: numeric, api: string
Sensitivity string `json:"sensitivity,omitempty"` // int encoded as string BUG doc: numeric, api: string
SingleValue string `json:"single_value,omitempty"` // int encoded as string BUG doc: numeric, api: string
TargetPeriod string `json:"target_period,omitempty"` // string
TimeOffset string `json:"time_offset,omitempty"` // string
TimeShift string `json:"time_shift,omitempty"` // int encoded as string BUG doc: numeric, api: string
Transform string `json:"transform,omitempty"` // string
Version string `json:"version,omitempty"` // int encoded as string BUG doc: numeric, api: string
Window string `json:"window,omitempty"` // int encoded as string BUG doc: numeric, api: string
XShift string `json:"x_shift,omitempty"` // string
}
// Graph defines a graph. See https://login.circonus.com/resources/api/calls/graph for more information.
type Graph struct {
AccessKeys []GraphAccessKey `json:"access_keys,omitempty"` // [] len >= 0
CID string `json:"_cid,omitempty"` // string
Composites []GraphComposite `json:"composites,omitempty"` // [] len >= 0
Datapoints []GraphDatapoint `json:"datapoints,omitempty"` // [] len >= 0
Description string `json:"description,omitempty"` // string
Guides []GraphGuide `json:"guides,omitempty"` // [] len >= 0
LineStyle *string `json:"line_style"` // string or null
LogLeftY *int `json:"logarithmic_left_y,string,omitempty"` // int encoded as string or null BUG doc: number (not string)
LogRightY *int `json:"logarithmic_right_y,string,omitempty"` // int encoded as string or null BUG doc: number (not string)
MaxLeftY *float64 `json:"max_left_y,string,omitempty"` // float64 encoded as string or null BUG doc: number (not string)
MaxRightY *float64 `json:"max_right_y,string,omitempty"` // float64 encoded as string or null BUG doc: number (not string)
MetricClusters []GraphMetricCluster `json:"metric_clusters,omitempty"` // [] len >= 0
MinLeftY *float64 `json:"min_left_y,string,omitempty"` // float64 encoded as string or null BUG doc: number (not string)
MinRightY *float64 `json:"min_right_y,string,omitempty"` // float64 encoded as string or null BUG doc: number (not string)
Notes *string `json:"notes,omitempty"` // string or null
OverlaySets *map[string]GraphOverlaySet `json:"overlay_sets,omitempty"` // GroupOverLaySets or null
Style *string `json:"style"` // string or null
Tags []string `json:"tags,omitempty"` // [] len >= 0
Title string `json:"title,omitempty"` // string
}
// NewGraph returns a Graph (with defaults, if applicable)
func NewGraph() *Graph {
return &Graph{}
}
// FetchGraph retrieves graph with passed cid.
func (a *API) FetchGraph(cid CIDType) (*Graph, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid graph CID [none]")
}
graphCID := string(*cid)
matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid graph CID [%s]", graphCID)
}
result, err := a.Get(graphCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch graph, received JSON: %s", string(result))
}
graph := new(Graph)
if err := json.Unmarshal(result, graph); err != nil {
return nil, err
}
return graph, nil
}
// FetchGraphs retrieves all graphs available to the API Token.
func (a *API) FetchGraphs() (*[]Graph, error) {
result, err := a.Get(config.GraphPrefix)
if err != nil {
return nil, err
}
var graphs []Graph
if err := json.Unmarshal(result, &graphs); err != nil {
return nil, err
}
return &graphs, nil
}
// UpdateGraph updates passed graph.
func (a *API) UpdateGraph(cfg *Graph) (*Graph, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid graph config [nil]")
}
graphCID := string(cfg.CID)
matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid graph CID [%s]", graphCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update graph, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(graphCID, jsonCfg)
if err != nil {
return nil, err
}
graph := &Graph{}
if err := json.Unmarshal(result, graph); err != nil {
return nil, err
}
return graph, nil
}
// CreateGraph creates a new graph.
func (a *API) CreateGraph(cfg *Graph) (*Graph, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid graph config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update graph, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.GraphPrefix, jsonCfg)
if err != nil {
return nil, err
}
graph := &Graph{}
if err := json.Unmarshal(result, graph); err != nil {
return nil, err
}
return graph, nil
}
// DeleteGraph deletes passed graph.
func (a *API) DeleteGraph(cfg *Graph) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid graph config [nil]")
}
return a.DeleteGraphByCID(CIDType(&cfg.CID))
}
// DeleteGraphByCID deletes graph with passed cid.
func (a *API) DeleteGraphByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid graph CID [none]")
}
graphCID := string(*cid)
matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid graph CID [%s]", graphCID)
}
_, err = a.Delete(graphCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchGraphs returns graphs matching the specified search query
// and/or filter. If nil is passed for both parameters all graphs
// will be returned.
func (a *API) SearchGraphs(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Graph, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchGraphs()
}
reqURL := url.URL{
Path: config.GraphPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var graphs []Graph
if err := json.Unmarshal(result, &graphs); err != nil {
return nil, err
}
return &graphs, nil
}
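A minimal sketch of a combined search-and-filter query against the graph endpoint, assuming a configured *api.API client; the query syntax and filter key are placeholders only.
package example

import (
	"github.com/circonus-labs/circonus-gometrics/api"
)

// findGraphsByTitle searches graphs by title and an illustrative tag filter.
func findGraphsByTitle(client *api.API, title string) (*[]api.Graph, error) {
	search := api.SearchQueryType("(title=\"" + title + "\")")    // placeholder query syntax
	filter := api.SearchFilterType{"f_tags_has": {"service:web"}} // placeholder filter key
	return client.SearchGraphs(&search, &filter)
}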

View File

@ -0,0 +1,220 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Maintenance window API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/maintenance
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// Maintenance defines a maintenance window. See https://login.circonus.com/resources/api/calls/maintenance for more information.
type Maintenance struct {
CID string `json:"_cid,omitempty"` // string
Item string `json:"item,omitempty"` // string
Notes string `json:"notes,omitempty"` // string
Severities interface{} `json:"severities,omitempty"` // []string NOTE can be set with CSV string or []string
Start uint `json:"start,omitempty"` // uint
Stop uint `json:"stop,omitempty"` // uint
Tags []string `json:"tags,omitempty"` // [] len >= 0
Type string `json:"type,omitempty"` // string
}
// NewMaintenanceWindow returns a new Maintenance window (with defaults, if applicable)
func NewMaintenanceWindow() *Maintenance {
return &Maintenance{}
}
// FetchMaintenanceWindow retrieves maintenance [window] with passed cid.
func (a *API) FetchMaintenanceWindow(cid CIDType) (*Maintenance, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid maintenance window CID [none]")
}
maintenanceCID := string(*cid)
matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID)
}
result, err := a.Get(maintenanceCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch maintenance window, received JSON: %s", string(result))
}
window := &Maintenance{}
if err := json.Unmarshal(result, window); err != nil {
return nil, err
}
return window, nil
}
// FetchMaintenanceWindows retrieves all maintenance [windows] available to API Token.
func (a *API) FetchMaintenanceWindows() (*[]Maintenance, error) {
result, err := a.Get(config.MaintenancePrefix)
if err != nil {
return nil, err
}
var windows []Maintenance
if err := json.Unmarshal(result, &windows); err != nil {
return nil, err
}
return &windows, nil
}
// UpdateMaintenanceWindow updates passed maintenance [window].
func (a *API) UpdateMaintenanceWindow(cfg *Maintenance) (*Maintenance, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid maintenance window config [nil]")
}
maintenanceCID := string(cfg.CID)
matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update maintenance window, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(maintenanceCID, jsonCfg)
if err != nil {
return nil, err
}
window := &Maintenance{}
if err := json.Unmarshal(result, window); err != nil {
return nil, err
}
return window, nil
}
// CreateMaintenanceWindow creates a new maintenance [window].
func (a *API) CreateMaintenanceWindow(cfg *Maintenance) (*Maintenance, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid maintenance window config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create maintenance window, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.MaintenancePrefix, jsonCfg)
if err != nil {
return nil, err
}
window := &Maintenance{}
if err := json.Unmarshal(result, window); err != nil {
return nil, err
}
return window, nil
}
// DeleteMaintenanceWindow deletes passed maintenance [window].
func (a *API) DeleteMaintenanceWindow(cfg *Maintenance) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid maintenance window config [nil]")
}
return a.DeleteMaintenanceWindowByCID(CIDType(&cfg.CID))
}
// DeleteMaintenanceWindowByCID deletes maintenance [window] with passed cid.
func (a *API) DeleteMaintenanceWindowByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid maintenance window CID [none]")
}
maintenanceCID := string(*cid)
matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID)
}
_, err = a.Delete(maintenanceCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchMaintenanceWindows returns maintenance [windows] matching
// the specified search query and/or filter. If nil is passed for
// both parameters all maintenance [windows] will be returned.
func (a *API) SearchMaintenanceWindows(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Maintenance, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchMaintenanceWindows()
}
reqURL := url.URL{
Path: config.MaintenancePrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var windows []Maintenance
if err := json.Unmarshal(result, &windows); err != nil {
return nil, err
}
return &windows, nil
}
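A minimal sketch of creating a maintenance window, assuming a configured *api.API client; the check CID is a placeholder, and Severities is set as a []string per the struct comment above.
package example

import (
	"time"

	"github.com/circonus-labs/circonus-gometrics/api"
)

// scheduleCheckMaintenance creates a one-hour maintenance window for a check.
func scheduleCheckMaintenance(client *api.API) (*api.Maintenance, error) {
	now := time.Now()
	cfg := api.NewMaintenanceWindow()
	cfg.Type = "check"
	cfg.Item = "/check/1234" // placeholder check CID
	cfg.Severities = []string{"1", "2", "3", "4", "5"}
	cfg.Start = uint(now.Unix())
	cfg.Stop = uint(now.Add(time.Hour).Unix())
	cfg.Notes = "planned deploy"
	return client.CreateMaintenanceWindow(cfg)
}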

View File

@ -0,0 +1,162 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Metric API support - Fetch, Create*, Update, Delete*, and Search
// See: https://login.circonus.com/resources/api/calls/metric
// * : create and delete are handled via check_bundle or check_bundle_metrics
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// Metric defines a metric. See https://login.circonus.com/resources/api/calls/metric for more information.
type Metric struct {
Active bool `json:"_active,omitempty"` // boolean
CheckActive bool `json:"_check_active,omitempty"` // boolean
CheckBundleCID string `json:"_check_bundle,omitempty"` // string
CheckCID string `json:"_check,omitempty"` // string
CheckTags []string `json:"_check_tags,omitempty"` // [] len >= 0
CheckUUID string `json:"_check_uuid,omitempty"` // string
CID string `json:"_cid,omitempty"` // string
Histogram string `json:"_histogram,omitempty"` // string
Link *string `json:"link,omitempty"` // string or null
MetricName string `json:"_metric_name,omitempty"` // string
MetricType string `json:"_metric_type,omitempty"` // string
Notes *string `json:"notes,omitempty"` // string or null
Tags []string `json:"tags,omitempty"` // [] len >= 0
Units *string `json:"units,omitempty"` // string or null
}
// FetchMetric retrieves metric with passed cid.
func (a *API) FetchMetric(cid CIDType) (*Metric, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid metric CID [none]")
}
metricCID := string(*cid)
matched, err := regexp.MatchString(config.MetricCIDRegex, metricCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid metric CID [%s]", metricCID)
}
result, err := a.Get(metricCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch metric, received JSON: %s", string(result))
}
metric := &Metric{}
if err := json.Unmarshal(result, metric); err != nil {
return nil, err
}
return metric, nil
}
// FetchMetrics retrieves all metrics available to API Token.
func (a *API) FetchMetrics() (*[]Metric, error) {
result, err := a.Get(config.MetricPrefix)
if err != nil {
return nil, err
}
var metrics []Metric
if err := json.Unmarshal(result, &metrics); err != nil {
return nil, err
}
return &metrics, nil
}
// UpdateMetric updates passed metric.
func (a *API) UpdateMetric(cfg *Metric) (*Metric, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid metric config [nil]")
}
metricCID := string(cfg.CID)
matched, err := regexp.MatchString(config.MetricCIDRegex, metricCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid metric CID [%s]", metricCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update metric, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(metricCID, jsonCfg)
if err != nil {
return nil, err
}
metric := &Metric{}
if err := json.Unmarshal(result, metric); err != nil {
return nil, err
}
return metric, nil
}
// SearchMetrics returns metrics matching the specified search query
// and/or filter. If nil is passed for both parameters all metrics
// will be returned.
func (a *API) SearchMetrics(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Metric, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchMetrics()
}
reqURL := url.URL{
Path: config.MetricPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var metrics []Metric
if err := json.Unmarshal(result, &metrics); err != nil {
return nil, err
}
return &metrics, nil
}
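A minimal sketch of filtering metrics, assuming a configured *api.API client; the filter key is a placeholder for whatever filter the endpoint actually supports.
package example

import (
	"github.com/circonus-labs/circonus-gometrics/api"
)

// metricsForCheck filters metrics by check UUID using SearchMetrics.
func metricsForCheck(client *api.API, checkUUID string) (*[]api.Metric, error) {
	filter := api.SearchFilterType{"f__check_uuid": {checkUUID}} // placeholder filter key
	return client.SearchMetrics(nil, &filter)
}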

View File

@ -0,0 +1,261 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Metric Cluster API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/metric_cluster
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// MetricQuery object
type MetricQuery struct {
Query string `json:"query"`
Type string `json:"type"`
}
// MetricCluster defines a metric cluster. See https://login.circonus.com/resources/api/calls/metric_cluster for more information.
type MetricCluster struct {
CID string `json:"_cid,omitempty"` // string
Description string `json:"description"` // string
MatchingMetrics []string `json:"_matching_metrics,omitempty"` // [] len >= 1 (result info only, if query has extras - cannot be set)
MatchingUUIDMetrics map[string][]string `json:"_matching_uuid_metrics,omitempty"` // [] len >= 1 (result info only, if query has extras - cannot be set)
Name string `json:"name"` // string
Queries []MetricQuery `json:"queries"` // [] len >= 1
Tags []string `json:"tags"` // [] len >= 0
}
// NewMetricCluster returns a new MetricCluster (with defaults, if applicable)
func NewMetricCluster() *MetricCluster {
return &MetricCluster{}
}
// FetchMetricCluster retrieves metric cluster with passed cid.
func (a *API) FetchMetricCluster(cid CIDType, extras string) (*MetricCluster, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid metric cluster CID [none]")
}
clusterCID := string(*cid)
matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID)
}
reqURL := url.URL{
Path: clusterCID,
}
extra := ""
switch extras {
case "metrics":
extra = "_matching_metrics"
case "uuids":
extra = "_matching_uuid_metrics"
}
if extra != "" {
q := url.Values{}
q.Set("extra", extra)
reqURL.RawQuery = q.Encode()
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch metric cluster, received JSON: %s", string(result))
}
cluster := &MetricCluster{}
if err := json.Unmarshal(result, cluster); err != nil {
return nil, err
}
return cluster, nil
}
// FetchMetricClusters retrieves all metric clusters available to API Token.
func (a *API) FetchMetricClusters(extras string) (*[]MetricCluster, error) {
reqURL := url.URL{
Path: config.MetricClusterPrefix,
}
extra := ""
switch extras {
case "metrics":
extra = "_matching_metrics"
case "uuids":
extra = "_matching_uuid_metrics"
}
if extra != "" {
q := url.Values{}
q.Set("extra", extra)
reqURL.RawQuery = q.Encode()
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, err
}
var clusters []MetricCluster
if err := json.Unmarshal(result, &clusters); err != nil {
return nil, err
}
return &clusters, nil
}
// UpdateMetricCluster updates passed metric cluster.
func (a *API) UpdateMetricCluster(cfg *MetricCluster) (*MetricCluster, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid metric cluster config [nil]")
}
clusterCID := string(cfg.CID)
matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update metric cluster, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(clusterCID, jsonCfg)
if err != nil {
return nil, err
}
cluster := &MetricCluster{}
if err := json.Unmarshal(result, cluster); err != nil {
return nil, err
}
return cluster, nil
}
// CreateMetricCluster creates a new metric cluster.
func (a *API) CreateMetricCluster(cfg *MetricCluster) (*MetricCluster, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid metric cluster config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create metric cluster, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.MetricClusterPrefix, jsonCfg)
if err != nil {
return nil, err
}
cluster := &MetricCluster{}
if err := json.Unmarshal(result, cluster); err != nil {
return nil, err
}
return cluster, nil
}
// DeleteMetricCluster deletes passed metric cluster.
func (a *API) DeleteMetricCluster(cfg *MetricCluster) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid metric cluster config [nil]")
}
return a.DeleteMetricClusterByCID(CIDType(&cfg.CID))
}
// DeleteMetricClusterByCID deletes metric cluster with passed cid.
func (a *API) DeleteMetricClusterByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid metric cluster CID [none]")
}
clusterCID := string(*cid)
matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID)
}
_, err = a.Delete(clusterCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchMetricClusters returns metric clusters matching the specified
// search query and/or filter. If nil is passed for both parameters
// all metric clusters will be returned.
func (a *API) SearchMetricClusters(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]MetricCluster, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchMetricClusters("")
}
reqURL := url.URL{
Path: config.MetricClusterPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var clusters []MetricCluster
if err := json.Unmarshal(result, &clusters); err != nil {
return nil, err
}
return &clusters, nil
}
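A minimal sketch of fetching a metric cluster with the "metrics" extras flag handled above, assuming a configured *api.API client; the CID is a placeholder.
package example

import (
	"github.com/circonus-labs/circonus-gometrics/api"
)

// clusterMatchingMetrics fetches a metric cluster and returns its matching metric names.
func clusterMatchingMetrics(client *api.API) ([]string, error) {
	cid := "/metric_cluster/1234" // placeholder CID
	cluster, err := client.FetchMetricCluster(api.CIDType(&cid), "metrics")
	if err != nil {
		return nil, err
	}
	return cluster.MatchingMetrics, nil
}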

View File

@ -0,0 +1,221 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// OutlierReport API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/report
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// OutlierReport defines an outlier report. See https://login.circonus.com/resources/api/calls/report for more information.
type OutlierReport struct {
CID string `json:"_cid,omitempty"` // string
Config string `json:"config,omitempty"` // string
Created uint `json:"_created,omitempty"` // uint
CreatedBy string `json:"_created_by,omitempty"` // string
LastModified uint `json:"_last_modified,omitempty"` // uint
LastModifiedBy string `json:"_last_modified_by,omitempty"` // string
MetricClusterCID string `json:"metric_cluster,omitempty"` // string
Tags []string `json:"tags,omitempty"` // [] len >= 0
Title string `json:"title,omitempty"` // string
}
// NewOutlierReport returns a new OutlierReport (with defaults, if applicable)
func NewOutlierReport() *OutlierReport {
return &OutlierReport{}
}
// FetchOutlierReport retrieves outlier report with passed cid.
func (a *API) FetchOutlierReport(cid CIDType) (*OutlierReport, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid outlier report CID [none]")
}
reportCID := string(*cid)
matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid outlier report CID [%s]", reportCID)
}
result, err := a.Get(reportCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch outlier report, received JSON: %s", string(result))
}
report := &OutlierReport{}
if err := json.Unmarshal(result, report); err != nil {
return nil, err
}
return report, nil
}
// FetchOutlierReports retrieves all outlier reports available to API Token.
func (a *API) FetchOutlierReports() (*[]OutlierReport, error) {
result, err := a.Get(config.OutlierReportPrefix)
if err != nil {
return nil, err
}
var reports []OutlierReport
if err := json.Unmarshal(result, &reports); err != nil {
return nil, err
}
return &reports, nil
}
// UpdateOutlierReport updates passed outlier report.
func (a *API) UpdateOutlierReport(cfg *OutlierReport) (*OutlierReport, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid outlier report config [nil]")
}
reportCID := string(cfg.CID)
matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid outlier report CID [%s]", reportCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update outlier report, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(reportCID, jsonCfg)
if err != nil {
return nil, err
}
report := &OutlierReport{}
if err := json.Unmarshal(result, report); err != nil {
return nil, err
}
return report, nil
}
// CreateOutlierReport creates a new outlier report.
func (a *API) CreateOutlierReport(cfg *OutlierReport) (*OutlierReport, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid outlier report config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create outlier report, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.OutlierReportPrefix, jsonCfg)
if err != nil {
return nil, err
}
report := &OutlierReport{}
if err := json.Unmarshal(result, report); err != nil {
return nil, err
}
return report, nil
}
// DeleteOutlierReport deletes passed outlier report.
func (a *API) DeleteOutlierReport(cfg *OutlierReport) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid outlier report config [nil]")
}
return a.DeleteOutlierReportByCID(CIDType(&cfg.CID))
}
// DeleteOutlierReportByCID deletes outlier report with passed cid.
func (a *API) DeleteOutlierReportByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid outlier report CID [none]")
}
reportCID := string(*cid)
matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid outlier report CID [%s]", reportCID)
}
_, err = a.Delete(reportCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchOutlierReports returns outlier report matching the
// specified search query and/or filter. If nil is passed for
// both parameters all outlier report will be returned.
func (a *API) SearchOutlierReports(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]OutlierReport, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchOutlierReports()
}
reqURL := url.URL{
Path: config.OutlierReportPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var reports []OutlierReport
if err := json.Unmarshal(result, &reports); err != nil {
return nil, err
}
return &reports, nil
}
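A minimal sketch of listing outlier reports, assuming a configured *api.API client.
package example

import (
	"github.com/circonus-labs/circonus-gometrics/api"
)

// reportTitles lists the titles of all outlier reports visible to the API Token.
func reportTitles(client *api.API) ([]string, error) {
	reports, err := client.FetchOutlierReports()
	if err != nil {
		return nil, err
	}
	titles := make([]string, 0, len(*reports))
	for _, r := range *reports {
		titles = append(titles, r.Title)
	}
	return titles, nil
}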

View File

@ -0,0 +1,151 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// ProvisionBroker API support - Fetch, Create, and Update
// See: https://login.circonus.com/resources/api/calls/provision_broker
// Note that the provision_broker endpoint does not return the standard cid format
// of '/object/item' (e.g. /provision_broker/abc-123); it returns just 'item'.
package api
import (
"encoding/json"
"fmt"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// BrokerStratcon defines stratcons for broker
type BrokerStratcon struct {
CN string `json:"cn,omitempty"` // string
Host string `json:"host,omitempty"` // string
Port string `json:"port,omitempty"` // string
}
// ProvisionBroker defines a provision broker [request]. See https://login.circonus.com/resources/api/calls/provision_broker for more details.
type ProvisionBroker struct {
Cert string `json:"_cert,omitempty"` // string
CID string `json:"_cid,omitempty"` // string
CSR string `json:"_csr,omitempty"` // string
ExternalHost string `json:"external_host,omitempty"` // string
ExternalPort string `json:"external_port,omitempty"` // string
IPAddress string `json:"ipaddress,omitempty"` // string
Latitude string `json:"latitude,omitempty"` // string
Longitude string `json:"longitude,omitempty"` // string
Name string `json:"noit_name,omitempty"` // string
Port string `json:"port,omitempty"` // string
PreferReverseConnection bool `json:"prefer_reverse_connection,omitempty"` // boolean
Rebuild bool `json:"rebuild,omitempty"` // boolean
Stratcons []BrokerStratcon `json:"_stratcons,omitempty"` // [] len >= 1
Tags []string `json:"tags,omitempty"` // [] len >= 0
}
// NewProvisionBroker returns a new ProvisionBroker (with defaults, if applicable)
func NewProvisionBroker() *ProvisionBroker {
return &ProvisionBroker{}
}
// FetchProvisionBroker retrieves provision broker [request] with passed cid.
func (a *API) FetchProvisionBroker(cid CIDType) (*ProvisionBroker, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid provision broker request CID [none]")
}
brokerCID := string(*cid)
matched, err := regexp.MatchString(config.ProvisionBrokerCIDRegex, brokerCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid provision broker request CID [%s]", brokerCID)
}
result, err := a.Get(brokerCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch broker provision request, received JSON: %s", string(result))
}
broker := &ProvisionBroker{}
if err := json.Unmarshal(result, broker); err != nil {
return nil, err
}
return broker, nil
}
// UpdateProvisionBroker updates a broker definition [request].
func (a *API) UpdateProvisionBroker(cid CIDType, cfg *ProvisionBroker) (*ProvisionBroker, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid provision broker request config [nil]")
}
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid provision broker request CID [none]")
}
brokerCID := string(*cid)
matched, err := regexp.MatchString(config.ProvisionBrokerCIDRegex, brokerCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid provision broker request CID [%s]", brokerCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update broker provision request, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(brokerCID, jsonCfg)
if err != nil {
return nil, err
}
broker := &ProvisionBroker{}
if err := json.Unmarshal(result, broker); err != nil {
return nil, err
}
return broker, nil
}
// CreateProvisionBroker creates a new provision broker [request].
func (a *API) CreateProvisionBroker(cfg *ProvisionBroker) (*ProvisionBroker, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid provision broker request config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create broker provision request, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.ProvisionBrokerPrefix, jsonCfg)
if err != nil {
return nil, err
}
broker := &ProvisionBroker{}
if err := json.Unmarshal(result, broker); err != nil {
return nil, err
}
return broker, nil
}
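A minimal sketch of submitting a provision broker request, assuming a configured *api.API client; all field values are illustrative placeholders.
package example

import (
	"github.com/circonus-labs/circonus-gometrics/api"
)

// requestBrokerProvisioning submits a provision broker request with placeholder values.
func requestBrokerProvisioning(client *api.API) (*api.ProvisionBroker, error) {
	cfg := api.NewProvisionBroker()
	cfg.Name = "broker-eu-1"     // placeholder noit name
	cfg.IPAddress = "192.0.2.10" // placeholder address (TEST-NET range)
	cfg.Port = "43191"
	cfg.Tags = []string{"datacenter:eu"}
	return client.CreateProvisionBroker(cfg)
}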

View File

@ -0,0 +1,234 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Rule Set API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/rule_set
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// RuleSetRule defines a ruleset rule
type RuleSetRule struct {
Criteria string `json:"criteria"` // string
Severity uint `json:"severity"` // uint
Value interface{} `json:"value"` // BUG doc: string, api: actual type returned switches based on Criteria
Wait uint `json:"wait"` // uint
WindowingDuration uint `json:"windowing_duration,omitempty"` // uint
WindowingFunction *string `json:"windowing_function,omitempty"` // string or null
}
// RuleSet defines a ruleset. See https://login.circonus.com/resources/api/calls/rule_set for more information.
type RuleSet struct {
CheckCID string `json:"check"` // string
CID string `json:"_cid,omitempty"` // string
ContactGroups map[uint8][]string `json:"contact_groups"` // [] len 5
Derive *string `json:"derive,omitempty"` // string or null
Link *string `json:"link"` // string or null
MetricName string `json:"metric_name"` // string
MetricTags []string `json:"metric_tags"` // [] len >= 0
MetricType string `json:"metric_type"` // string
Notes *string `json:"notes"` // string or null
Parent *string `json:"parent,omitempty"` // string or null
Rules []RuleSetRule `json:"rules"` // [] len >= 1
Tags []string `json:"tags"` // [] len >= 0
}
// NewRuleSet returns a new RuleSet (with defaults if applicable)
func NewRuleSet() *RuleSet {
return &RuleSet{}
}
// FetchRuleSet retrieves rule set with passed cid.
func (a *API) FetchRuleSet(cid CIDType) (*RuleSet, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid rule set CID [none]")
}
rulesetCID := string(*cid)
matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID)
}
result, err := a.Get(rulesetCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch rule set, received JSON: %s", string(result))
}
ruleset := &RuleSet{}
if err := json.Unmarshal(result, ruleset); err != nil {
return nil, err
}
return ruleset, nil
}
// FetchRuleSets retrieves all rule sets available to API Token.
func (a *API) FetchRuleSets() (*[]RuleSet, error) {
result, err := a.Get(config.RuleSetPrefix)
if err != nil {
return nil, err
}
var rulesets []RuleSet
if err := json.Unmarshal(result, &rulesets); err != nil {
return nil, err
}
return &rulesets, nil
}
// UpdateRuleSet updates passed rule set.
func (a *API) UpdateRuleSet(cfg *RuleSet) (*RuleSet, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid rule set config [nil]")
}
rulesetCID := string(cfg.CID)
matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update rule set, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(rulesetCID, jsonCfg)
if err != nil {
return nil, err
}
ruleset := &RuleSet{}
if err := json.Unmarshal(result, ruleset); err != nil {
return nil, err
}
return ruleset, nil
}
// CreateRuleSet creates a new rule set.
func (a *API) CreateRuleSet(cfg *RuleSet) (*RuleSet, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid rule set config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create rule set, sending JSON: %s", string(jsonCfg))
}
resp, err := a.Post(config.RuleSetPrefix, jsonCfg)
if err != nil {
return nil, err
}
ruleset := &RuleSet{}
if err := json.Unmarshal(resp, ruleset); err != nil {
return nil, err
}
return ruleset, nil
}
// DeleteRuleSet deletes passed rule set.
func (a *API) DeleteRuleSet(cfg *RuleSet) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid rule set config [nil]")
}
return a.DeleteRuleSetByCID(CIDType(&cfg.CID))
}
// DeleteRuleSetByCID deletes rule set with passed cid.
func (a *API) DeleteRuleSetByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid rule set CID [none]")
}
rulesetCID := string(*cid)
matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID)
}
_, err = a.Delete(rulesetCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchRuleSets returns rule sets matching the specified search
// query and/or filter. If nil is passed for both parameters all
// rule sets will be returned.
func (a *API) SearchRuleSets(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]RuleSet, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchRuleSets()
}
reqURL := url.URL{
Path: config.RuleSetPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var rulesets []RuleSet
if err := json.Unmarshal(result, &rulesets); err != nil {
return nil, err
}
return &rulesets, nil
}
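A minimal sketch of creating a rule set, assuming a configured *api.API client; the check CID, contact group CID, metric name, and criteria values are placeholders and only loosely mirror the API documentation.
package example

import (
	"github.com/circonus-labs/circonus-gometrics/api"
)

// cpuAlertRuleSet builds a rule set that raises severity 1 on a placeholder criteria.
func cpuAlertRuleSet(client *api.API) (*api.RuleSet, error) {
	cfg := api.NewRuleSet()
	cfg.CheckCID = "/check/1234" // placeholder check CID
	cfg.MetricName = "cpu_idle"
	cfg.MetricType = "numeric"
	cfg.ContactGroups = map[uint8][]string{1: {"/contact_group/1234"}, 2: {}, 3: {}, 4: {}, 5: {}}
	cfg.Rules = []api.RuleSetRule{{Criteria: "min value", Value: "10", Wait: 5, Severity: 1}}
	return client.CreateRuleSet(cfg)
}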

View File

@ -0,0 +1,231 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// RuleSetGroup API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/rule_set_group
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// RuleSetGroupFormula defines a formula for raising alerts
type RuleSetGroupFormula struct {
Expression interface{} `json:"expression"` // string or uint BUG doc: string, api: string or numeric
RaiseSeverity uint `json:"raise_severity"` // uint
Wait uint `json:"wait"` // uint
}
// RuleSetGroupCondition defines conditions for raising alerts
type RuleSetGroupCondition struct {
MatchingSeverities []string `json:"matching_serverities"` // [] len >= 1
RuleSetCID string `json:"rule_set"` // string
}
// RuleSetGroup defines a ruleset group. See https://login.circonus.com/resources/api/calls/rule_set_group for more information.
type RuleSetGroup struct {
CID string `json:"_cid,omitempty"` // string
ContactGroups map[uint8][]string `json:"contact_groups"` // [] len == 5
Formulas []RuleSetGroupFormula `json:"formulas"` // [] len >= 0
Name string `json:"name"` // string
RuleSetConditions []RuleSetGroupCondition `json:"rule_set_conditions"` // [] len >= 1
Tags []string `json:"tags"` // [] len >= 0
}
// NewRuleSetGroup returns a new RuleSetGroup (with defaults, if applicable)
func NewRuleSetGroup() *RuleSetGroup {
return &RuleSetGroup{}
}
// FetchRuleSetGroup retrieves rule set group with passed cid.
func (a *API) FetchRuleSetGroup(cid CIDType) (*RuleSetGroup, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid rule set group CID [none]")
}
groupCID := string(*cid)
matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid rule set group CID [%s]", groupCID)
}
result, err := a.Get(groupCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch rule set group, received JSON: %s", string(result))
}
rulesetGroup := &RuleSetGroup{}
if err := json.Unmarshal(result, rulesetGroup); err != nil {
return nil, err
}
return rulesetGroup, nil
}
// FetchRuleSetGroups retrieves all rule set groups available to API Token.
func (a *API) FetchRuleSetGroups() (*[]RuleSetGroup, error) {
result, err := a.Get(config.RuleSetGroupPrefix)
if err != nil {
return nil, err
}
var rulesetGroups []RuleSetGroup
if err := json.Unmarshal(result, &rulesetGroups); err != nil {
return nil, err
}
return &rulesetGroups, nil
}
// UpdateRuleSetGroup updates passed rule set group.
func (a *API) UpdateRuleSetGroup(cfg *RuleSetGroup) (*RuleSetGroup, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid rule set group config [nil]")
}
groupCID := string(cfg.CID)
matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid rule set group CID [%s]", groupCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update rule set group, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(groupCID, jsonCfg)
if err != nil {
return nil, err
}
groups := &RuleSetGroup{}
if err := json.Unmarshal(result, groups); err != nil {
return nil, err
}
return groups, nil
}
// CreateRuleSetGroup creates a new rule set group.
func (a *API) CreateRuleSetGroup(cfg *RuleSetGroup) (*RuleSetGroup, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid rule set group config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create rule set group, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.RuleSetGroupPrefix, jsonCfg)
if err != nil {
return nil, err
}
group := &RuleSetGroup{}
if err := json.Unmarshal(result, group); err != nil {
return nil, err
}
return group, nil
}
// DeleteRuleSetGroup deletes passed rule set group.
func (a *API) DeleteRuleSetGroup(cfg *RuleSetGroup) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid rule set group config [nil]")
}
return a.DeleteRuleSetGroupByCID(CIDType(&cfg.CID))
}
// DeleteRuleSetGroupByCID deletes rule set group with passed cid.
func (a *API) DeleteRuleSetGroupByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid rule set group CID [none]")
}
groupCID := string(*cid)
matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid rule set group CID [%s]", groupCID)
}
_, err = a.Delete(groupCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchRuleSetGroups returns rule set groups matching the
// specified search query and/or filter. If nil is passed for
// both parameters all rule set groups will be returned.
func (a *API) SearchRuleSetGroups(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]RuleSetGroup, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchRuleSetGroups()
}
reqURL := url.URL{
Path: config.RuleSetGroupPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var groups []RuleSetGroup
if err := json.Unmarshal(result, &groups); err != nil {
return nil, err
}
return &groups, nil
}
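A minimal sketch of creating a rule set group, assuming a configured *api.API client; the names, CIDs, and formula expression are placeholders.
package example

import (
	"github.com/circonus-labs/circonus-gometrics/api"
)

// webTierRuleSetGroup creates a rule set group tying one rule set condition to a formula.
func webTierRuleSetGroup(client *api.API) (*api.RuleSetGroup, error) {
	cfg := api.NewRuleSetGroup()
	cfg.Name = "web-tier" // placeholder name
	cfg.ContactGroups = map[uint8][]string{1: {"/contact_group/1234"}}
	cfg.RuleSetConditions = []api.RuleSetGroupCondition{
		{MatchingSeverities: []string{"1", "2"}, RuleSetCID: "/rule_set/1234_cpu_idle"},
	}
	cfg.Formulas = []api.RuleSetGroupFormula{
		{Expression: "A and B", RaiseSeverity: 1, Wait: 0}, // placeholder expression
	}
	return client.CreateRuleSetGroup(cfg)
}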

View File

@ -0,0 +1,159 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// User API support - Fetch, Update, and Search
// See: https://login.circonus.com/resources/api/calls/user
// Note: Create and Delete are not supported directly via the User API
// endpoint. See the Account endpoint for inviting and removing users
// from specific accounts.
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// UserContactInfo defines known contact details
type UserContactInfo struct {
SMS string `json:"sms,omitempty"` // string
XMPP string `json:"xmpp,omitempty"` // string
}
// User defines a user. See https://login.circonus.com/resources/api/calls/user for more information.
type User struct {
CID string `json:"_cid,omitempty"` // string
ContactInfo UserContactInfo `json:"contact_info,omitempty"` // UserContactInfo
Email string `json:"email"` // string
Firstname string `json:"firstname"` // string
Lastname string `json:"lastname"` // string
}
// FetchUser retrieves user with passed cid. Pass nil for '/user/current'.
func (a *API) FetchUser(cid CIDType) (*User, error) {
var userCID string
if cid == nil || *cid == "" {
userCID = config.UserPrefix + "/current"
} else {
userCID = string(*cid)
}
matched, err := regexp.MatchString(config.UserCIDRegex, userCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid user CID [%s]", userCID)
}
result, err := a.Get(userCID)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch user, received JSON: %s", string(result))
}
user := new(User)
if err := json.Unmarshal(result, user); err != nil {
return nil, err
}
return user, nil
}
// FetchUsers retrieves all users available to API Token.
func (a *API) FetchUsers() (*[]User, error) {
result, err := a.Get(config.UserPrefix)
if err != nil {
return nil, err
}
var users []User
if err := json.Unmarshal(result, &users); err != nil {
return nil, err
}
return &users, nil
}
// UpdateUser updates passed user.
func (a *API) UpdateUser(cfg *User) (*User, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid user config [nil]")
}
userCID := string(cfg.CID)
matched, err := regexp.MatchString(config.UserCIDRegex, userCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid user CID [%s]", userCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update user, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(userCID, jsonCfg)
if err != nil {
return nil, err
}
user := &User{}
if err := json.Unmarshal(result, user); err != nil {
return nil, err
}
return user, nil
}
// SearchUsers returns users matching a filter (search queries
// are not supported by the user endpoint). Pass nil as filter for all
// users available to the API Token.
func (a *API) SearchUsers(filterCriteria *SearchFilterType) (*[]User, error) {
q := url.Values{}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchUsers()
}
reqURL := url.URL{
Path: config.UserPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var users []User
if err := json.Unmarshal(result, &users); err != nil {
return nil, err
}
return &users, nil
}
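
A minimal usage sketch of the User API above (not part of the vendored source), assuming an already-constructed *api.API client and the standard "fmt" import; the "f_email" filter key is an illustrative Circonus filter parameter:

func printUsers(client *api.API) error {
	// nil CID resolves to "/user/current" per FetchUser above
	me, err := client.FetchUser(nil)
	if err != nil {
		return err
	}
	fmt.Println("current user:", me.Email)
	// "f_email" is an illustrative filter key; consult the Circonus API docs
	filter := api.SearchFilterType{"f_email": []string{me.Email}}
	users, err := client.SearchUsers(&filter)
	if err != nil {
		return err
	}
	for _, u := range *users {
		fmt.Println(u.CID, u.Firstname, u.Lastname)
	}
	return nil
}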

View File

@ -0,0 +1,234 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Worksheet API support - Fetch, Create, Update, Delete, and Search
// See: https://login.circonus.com/resources/api/calls/worksheet
package api
import (
"encoding/json"
"fmt"
"net/url"
"regexp"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// WorksheetGraph defines a graph CID to be included in the worksheet
type WorksheetGraph struct {
GraphCID string `json:"graph"` // string
}
// WorksheetSmartQuery defines a query that dynamically includes graphs in the worksheet
type WorksheetSmartQuery struct {
Name string `json:"name"`
Order []string `json:"order"`
Query string `json:"query"`
}
// Worksheet defines a worksheet. See https://login.circonus.com/resources/api/calls/worksheet for more information.
type Worksheet struct {
CID string `json:"_cid,omitempty"` // string
Description *string `json:"description"` // string or null
Favorite bool `json:"favorite"` // boolean
Graphs []WorksheetGraph `json:"graphs"` // [] len >= 0
Notes *string `json:"notes"` // string or null
SmartQueries []WorksheetSmartQuery `json:"smart_queries,omitempty"` // [] len >= 0
Tags []string `json:"tags"` // [] len >= 0
Title string `json:"title"` // string
}
// NewWorksheet returns a new Worksheet (with defaults, if applicable)
func NewWorksheet() *Worksheet {
return &Worksheet{
Graphs: []WorksheetGraph{}, // graphs is a required attribute and cannot be null
}
}
// FetchWorksheet retrieves worksheet with passed cid.
func (a *API) FetchWorksheet(cid CIDType) (*Worksheet, error) {
if cid == nil || *cid == "" {
return nil, fmt.Errorf("Invalid worksheet CID [none]")
}
worksheetCID := string(*cid)
matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
}
result, err := a.Get(string(*cid))
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] fetch worksheet, received JSON: %s", string(result))
}
worksheet := new(Worksheet)
if err := json.Unmarshal(result, worksheet); err != nil {
return nil, err
}
return worksheet, nil
}
// FetchWorksheets retrieves all worksheets available to API Token.
func (a *API) FetchWorksheets() (*[]Worksheet, error) {
result, err := a.Get(config.WorksheetPrefix)
if err != nil {
return nil, err
}
var worksheets []Worksheet
if err := json.Unmarshal(result, &worksheets); err != nil {
return nil, err
}
return &worksheets, nil
}
// UpdateWorksheet updates passed worksheet.
func (a *API) UpdateWorksheet(cfg *Worksheet) (*Worksheet, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid worksheet config [nil]")
}
worksheetCID := string(cfg.CID)
matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
if err != nil {
return nil, err
}
if !matched {
return nil, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] update worksheet, sending JSON: %s", string(jsonCfg))
}
result, err := a.Put(worksheetCID, jsonCfg)
if err != nil {
return nil, err
}
worksheet := &Worksheet{}
if err := json.Unmarshal(result, worksheet); err != nil {
return nil, err
}
return worksheet, nil
}
// CreateWorksheet creates a new worksheet.
func (a *API) CreateWorksheet(cfg *Worksheet) (*Worksheet, error) {
if cfg == nil {
return nil, fmt.Errorf("Invalid worksheet config [nil]")
}
jsonCfg, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
if a.Debug {
a.Log.Printf("[DEBUG] create annotation, sending JSON: %s", string(jsonCfg))
}
result, err := a.Post(config.WorksheetPrefix, jsonCfg)
if err != nil {
return nil, err
}
worksheet := &Worksheet{}
if err := json.Unmarshal(result, worksheet); err != nil {
return nil, err
}
return worksheet, nil
}
// DeleteWorksheet deletes passed worksheet.
func (a *API) DeleteWorksheet(cfg *Worksheet) (bool, error) {
if cfg == nil {
return false, fmt.Errorf("Invalid worksheet config [nil]")
}
return a.DeleteWorksheetByCID(CIDType(&cfg.CID))
}
// DeleteWorksheetByCID deletes worksheet with passed cid.
func (a *API) DeleteWorksheetByCID(cid CIDType) (bool, error) {
if cid == nil || *cid == "" {
return false, fmt.Errorf("Invalid worksheet CID [none]")
}
worksheetCID := string(*cid)
matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
if err != nil {
return false, err
}
if !matched {
return false, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
}
_, err = a.Delete(worksheetCID)
if err != nil {
return false, err
}
return true, nil
}
// SearchWorksheets returns worksheets matching the specified search
// query and/or filter. If nil is passed for both parameters all
// worksheets will be returned.
func (a *API) SearchWorksheets(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Worksheet, error) {
q := url.Values{}
if searchCriteria != nil && *searchCriteria != "" {
q.Set("search", string(*searchCriteria))
}
if filterCriteria != nil && len(*filterCriteria) > 0 {
for filter, criteria := range *filterCriteria {
for _, val := range criteria {
q.Add(filter, val)
}
}
}
if q.Encode() == "" {
return a.FetchWorksheets()
}
reqURL := url.URL{
Path: config.WorksheetPrefix,
RawQuery: q.Encode(),
}
result, err := a.Get(reqURL.String())
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var worksheets []Worksheet
if err := json.Unmarshal(result, &worksheets); err != nil {
return nil, err
}
return &worksheets, nil
}
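
A minimal sketch of the Worksheet API above (not part of the vendored source), again assuming a ready *api.API client and an "fmt" import; the graph CID, title, and search query syntax are illustrative values:

func createAndFindWorksheet(client *api.API) error {
	ws := api.NewWorksheet() // Graphs is pre-initialized to an empty, non-null slice
	ws.Title = "cgm example"
	ws.Graphs = []api.WorksheetGraph{
		{GraphCID: "/graph/01234567-89ab-cdef-0123-456789abcdef"},
	}

	created, err := client.CreateWorksheet(ws)
	if err != nil {
		return err
	}

	// illustrative search query; with no query and no filter,
	// SearchWorksheets simply falls back to FetchWorksheets
	query := api.SearchQueryType(`(title:"cgm example")`)
	found, err := client.SearchWorksheets(&query, nil)
	if err != nil {
		return err
	}
	fmt.Printf("created %s, search matched %d worksheet(s)\n", created.CID, len(*found))
	return nil
}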

View File

@ -24,7 +24,8 @@ func init() {
// Get Broker to use when creating a check
func (cm *CheckManager) getBroker() (*api.Broker, error) {
if cm.brokerID != 0 {
broker, err := cm.apih.FetchBrokerByID(cm.brokerID)
cid := fmt.Sprintf("/broker/%d", cm.brokerID)
broker, err := cm.apih.FetchBroker(api.CIDType(&cid))
if err != nil {
return nil, err
}
@ -60,7 +61,7 @@ func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLTyp
cn := ""
for _, detail := range broker.Details {
if detail.IP == host {
if *detail.IP == host {
cn = detail.CN
break
}
@ -77,32 +78,37 @@ func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLTyp
// Select a broker for use when creating a check, if a specific broker
// was not specified.
func (cm *CheckManager) selectBroker() (*api.Broker, error) {
var brokerList []api.Broker
var brokerList *[]api.Broker
var err error
enterpriseType := "enterprise"
if len(cm.brokerSelectTag) > 0 {
brokerList, err = cm.apih.FetchBrokerListByTag(cm.brokerSelectTag)
filter := api.SearchFilterType{
"f__tags_has": cm.brokerSelectTag,
}
brokerList, err = cm.apih.SearchBrokers(nil, &filter)
if err != nil {
return nil, err
}
} else {
brokerList, err = cm.apih.FetchBrokerList()
brokerList, err = cm.apih.FetchBrokers()
if err != nil {
return nil, err
}
}
if len(brokerList) == 0 {
if len(*brokerList) == 0 {
return nil, fmt.Errorf("zero brokers found")
}
validBrokers := make(map[string]api.Broker)
haveEnterprise := false
for _, broker := range brokerList {
for _, broker := range *brokerList {
broker := broker
if cm.isValidBroker(&broker) {
validBrokers[broker.Cid] = broker
if broker.Type == "enterprise" {
validBrokers[broker.CID] = broker
if broker.Type == enterpriseType {
haveEnterprise = true
}
}
@ -110,14 +116,14 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) {
if haveEnterprise { // eliminate non-enterprise brokers from valid brokers
for k, v := range validBrokers {
if v.Type != "enterprise" {
if v.Type != enterpriseType {
delete(validBrokers, k)
}
}
}
if len(validBrokers) == 0 {
return nil, fmt.Errorf("found %d broker(s), zero are valid", len(brokerList))
return nil, fmt.Errorf("found %d broker(s), zero are valid", len(*brokerList))
}
validBrokerKeys := reflect.ValueOf(validBrokers).MapKeys()
@ -134,8 +140,20 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) {
// Verify broker supports the check type to be used
func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details *api.BrokerDetail) bool {
baseType := string(checkType)
for _, module := range details.Modules {
if CheckTypeType(module) == checkType {
if module == baseType {
return true
}
}
if idx := strings.Index(baseType, ":"); idx > 0 {
baseType = baseType[0:idx]
}
for _, module := range details.Modules {
if module == baseType {
return true
}
}
@ -146,10 +164,17 @@ func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details
// Is the broker valid (active, supports check type, and reachable)
func (cm *CheckManager) isValidBroker(broker *api.Broker) bool {
brokerHost := ""
brokerPort := ""
var brokerHost string
var brokerPort string
if broker.Type != "circonus" && broker.Type != "enterprise" {
return false
}
valid := false
for _, detail := range broker.Details {
detail := detail
// broker must be active
if detail.Status != statusActive {
@ -168,49 +193,50 @@ func (cm *CheckManager) isValidBroker(broker *api.Broker) bool {
}
if detail.ExternalPort != 0 {
brokerPort = strconv.Itoa(detail.ExternalPort)
brokerPort = strconv.Itoa(int(detail.ExternalPort))
} else {
if detail.Port != 0 {
brokerPort = strconv.Itoa(detail.Port)
if detail.Port != nil && *detail.Port != 0 {
brokerPort = strconv.Itoa(int(*detail.Port))
} else {
brokerPort = "43191"
}
}
if detail.ExternalHost != "" {
brokerHost = detail.ExternalHost
} else {
brokerHost = detail.IP
if detail.ExternalHost != nil && *detail.ExternalHost != "" {
brokerHost = *detail.ExternalHost
} else if detail.IP != nil && *detail.IP != "" {
brokerHost = *detail.IP
}
// broker must be reachable and respond within designated time
conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%s", brokerHost, brokerPort), cm.brokerMaxResponseTime)
if err != nil {
if detail.CN != "trap.noit.circonus.net" {
if cm.Debug {
cm.Log.Printf("[DEBUG] Broker '%s' unable to connect, %v\n", broker.Name, err)
}
continue // not able to reach the broker (or response slow enough for it to be considered not usable)
}
// if circonus trap broker, try port 443
if brokerHost == "" {
cm.Log.Printf("[WARN] Broker '%s' instance %s has no IP or external host set", broker.Name, detail.CN)
continue
}
if brokerHost == "trap.noit.circonus.net" && brokerPort != "443" {
brokerPort = "443"
conn, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%s", detail.CN, brokerPort), cm.brokerMaxResponseTime)
if err != nil {
if cm.Debug {
cm.Log.Printf("[DEBUG] Broker '%s' unable to connect %v\n", broker.Name, err)
}
continue // not able to reach the broker on 443 either (or response slow enough for it to be considered not usable)
}
retries := 5
for attempt := 1; attempt <= retries; attempt++ {
// broker must be reachable and respond within designated time
conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%s", brokerHost, brokerPort), cm.brokerMaxResponseTime)
if err == nil {
conn.Close()
valid = true
break
}
}
conn.Close()
if cm.Debug {
cm.Log.Printf("[DEBUG] Broker '%s' is valid\n", broker.Name)
cm.Log.Printf("[WARN] Broker '%s' unable to connect, %v. Retrying in 2 seconds, attempt %d of %d.", broker.Name, err, attempt, retries)
time.Sleep(2 * time.Second)
}
valid = true
break
if valid {
if cm.Debug {
cm.Log.Printf("[DEBUG] Broker '%s' is valid\n", broker.Name)
}
break
}
}
return valid
}
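
For reference, a minimal sketch (not part of the vendored source) of the broker lookup the reworked selectBroker performs when a select tag is configured, assuming a ready *api.API client and an "fmt" import; "ec2:us-east-1" is an illustrative tag:

func brokersByTag(client *api.API) error {
	filter := api.SearchFilterType{"f__tags_has": []string{"ec2:us-east-1"}}
	brokers, err := client.SearchBrokers(nil, &filter)
	if err != nil {
		return err
	}
	for _, b := range *brokers {
		// only "circonus" and "enterprise" broker types are considered valid above
		fmt.Println(b.CID, b.Name, b.Type)
	}
	return nil
}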

View File

@ -7,6 +7,7 @@ package checkmgr
import (
"crypto/x509"
"encoding/json"
"errors"
"fmt"
)
@ -41,17 +42,22 @@ type CACert struct {
}
// loadCACert loads the CA cert for the broker designated by the submission url
func (cm *CheckManager) loadCACert() {
func (cm *CheckManager) loadCACert() error {
if cm.certPool != nil {
return
return nil
}
cm.certPool = x509.NewCertPool()
cert, err := cm.fetchCert()
if err != nil {
if cm.Debug {
cm.Log.Printf("[DEBUG] Unable to fetch ca.crt, using default. %+v\n", err)
var cert []byte
var err error
if cm.enabled {
// only attempt to retrieve broker CA cert if
// the check is being managed.
cert, err = cm.fetchCert()
if err != nil {
return err
}
}
@ -60,12 +66,14 @@ func (cm *CheckManager) loadCACert() {
}
cm.certPool.AppendCertsFromPEM(cert)
return nil
}
// fetchCert fetches CA certificate using Circonus API
func (cm *CheckManager) fetchCert() ([]byte, error) {
if !cm.enabled {
return circonusCA, nil
return nil, errors.New("check manager is not enabled")
}
response, err := cm.apih.Get("/pki/ca.crt")

View File

@ -10,11 +10,13 @@ import (
"encoding/hex"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"time"
"github.com/circonus-labs/circonus-gometrics/api"
"github.com/circonus-labs/circonus-gometrics/api/config"
)
// UpdateCheck determines if the check needs to be updated (new metrics, tags, etc.)
@ -35,7 +37,8 @@ func (cm *CheckManager) UpdateCheck(newMetrics map[string]*api.CheckBundleMetric
}
// refresh check bundle (in case there were changes made by other apps or in UI)
checkBundle, err := cm.apih.FetchCheckBundleByCID(api.CIDType(cm.checkBundle.Cid))
cid := cm.checkBundle.CID
checkBundle, err := cm.apih.FetchCheckBundle(api.CIDType(&cid))
if err != nil {
cm.Log.Printf("[ERROR] unable to fetch up-to-date check bundle %v", err)
return
@ -44,6 +47,8 @@ func (cm *CheckManager) UpdateCheck(newMetrics map[string]*api.CheckBundleMetric
cm.checkBundle = checkBundle
cm.cbmu.Unlock()
// check metric_limit and see if it's 0; if so, don't even bother to try to update the check.
cm.addNewMetrics(newMetrics)
if len(cm.metricTags) > 0 {
@ -105,7 +110,7 @@ func (cm *CheckManager) initializeTrapURL() error {
}
if !cm.enabled {
return errors.New("Unable to initialize trap, check manager is disabled.")
return errors.New("unable to initialize trap, check manager is disabled")
}
var err error
@ -114,12 +119,12 @@ func (cm *CheckManager) initializeTrapURL() error {
var broker *api.Broker
if cm.checkSubmissionURL != "" {
check, err = cm.apih.FetchCheckBySubmissionURL(cm.checkSubmissionURL)
check, err = cm.fetchCheckBySubmissionURL(cm.checkSubmissionURL)
if err != nil {
return err
}
if !check.Active {
return fmt.Errorf("[ERROR] Check ID %v is not active", check.Cid)
return fmt.Errorf("[ERROR] Check ID %v is not active", check.CID)
}
// extract check id from check object returned from looking up using submission url
// set m.CheckId to the id
@ -128,30 +133,44 @@ func (cm *CheckManager) initializeTrapURL() error {
// unless the new submission url can be fetched with the API (which is no
// longer possible using the original submission url)
var id int
id, err = strconv.Atoi(strings.Replace(check.Cid, "/check/", "", -1))
id, err = strconv.Atoi(strings.Replace(check.CID, "/check/", "", -1))
if err == nil {
cm.checkID = api.IDType(id)
cm.checkSubmissionURL = ""
} else {
cm.Log.Printf(
"[WARN] SubmissionUrl check to Check ID: unable to convert %s to int %q\n",
check.Cid, err)
check.CID, err)
}
} else if cm.checkID > 0 {
check, err = cm.apih.FetchCheckByID(cm.checkID)
cid := fmt.Sprintf("/check/%d", cm.checkID)
check, err = cm.apih.FetchCheck(api.CIDType(&cid))
if err != nil {
return err
}
if !check.Active {
return fmt.Errorf("[ERROR] Check ID %v is not active", check.Cid)
return fmt.Errorf("[ERROR] Check ID %v is not active", check.CID)
}
} else {
searchCriteria := fmt.Sprintf(
"(active:1)(host:\"%s\")(type:\"%s\")(tags:%s)(notes:%s)",
cm.checkTarget, cm.checkType, strings.Join(cm.checkSearchTag, ","), fmt.Sprintf("cgm_instanceid=%s", cm.checkInstanceID))
checkBundle, err = cm.checkBundleSearch(searchCriteria)
if err != nil {
return err
if checkBundle == nil {
// old search (instanceid as check.target)
searchCriteria := fmt.Sprintf(
"(active:1)(type:\"%s\")(host:\"%s\")(tags:%s)", cm.checkType, cm.checkTarget, strings.Join(cm.checkSearchTag, ","))
checkBundle, err = cm.checkBundleSearch(searchCriteria, map[string][]string{})
if err != nil {
return err
}
}
if checkBundle == nil {
// new search (check.target != instanceid, instanceid encoded in notes field)
searchCriteria := fmt.Sprintf(
"(active:1)(type:\"%s\")(tags:%s)", cm.checkType, strings.Join(cm.checkSearchTag, ","))
filterCriteria := map[string][]string{"f_notes": {*cm.getNotes()}}
checkBundle, err = cm.checkBundleSearch(searchCriteria, filterCriteria)
if err != nil {
return err
}
}
if checkBundle == nil {
@ -166,7 +185,8 @@ func (cm *CheckManager) initializeTrapURL() error {
if checkBundle == nil {
if check != nil {
checkBundle, err = cm.apih.FetchCheckBundleByCID(api.CIDType(check.CheckBundleCid))
cid := check.CheckBundleCID
checkBundle, err = cm.apih.FetchCheckBundle(api.CIDType(&cid))
if err != nil {
return err
}
@ -176,7 +196,8 @@ func (cm *CheckManager) initializeTrapURL() error {
}
if broker == nil {
broker, err = cm.apih.FetchBrokerByCID(api.CIDType(checkBundle.Brokers[0]))
cid := checkBundle.Brokers[0]
broker, err = cm.apih.FetchBroker(api.CIDType(&cid))
if err != nil {
return err
}
@ -188,7 +209,14 @@ func (cm *CheckManager) initializeTrapURL() error {
// determine the trap url to which metrics should be PUT
if checkBundle.Type == "httptrap" {
cm.trapURL = api.URLType(checkBundle.Config.SubmissionURL)
if turl, found := checkBundle.Config[config.SubmissionURL]; found {
cm.trapURL = api.URLType(turl)
} else {
if cm.Debug {
cm.Log.Printf("Missing config.%s %+v", config.SubmissionURL, checkBundle)
}
return fmt.Errorf("[ERROR] Unable to use check, no %s in config", config.SubmissionURL)
}
} else {
// build a submission_url for non-httptrap checks out of mtev_reverse url
if len(checkBundle.ReverseConnectURLs) == 0 {
@ -197,7 +225,14 @@ func (cm *CheckManager) initializeTrapURL() error {
mtevURL := checkBundle.ReverseConnectURLs[0]
mtevURL = strings.Replace(mtevURL, "mtev_reverse", "https", 1)
mtevURL = strings.Replace(mtevURL, "check", "module/httptrap", 1)
cm.trapURL = api.URLType(fmt.Sprintf("%s/%s", mtevURL, checkBundle.Config.ReverseSecret))
if rs, found := checkBundle.Config[config.ReverseSecretKey]; found {
cm.trapURL = api.URLType(fmt.Sprintf("%s/%s", mtevURL, rs))
} else {
if cm.Debug {
cm.Log.Printf("Missing config.%s %+v", config.ReverseSecretKey, checkBundle)
}
return fmt.Errorf("[ERROR] Unable to use check, no %s in config", config.ReverseSecretKey)
}
}
// used when sending as "ServerName" get around certs not having IP SANS
@ -208,26 +243,39 @@ func (cm *CheckManager) initializeTrapURL() error {
}
cm.trapCN = BrokerCNType(cn)
if cm.enabled {
u, err := url.Parse(string(cm.trapURL))
if err != nil {
return err
}
if u.Scheme == "https" {
if err := cm.loadCACert(); err != nil {
return err
}
}
}
cm.trapLastUpdate = time.Now()
return nil
}
// Search for a check bundle given a predetermined set of criteria
func (cm *CheckManager) checkBundleSearch(criteria string) (*api.CheckBundle, error) {
checkBundles, err := cm.apih.CheckBundleSearch(api.SearchQueryType(criteria))
func (cm *CheckManager) checkBundleSearch(criteria string, filter map[string][]string) (*api.CheckBundle, error) {
search := api.SearchQueryType(criteria)
checkBundles, err := cm.apih.SearchCheckBundles(&search, &filter)
if err != nil {
return nil, err
}
if len(checkBundles) == 0 {
if len(*checkBundles) == 0 {
return nil, nil // trigger creation of a new check
}
numActive := 0
checkID := -1
for idx, check := range checkBundles {
for idx, check := range *checkBundles {
if check.Status == statusActive {
numActive++
checkID = idx
@ -235,10 +283,12 @@ func (cm *CheckManager) checkBundleSearch(criteria string) (*api.CheckBundle, er
}
if numActive > 1 {
return nil, fmt.Errorf("[ERROR] Multiple possibilities multiple check bundles match criteria %s\n", criteria)
return nil, fmt.Errorf("[ERROR] multiple check bundles match criteria %s", criteria)
}
return &checkBundles[checkID], nil
bundle := (*checkBundles)[checkID]
return &bundle, nil
}
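
To make the two search passes in initializeTrapURL above concrete, a small sketch (not part of the vendored source) of the criteria strings and filter they build; "myhost:myapp" and "service:myapp" are illustrative instance-ID and search-tag values, and "fmt"/"strings" imports are assumed:

func exampleSearchCriteria() (string, string, map[string][]string) {
	instanceID := "myhost:myapp"
	searchTag := []string{"service:myapp"}

	// old search: the instance id doubled as check.target
	oldCriteria := fmt.Sprintf(
		"(active:1)(type:\"httptrap\")(host:\"%s\")(tags:%s)",
		instanceID, strings.Join(searchTag, ","))

	// new search: the instance id is carried in the notes field instead
	newCriteria := fmt.Sprintf(
		"(active:1)(type:\"httptrap\")(tags:%s)", strings.Join(searchTag, ","))
	newFilter := map[string][]string{
		"f_notes": {fmt.Sprintf("cgm_instanceid|%s", instanceID)},
	}
	return oldCriteria, newCriteria, newFilter
}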
// Create a new check to receive metrics
@ -257,22 +307,39 @@ func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error)
return nil, nil, err
}
config := api.CheckBundle{
Brokers: []string{broker.Cid},
Config: api.CheckBundleConfig{AsyncMetrics: true, Secret: checkSecret},
chkcfg := &api.CheckBundle{
Brokers: []string{broker.CID},
Config: make(map[config.Key]string),
DisplayName: string(cm.checkDisplayName),
Metrics: []api.CheckBundleMetric{},
MetricLimit: 0,
Notes: fmt.Sprintf("cgm_instanceid=%s", cm.checkInstanceID),
MetricLimit: config.DefaultCheckBundleMetricLimit,
Notes: cm.getNotes(),
Period: 60,
Status: statusActive,
Tags: append(cm.checkSearchTag, cm.checkTags...),
Target: cm.checkTarget,
Target: string(cm.checkTarget),
Timeout: 10,
Type: string(cm.checkType),
}
checkBundle, err := cm.apih.CreateCheckBundle(config)
if len(cm.customConfigFields) > 0 {
for fld, val := range cm.customConfigFields {
chkcfg.Config[config.Key(fld)] = val
}
}
//
// use the default config settings if these are NOT set by user configuration
//
if val, ok := chkcfg.Config[config.AsyncMetrics]; !ok || val == "" {
chkcfg.Config[config.AsyncMetrics] = "true"
}
if val, ok := chkcfg.Config[config.Secret]; !ok || val == "" {
chkcfg.Config[config.Secret] = checkSecret
}
checkBundle, err := cm.apih.CreateCheckBundle(chkcfg)
if err != nil {
return nil, nil, err
}
@ -290,3 +357,64 @@ func (cm *CheckManager) makeSecret() (string, error) {
hash.Write(x)
return hex.EncodeToString(hash.Sum(nil))[0:16], nil
}
func (cm *CheckManager) getNotes() *string {
notes := fmt.Sprintf("cgm_instanceid|%s", cm.checkInstanceID)
return &notes
}
// FetchCheckBySubmissionURL fetch a check configuration by submission_url
func (cm *CheckManager) fetchCheckBySubmissionURL(submissionURL api.URLType) (*api.Check, error) {
if string(submissionURL) == "" {
return nil, errors.New("[ERROR] Invalid submission URL (blank)")
}
u, err := url.Parse(string(submissionURL))
if err != nil {
return nil, err
}
// valid trap url: scheme://host[:port]/module/httptrap/UUID/secret
// does it smell like a valid trap url path
if !strings.Contains(u.Path, "/module/httptrap/") {
return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', unrecognized path", submissionURL)
}
// extract uuid
pathParts := strings.Split(strings.Replace(u.Path, "/module/httptrap/", "", 1), "/")
if len(pathParts) != 2 {
return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', UUID not where expected", submissionURL)
}
uuid := pathParts[0]
filter := api.SearchFilterType{"f__check_uuid": []string{uuid}}
checks, err := cm.apih.SearchChecks(nil, &filter)
if err != nil {
return nil, err
}
if len(*checks) == 0 {
return nil, fmt.Errorf("[ERROR] No checks found with UUID %s", uuid)
}
numActive := 0
checkID := -1
for idx, check := range *checks {
if check.Active {
numActive++
checkID = idx
}
}
if numActive > 1 {
return nil, fmt.Errorf("[ERROR] Multiple checks with same UUID %s", uuid)
}
check := (*checks)[checkID]
return &check, nil
}
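
A short worked example (not part of the vendored source) of the UUID extraction fetchCheckBySubmissionURL performs; the submission URL is illustrative and "fmt"/"net/url"/"strings" imports are assumed:

func exampleUUIDExtraction() (string, error) {
	submissionURL := "https://trap.example.net/module/httptrap/01234567-89ab-cdef-0123-456789abcdef/mys3cr3t"
	u, err := url.Parse(submissionURL)
	if err != nil {
		return "", err
	}
	// trimming the prefix leaves "01234567-89ab-cdef-0123-456789abcdef/mys3cr3t"
	parts := strings.Split(strings.Replace(u.Path, "/module/httptrap/", "", 1), "/")
	if len(parts) != 2 {
		return "", fmt.Errorf("unexpected path %q", u.Path)
	}
	return parts[0], nil // the check UUID; parts[1] is the check secret
}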

View File

@ -2,25 +2,27 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package checkmgr provides a check management interace to circonus-gometrics
// Package checkmgr provides a check management interface to circonus-gometrics
package checkmgr
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/circonus-labs/circonus-gometrics/api"
"github.com/pkg/errors"
"github.com/tv42/httpunix"
)
// Check management offers:
@ -35,7 +37,7 @@ import (
// - configuration parameters other than Check.SubmissionUrl, Debug and Log are ignored
// - note: SubmissionUrl is **required** in this case as there is no way to derive w/o api
// configure with api token - check management enabled
// - all otehr configuration parameters affect how the trap url is obtained
// - all other configuration parameters affect how the trap url is obtained
// 1. provided (Check.SubmissionUrl)
// 2. via check lookup (CheckConfig.Id)
// 3. via a search using CheckConfig.InstanceId + CheckConfig.SearchTag
@ -59,12 +61,15 @@ type CheckConfig struct {
// used to search for a check to use
// used as check.target when creating a check
InstanceID string
// explicitly set check.target (default: instance id)
TargetHost string
// a custom display name for the check (as viewed in UI Checks)
// default: instance id
DisplayName string
// unique check searching tag (or tags)
// used to search for a check to use (combined with instanceid)
// used as a regular tag when creating a check
SearchTag string
// a custom display name for the check (as viewed in UI Checks)
DisplayName string
// httptrap check secret (for creating a check)
Secret string
// additional tags to add to a check (when creating a check)
@ -82,6 +87,10 @@ type CheckConfig struct {
// overrides the behavior and will re-activate the metric when it is
// encountered. "(true|false)", default "false"
ForceMetricActivation string
// Type of check to use (default: httptrap)
Type string
// Custom check config fields (default: none)
CustomConfigFields map[string]string
}
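
A minimal sketch (not part of the vendored source) of a checkmgr.Config exercising the new fields added above; every value shown, including the token and the custom config key, is illustrative:

func exampleCheckConfig() *checkmgr.Config {
	cfg := &checkmgr.Config{}
	cfg.API.TokenKey = "00000000-0000-0000-0000-000000000000" // illustrative token
	cfg.Check.InstanceID = "myhost:myapp"
	cfg.Check.TargetHost = "10.1.2.3" // explicit check.target (new field)
	cfg.Check.DisplayName = "myapp metrics"
	cfg.Check.SearchTag = "service:myapp"
	cfg.Check.Type = "httptrap" // new field; httptrap is also the default
	cfg.Check.CustomConfigFields = map[string]string{ // new field
		"custom_key": "custom_value", // illustrative key/value
	}
	return cfg
}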
// BrokerConfig options for broker
@ -94,6 +103,8 @@ type BrokerConfig struct {
// for a broker to be considered viable it must respond to a
// connection attempt within this amount of time e.g. 200ms, 2s, 1m
MaxResponseTime string
// TLS configuration to use when communicating with the broker
TLSConfig *tls.Config
}
// Config options
@ -115,6 +126,9 @@ type CheckTypeType string
// CheckInstanceIDType check instance id
type CheckInstanceIDType string
// CheckTargetType check target/host
type CheckTargetType string
// CheckSecretType check secret
type CheckSecretType string
@ -134,14 +148,18 @@ type CheckManager struct {
Debug bool
apih *api.API
initialized bool
initializedmu sync.RWMutex
// check
checkType CheckTypeType
checkID api.IDType
checkInstanceID CheckInstanceIDType
checkTarget string
checkTarget CheckTargetType
checkSearchTag api.TagType
checkSecret CheckSecretType
checkTags api.TagType
customConfigFields map[string]string
checkSubmissionURL api.URLType
checkDisplayName CheckDisplayNameType
forceMetricActivation bool
@ -155,36 +173,45 @@ type CheckManager struct {
brokerID api.IDType
brokerSelectTag api.TagType
brokerMaxResponseTime time.Duration
brokerTLS *tls.Config
// state
checkBundle *api.CheckBundle
cbmu sync.Mutex
availableMetrics map[string]bool
trapURL api.URLType
trapCN BrokerCNType
trapLastUpdate time.Time
trapMaxURLAge time.Duration
trapmu sync.Mutex
certPool *x509.CertPool
checkBundle *api.CheckBundle
cbmu sync.Mutex
availableMetrics map[string]bool
availableMetricsmu sync.Mutex
trapURL api.URLType
trapCN BrokerCNType
trapLastUpdate time.Time
trapMaxURLAge time.Duration
trapmu sync.Mutex
certPool *x509.CertPool
sockRx *regexp.Regexp
}
// Trap config
type Trap struct {
URL *url.URL
TLS *tls.Config
URL *url.URL
TLS *tls.Config
IsSocket bool
SockTransport *httpunix.Transport
}
// NewCheckManager returns a new check manager
func NewCheckManager(cfg *Config) (*CheckManager, error) {
return New(cfg)
}
// New returns a new check manager
func New(cfg *Config) (*CheckManager, error) {
if cfg == nil {
return nil, errors.New("Invalid Check Manager configuration (nil).")
return nil, errors.New("invalid Check Manager configuration (nil)")
}
cm := &CheckManager{
enabled: false,
}
cm := &CheckManager{enabled: true, initialized: false}
// Setup logging for check manager
cm.Debug = cfg.Debug
cm.Log = cfg.Log
if cm.Debug && cm.Log == nil {
@ -194,38 +221,44 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
}
{
rx, err := regexp.Compile(`^http\+unix://(?P<sockfile>.+)/write/(?P<id>.+)$`)
if err != nil {
return nil, errors.Wrap(err, "compiling socket regex")
}
cm.sockRx = rx
}
if cfg.Check.SubmissionURL != "" {
cm.checkSubmissionURL = api.URLType(cfg.Check.SubmissionURL)
}
// Blank API Token *disables* check management
if cfg.API.TokenKey == "" {
if cm.checkSubmissionURL == "" {
return nil, errors.New("Invalid check manager configuration (no API token AND no submission url).")
}
if err := cm.initializeTrapURL(); err != nil {
return nil, err
}
return cm, nil
cm.enabled = false
}
// enable check manager
cm.enabled = true
// initialize api handle
cfg.API.Debug = cm.Debug
cfg.API.Log = cm.Log
apih, err := api.NewAPI(&cfg.API)
if err != nil {
return nil, err
if !cm.enabled && cm.checkSubmissionURL == "" {
return nil, errors.New("invalid check manager configuration (no API token AND no submission url)")
}
if cm.enabled {
// initialize api handle
cfg.API.Debug = cm.Debug
cfg.API.Log = cm.Log
apih, err := api.New(&cfg.API)
if err != nil {
return nil, errors.Wrap(err, "initializing api client")
}
cm.apih = apih
}
cm.apih = apih
// initialize check related data
cm.checkType = defaultCheckType
if cfg.Check.Type != "" {
cm.checkType = CheckTypeType(cfg.Check.Type)
} else {
cm.checkType = defaultCheckType
}
idSetting := "0"
if cfg.Check.ID != "" {
@ -233,11 +266,12 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
}
id, err := strconv.Atoi(idSetting)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "converting check id")
}
cm.checkID = api.IDType(id)
cm.checkInstanceID = CheckInstanceIDType(cfg.Check.InstanceID)
cm.checkTarget = CheckTargetType(cfg.Check.TargetHost)
cm.checkDisplayName = CheckDisplayNameType(cfg.Check.DisplayName)
cm.checkSecret = CheckSecretType(cfg.Check.Secret)
@ -247,7 +281,7 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
}
fm, err := strconv.ParseBool(fma)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "parsing force metric activation")
}
cm.forceMetricActivation = fm
@ -259,7 +293,12 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
if cm.checkInstanceID == "" {
cm.checkInstanceID = CheckInstanceIDType(fmt.Sprintf("%s:%s", hn, an))
}
cm.checkTarget = hn
if cm.checkDisplayName == "" {
cm.checkDisplayName = CheckDisplayNameType(cm.checkInstanceID)
}
if cm.checkTarget == "" {
cm.checkTarget = CheckTargetType(cm.checkInstanceID)
}
if cfg.Check.SearchTag == "" {
cm.checkSearchTag = []string{fmt.Sprintf("service:%s", an)}
@ -271,8 +310,11 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
cm.checkTags = strings.Split(strings.Replace(cfg.Check.Tags, " ", "", -1), ",")
}
if cm.checkDisplayName == "" {
cm.checkDisplayName = CheckDisplayNameType(fmt.Sprintf("%s", string(cm.checkInstanceID)))
cm.customConfigFields = make(map[string]string)
if len(cfg.Check.CustomConfigFields) > 0 {
for fld, val := range cfg.Check.CustomConfigFields {
cm.customConfigFields[fld] = val
}
}
dur := cfg.Check.MaxURLAge
@ -281,19 +323,18 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
}
maxDur, err := time.ParseDuration(dur)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "parsing max url age")
}
cm.trapMaxURLAge = maxDur
// setup broker
idSetting = "0"
if cfg.Broker.ID != "" {
idSetting = cfg.Broker.ID
}
id, err = strconv.Atoi(idSetting)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "parsing broker id")
}
cm.brokerID = api.IDType(id)
@ -307,41 +348,127 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
}
maxDur, err = time.ParseDuration(dur)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "parsing broker max response time")
}
cm.brokerMaxResponseTime = maxDur
// add user specified tls config for broker if provided
cm.brokerTLS = cfg.Broker.TLSConfig
// metrics
cm.availableMetrics = make(map[string]bool)
cm.metricTags = make(map[string][]string)
if err := cm.initializeTrapURL(); err != nil {
return nil, err
}
return cm, nil
}
// GetTrap return the trap url
func (cm *CheckManager) GetTrap() (*Trap, error) {
if cm.trapURL == "" {
if err := cm.initializeTrapURL(); err != nil {
return nil, err
// Initialize for sending metrics
func (cm *CheckManager) Initialize() {
// if not managing the check, quicker initialization
if !cm.enabled {
err := cm.initializeTrapURL()
if err == nil {
cm.initializedmu.Lock()
cm.initialized = true
cm.initializedmu.Unlock()
} else {
cm.Log.Printf("[WARN] error initializing trap %s", err.Error())
}
return
}
// background initialization when we have to reach out to the api
go func() {
cm.apih.EnableExponentialBackoff()
err := cm.initializeTrapURL()
if err == nil {
cm.initializedmu.Lock()
cm.initialized = true
cm.initializedmu.Unlock()
} else {
cm.Log.Printf("[WARN] error initializing trap %s", err.Error())
}
cm.apih.DisableExponentialBackoff()
}()
}
// IsReady reflects if the check has been initialized and metrics can be sent to Circonus
func (cm *CheckManager) IsReady() bool {
cm.initializedmu.RLock()
defer cm.initializedmu.RUnlock()
return cm.initialized
}
// GetSubmissionURL returns submission url for circonus
func (cm *CheckManager) GetSubmissionURL() (*Trap, error) {
if cm.trapURL == "" {
return nil, errors.Errorf("get submission url - submission url unavailable")
}
trap := &Trap{}
u, err := url.Parse(string(cm.trapURL))
if err != nil {
return nil, err
return nil, errors.Wrap(err, "get submission url")
}
trap.URL = u
if u.Scheme == "http+unix" {
service := "circonus-agent"
sockPath := ""
metricID := ""
subNames := cm.sockRx.SubexpNames()
matches := cm.sockRx.FindAllStringSubmatch(string(cm.trapURL), -1)
for _, match := range matches {
for idx, val := range match {
switch subNames[idx] {
case "sockfile":
sockPath = val
case "id":
metricID = val
}
}
}
if sockPath == "" || metricID == "" {
return nil, errors.Errorf("get submission url - invalid socket url (%s)", cm.trapURL)
}
u, err = url.Parse(fmt.Sprintf("http+unix://%s/write/%s", service, metricID))
if err != nil {
return nil, errors.Wrap(err, "get submission url")
}
trap.URL = u
trap.SockTransport = &httpunix.Transport{
DialTimeout: 100 * time.Millisecond,
RequestTimeout: 1 * time.Second,
ResponseHeaderTimeout: 1 * time.Second,
}
trap.SockTransport.RegisterLocation(service, sockPath)
trap.IsSocket = true
}
if u.Scheme == "https" {
// preference user-supplied TLS configuration
if cm.brokerTLS != nil {
trap.TLS = cm.brokerTLS
return trap, nil
}
// api.circonus.com uses a public CA signed certificate
// trap.noit.circonus.net uses Circonus CA private certificate
// enterprise brokers use private CA certificate
if trap.URL.Hostname() == "api.circonus.com" {
return trap, nil
}
if cm.certPool == nil {
cm.loadCACert()
if err := cm.loadCACert(); err != nil {
return nil, errors.Wrap(err, "get submission url")
}
}
t := &tls.Config{
RootCAs: cm.certPool,
@ -362,18 +489,19 @@ func (cm *CheckManager) ResetTrap() error {
}
cm.trapURL = ""
cm.certPool = nil
err := cm.initializeTrapURL()
return err
cm.certPool = nil // force re-fetching CA cert (if custom TLS config not supplied)
return cm.initializeTrapURL()
}
// RefreshTrap check when the last time the URL was reset, reset if needed
func (cm *CheckManager) RefreshTrap() {
func (cm *CheckManager) RefreshTrap() error {
if cm.trapURL == "" {
return
return nil
}
if time.Since(cm.trapLastUpdate) >= cm.trapMaxURLAge {
cm.ResetTrap()
return cm.ResetTrap()
}
return nil
}
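
A small sketch (not part of the vendored source) of the socket-style submission URL format matched by the sockRx regular expression above; the socket path and check id are illustrative and a "regexp" import is assumed:

func exampleSocketURL() (sockFile, checkID string) {
	rx := regexp.MustCompile(`^http\+unix://(?P<sockfile>.+)/write/(?P<id>.+)$`)
	m := rx.FindStringSubmatch("http+unix:///var/run/circonus-agent.sock/write/mycheck")
	if m == nil {
		return "", ""
	}
	// GetSubmissionURL registers the sockfile with an httpunix.Transport and
	// rewrites the URL to http+unix://circonus-agent/write/<id>
	return m[1], m[2] // "/var/run/circonus-agent.sock", "mycheck"
}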

View File

@ -10,12 +10,17 @@ import (
// IsMetricActive checks whether a given metric name is currently active (enabled)
func (cm *CheckManager) IsMetricActive(name string) bool {
active, _ := cm.availableMetrics[name]
return active
cm.availableMetricsmu.Lock()
defer cm.availableMetricsmu.Unlock()
return cm.availableMetrics[name]
}
// ActivateMetric determines if a given metric should be activated
func (cm *CheckManager) ActivateMetric(name string) bool {
cm.availableMetricsmu.Lock()
defer cm.availableMetricsmu.Unlock()
active, exists := cm.availableMetrics[name]
if !exists {
@ -33,41 +38,57 @@ func (cm *CheckManager) ActivateMetric(name string) bool {
func (cm *CheckManager) AddMetricTags(metricName string, tags []string, appendTags bool) bool {
tagsUpdated := false
if len(tags) == 0 {
if appendTags && len(tags) == 0 {
return tagsUpdated
}
if _, exists := cm.metricTags[metricName]; !exists {
currentTags, exists := cm.metricTags[metricName]
if !exists {
foundMetric := false
for _, metric := range cm.checkBundle.Metrics {
if metric.Name == metricName {
foundMetric = true
cm.metricTags[metricName] = metric.Tags
break
if cm.checkBundle != nil {
for _, metric := range cm.checkBundle.Metrics {
if metric.Name == metricName {
foundMetric = true
currentTags = metric.Tags
break
}
}
}
if !foundMetric {
cm.metricTags[metricName] = []string{}
currentTags = []string{}
}
}
action := "no new"
action := ""
if appendTags {
numNewTags := countNewTags(cm.metricTags[metricName], tags)
numNewTags := countNewTags(currentTags, tags)
if numNewTags > 0 {
action = "Added"
cm.metricTags[metricName] = append(cm.metricTags[metricName], tags...)
currentTags = append(currentTags, tags...)
tagsUpdated = true
}
} else {
action = "Set"
cm.metricTags[metricName] = tags
tagsUpdated = true
if len(tags) != len(currentTags) {
action = "Set"
currentTags = tags
tagsUpdated = true
} else {
numNewTags := countNewTags(currentTags, tags)
if numNewTags > 0 {
action = "Set"
currentTags = tags
tagsUpdated = true
}
}
}
if cm.Debug {
if tagsUpdated {
cm.metricTags[metricName] = currentTags
}
if cm.Debug && action != "" {
cm.Log.Printf("[DEBUG] %s metric tag(s) %s %v\n", action, metricName, tags)
}
@ -116,7 +137,9 @@ func (cm *CheckManager) inventoryMetrics() {
for _, metric := range cm.checkBundle.Metrics {
availableMetrics[metric.Name] = metric.Status == "active"
}
cm.availableMetricsmu.Lock()
cm.availableMetrics = availableMetrics
cm.availableMetricsmu.Unlock()
}
// countNewTags returns a count of new tags which do not exist in the current list of tags

View File

@ -30,22 +30,35 @@
package circonusgometrics
import (
"errors"
"bufio"
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/circonus-labs/circonus-gometrics/api"
"github.com/circonus-labs/circonus-gometrics/checkmgr"
"github.com/pkg/errors"
)
const (
defaultFlushInterval = "10s" // 10 * time.Second
)
// Metric defines an individual metric
type Metric struct {
Type string `json:"_type"`
Value interface{} `json:"_value"`
}
// Metrics holds host metrics
type Metrics map[string]Metric
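
A minimal in-package sketch (not part of the vendored source) of the wire format these types produce; metric names and values are illustrative and an "encoding/json" import is assumed:

func exampleWireFormat() ([]byte, error) {
	payload := Metrics{
		"requests`total": {Type: "L", Value: uint64(1203)},
		"status`text":    {Type: "s", Value: "ok"},
	}
	// marshals to {"requests`total":{"_type":"L","_value":1203},
	//              "status`text":{"_type":"s","_value":"ok"}}
	return json.Marshal(payload)
}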
// Config options for circonus-gometrics
type Config struct {
Log *log.Logger
@ -58,14 +71,22 @@ type Config struct {
// API, Check and Broker configuration options
CheckManager checkmgr.Config
// how frequently to submit metrics to Circonus, default 10 seconds
// how frequently to submit metrics to Circonus, default 10 seconds.
// Set to 0 to disable automatic flushes and call Flush manually.
Interval string
}
type prevMetrics struct {
metrics *Metrics
metricsmu sync.Mutex
ts time.Time
}
// CirconusMetrics state
type CirconusMetrics struct {
Log *log.Logger
Debug bool
Log *log.Logger
Debug bool
resetCounters bool
resetGauges bool
resetHistograms bool
@ -73,7 +94,9 @@ type CirconusMetrics struct {
flushInterval time.Duration
flushing bool
flushmu sync.Mutex
packagingmu sync.Mutex
check *checkmgr.CheckManager
lastMetrics *prevMetrics
counters map[string]uint64
cm sync.Mutex
@ -81,7 +104,7 @@ type CirconusMetrics struct {
counterFuncs map[string]func() uint64
cfm sync.Mutex
gauges map[string]string
gauges map[string]interface{}
gm sync.Mutex
gaugeFuncs map[string]func() int64
@ -99,118 +122,142 @@ type CirconusMetrics struct {
// NewCirconusMetrics returns a CirconusMetrics instance
func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
return New(cfg)
}
// New returns a CirconusMetrics instance
func New(cfg *Config) (*CirconusMetrics, error) {
if cfg == nil {
return nil, errors.New("Invalid configuration (nil).")
return nil, errors.New("invalid configuration (nil)")
}
cm := &CirconusMetrics{
counters: make(map[string]uint64),
counterFuncs: make(map[string]func() uint64),
gauges: make(map[string]string),
gauges: make(map[string]interface{}),
gaugeFuncs: make(map[string]func() int64),
histograms: make(map[string]*Histogram),
text: make(map[string]string),
textFuncs: make(map[string]func() string),
lastMetrics: &prevMetrics{},
}
cm.Debug = cfg.Debug
cm.Log = cfg.Log
// Logging
{
cm.Debug = cfg.Debug
cm.Log = cfg.Log
if cm.Debug && cfg.Log == nil {
cm.Log = log.New(os.Stderr, "", log.LstdFlags)
}
if cm.Log == nil {
cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
if cm.Debug && cm.Log == nil {
cm.Log = log.New(os.Stderr, "", log.LstdFlags)
}
if cm.Log == nil {
cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
}
}
fi := defaultFlushInterval
if cfg.Interval != "" {
fi = cfg.Interval
// Flush Interval
{
fi := defaultFlushInterval
if cfg.Interval != "" {
fi = cfg.Interval
}
dur, err := time.ParseDuration(fi)
if err != nil {
return nil, errors.Wrap(err, "parsing flush interval")
}
cm.flushInterval = dur
}
dur, err := time.ParseDuration(fi)
if err != nil {
return nil, err
}
cm.flushInterval = dur
var setting bool
// metric resets
cm.resetCounters = true
if cfg.ResetCounters != "" {
if setting, err = strconv.ParseBool(cfg.ResetCounters); err == nil {
cm.resetCounters = setting
setting, err := strconv.ParseBool(cfg.ResetCounters)
if err != nil {
return nil, errors.Wrap(err, "parsing reset counters")
}
cm.resetCounters = setting
}
cm.resetGauges = true
if cfg.ResetGauges != "" {
if setting, err = strconv.ParseBool(cfg.ResetGauges); err == nil {
cm.resetGauges = setting
setting, err := strconv.ParseBool(cfg.ResetGauges)
if err != nil {
return nil, errors.Wrap(err, "parsing reset gauges")
}
cm.resetGauges = setting
}
cm.resetHistograms = true
if cfg.ResetHistograms != "" {
if setting, err = strconv.ParseBool(cfg.ResetHistograms); err == nil {
cm.resetHistograms = setting
setting, err := strconv.ParseBool(cfg.ResetHistograms)
if err != nil {
return nil, errors.Wrap(err, "parsing reset histograms")
}
cm.resetHistograms = setting
}
cm.resetText = true
if cfg.ResetText != "" {
if setting, err = strconv.ParseBool(cfg.ResetText); err == nil {
cm.resetText = setting
setting, err := strconv.ParseBool(cfg.ResetText)
if err != nil {
return nil, errors.Wrap(err, "parsing reset text")
}
cm.resetText = setting
}
cfg.CheckManager.Debug = cm.Debug
cfg.CheckManager.Log = cm.Log
// check manager
{
cfg.CheckManager.Debug = cm.Debug
cfg.CheckManager.Log = cm.Log
check, err := checkmgr.NewCheckManager(&cfg.CheckManager)
if err != nil {
return nil, err
check, err := checkmgr.New(&cfg.CheckManager)
if err != nil {
return nil, errors.Wrap(err, "creating new check manager")
}
cm.check = check
}
cm.check = check
if _, err := cm.check.GetTrap(); err != nil {
return nil, err
// start background initialization
cm.check.Initialize()
// if automatic flush is enabled, start it.
// NOTE: submit will jettison metrics until initialization has completed.
if cm.flushInterval > time.Duration(0) {
go func() {
for range time.NewTicker(cm.flushInterval).C {
cm.Flush()
}
}()
}
return cm, nil
}
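
An in-package usage sketch (not part of the vendored source) of the new constructor and the non-blocking initialization flow; the API token is illustrative and a "time" import is assumed:

func exampleSetup() (*CirconusMetrics, error) {
	cfg := &Config{}
	cfg.CheckManager.API.TokenKey = "00000000-0000-0000-0000-000000000000" // illustrative
	cfg.Interval = "10s" // automatic flush; "0" disables it and Flush must be called manually

	metrics, err := New(cfg)
	if err != nil {
		return nil, err
	}

	// initialization runs in the background; metrics submitted before the
	// check is ready are jettisoned (see submit below)
	for !metrics.Ready() {
		time.Sleep(time.Second)
	}

	metrics.Gauge("queue`depth", 12)
	metrics.RecordValue("request`latency", 0.23)
	metrics.Flush()
	return metrics, nil
}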
// Start initializes the CirconusMetrics instance based on
// configuration settings and sets the httptrap check url to
// which metrics should be sent. It then starts a periodic
// submission process of all metrics collected.
// Start is a deprecated NOP; automatic flushing is started in New if the flush interval is > 0.
func (m *CirconusMetrics) Start() {
go func() {
for _ = range time.NewTicker(m.flushInterval).C {
m.Flush()
}
}()
// nop
}
// Flush metrics kicks off the process of sending metrics to Circonus
func (m *CirconusMetrics) Flush() {
if m.flushing {
return
}
m.flushmu.Lock()
m.flushing = true
m.flushmu.Unlock()
// Ready returns true or false indicating if the check is ready to accept metrics
func (m *CirconusMetrics) Ready() bool {
return m.check.IsReady()
}
func (m *CirconusMetrics) packageMetrics() (map[string]*api.CheckBundleMetric, Metrics) {
m.packagingmu.Lock()
defer m.packagingmu.Unlock()
if m.Debug {
m.Log.Println("[DEBUG] Flushing metrics")
m.Log.Println("[DEBUG] Packaging metrics")
}
// check for new metrics and enable them automatically
newMetrics := make(map[string]*api.CheckBundleMetric)
counters, gauges, histograms, text := m.snapshot()
output := make(map[string]interface{})
newMetrics := make(map[string]*api.CheckBundleMetric)
output := make(Metrics, len(counters)+len(gauges)+len(histograms)+len(text))
for name, value := range counters {
send := m.check.IsMetricActive(name)
if !send && m.check.ActivateMetric(name) {
@ -222,10 +269,7 @@ func (m *CirconusMetrics) Flush() {
}
}
if send {
output[name] = map[string]interface{}{
"_type": "n",
"_value": value,
}
output[name] = Metric{Type: "L", Value: value}
}
}
@ -240,10 +284,7 @@ func (m *CirconusMetrics) Flush() {
}
}
if send {
output[name] = map[string]interface{}{
"_type": "n",
"_value": value,
}
output[name] = Metric{Type: m.getGaugeType(value), Value: value}
}
}
@ -258,10 +299,7 @@ func (m *CirconusMetrics) Flush() {
}
}
if send {
output[name] = map[string]interface{}{
"_type": "n",
"_value": value.DecStrings(),
}
output[name] = Metric{Type: "n", Value: value.DecStrings()}
}
}
@ -276,13 +314,85 @@ func (m *CirconusMetrics) Flush() {
}
}
if send {
output[name] = map[string]interface{}{
"_type": "s",
"_value": value,
}
output[name] = Metric{Type: "s", Value: value}
}
}
m.lastMetrics.metricsmu.Lock()
defer m.lastMetrics.metricsmu.Unlock()
m.lastMetrics.metrics = &output
m.lastMetrics.ts = time.Now()
return newMetrics, output
}
// PromOutput returns lines of metrics in prom format
func (m *CirconusMetrics) PromOutput() (*bytes.Buffer, error) {
m.lastMetrics.metricsmu.Lock()
defer m.lastMetrics.metricsmu.Unlock()
if m.lastMetrics.metrics == nil {
return nil, errors.New("no metrics available")
}
var b bytes.Buffer
w := bufio.NewWriter(&b)
ts := m.lastMetrics.ts.UnixNano() / int64(time.Millisecond)
for name, metric := range *m.lastMetrics.metrics {
switch metric.Type {
case "n":
if strings.HasPrefix(fmt.Sprintf("%v", metric.Value), "[H[") {
continue // circonus histogram != prom "histogram" (aka percentile)
}
case "s":
continue // text metrics unsupported
}
fmt.Fprintf(w, "%s %v %d\n", name, metric.Value, ts)
}
err := w.Flush()
if err != nil {
return nil, errors.Wrap(err, "flushing metric buffer")
}
return &b, err
}
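
For illustration (not part of the vendored source), PromOutput renders the last packaged metrics as plain "name value timestamp_ms" lines; this fragment assumes an initialized *CirconusMetrics named metrics and an "fmt" import, and the output shown is illustrative:

if buf, err := metrics.PromOutput(); err == nil {
	fmt.Print(buf.String())
	// requests`total 1203 1561900000000
	// queue`depth 12 1561900000000
}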
// FlushMetrics flushes current metrics to a structure and returns it (does NOT send to Circonus)
func (m *CirconusMetrics) FlushMetrics() *Metrics {
m.flushmu.Lock()
if m.flushing {
m.flushmu.Unlock()
return &Metrics{}
}
m.flushing = true
m.flushmu.Unlock()
_, output := m.packageMetrics()
m.flushmu.Lock()
m.flushing = false
m.flushmu.Unlock()
return &output
}
// Flush metrics kicks off the process of sending metrics to Circonus
func (m *CirconusMetrics) Flush() {
m.flushmu.Lock()
if m.flushing {
m.flushmu.Unlock()
return
}
m.flushing = true
m.flushmu.Unlock()
newMetrics, output := m.packageMetrics()
if len(output) > 0 {
m.submit(output, newMetrics)
} else {

View File

@ -4,6 +4,8 @@
package circonusgometrics
import "fmt"
// A Counter is a monotonically increasing unsigned integer.
//
// Use a counter to derive rates (e.g., record total number of requests, derive
@ -40,6 +42,19 @@ func (m *CirconusMetrics) RemoveCounter(metric string) {
delete(m.counters, metric)
}
// GetCounterTest returns the current value for a counter. (note: a function intended specifically for testing; disable automatic submission during testing.)
func (m *CirconusMetrics) GetCounterTest(metric string) (uint64, error) {
m.cm.Lock()
defer m.cm.Unlock()
if val, ok := m.counters[metric]; ok {
return val, nil
}
return 0, fmt.Errorf("Counter metric '%s' not found", metric)
}
// SetCounterFunc set counter to a function [called at flush interval]
func (m *CirconusMetrics) SetCounterFunc(metric string, fn func() uint64) {
m.cfm.Lock()

View File

@ -22,7 +22,48 @@ func (m *CirconusMetrics) Gauge(metric string, val interface{}) {
func (m *CirconusMetrics) SetGauge(metric string, val interface{}) {
m.gm.Lock()
defer m.gm.Unlock()
m.gauges[metric] = m.gaugeValString(val)
m.gauges[metric] = val
}
// AddGauge adds value to existing gauge
func (m *CirconusMetrics) AddGauge(metric string, val interface{}) {
m.gm.Lock()
defer m.gm.Unlock()
v, ok := m.gauges[metric]
if !ok {
m.gauges[metric] = val
return
}
switch val.(type) {
default:
// ignore it, unsupported type
case int:
m.gauges[metric] = v.(int) + val.(int)
case int8:
m.gauges[metric] = v.(int8) + val.(int8)
case int16:
m.gauges[metric] = v.(int16) + val.(int16)
case int32:
m.gauges[metric] = v.(int32) + val.(int32)
case int64:
m.gauges[metric] = v.(int64) + val.(int64)
case uint:
m.gauges[metric] = v.(uint) + val.(uint)
case uint8:
m.gauges[metric] = v.(uint8) + val.(uint8)
case uint16:
m.gauges[metric] = v.(uint16) + val.(uint16)
case uint32:
m.gauges[metric] = v.(uint32) + val.(uint32)
case uint64:
m.gauges[metric] = v.(uint64) + val.(uint64)
case float32:
m.gauges[metric] = v.(float32) + val.(float32)
case float64:
m.gauges[metric] = v.(float64) + val.(float64)
}
}
// RemoveGauge removes a gauge
@ -32,6 +73,18 @@ func (m *CirconusMetrics) RemoveGauge(metric string) {
delete(m.gauges, metric)
}
// GetGaugeTest returns the current value for a gauge. (note: a function intended specifically for testing; disable automatic submission during testing.)
func (m *CirconusMetrics) GetGaugeTest(metric string) (interface{}, error) {
m.gm.Lock()
defer m.gm.Unlock()
if val, ok := m.gauges[metric]; ok {
return val, nil
}
return nil, fmt.Errorf("Gauge metric '%s' not found", metric)
}
// SetGaugeFunc sets a gauge to a function [called at flush interval]
func (m *CirconusMetrics) SetGaugeFunc(metric string, fn func() int64) {
m.gfm.Lock()
@ -46,36 +99,31 @@ func (m *CirconusMetrics) RemoveGaugeFunc(metric string) {
delete(m.gaugeFuncs, metric)
}
// gaugeValString converts an interface value (of a supported type) to a string
func (m *CirconusMetrics) gaugeValString(val interface{}) string {
vs := ""
switch v := val.(type) {
default:
// ignore it, unsupported type
// getGaugeType returns the resmon metric type for the underlying type of the gauge value
func (m *CirconusMetrics) getGaugeType(v interface{}) string {
mt := "n"
switch v.(type) {
case int:
vs = fmt.Sprintf("%d", v)
mt = "i"
case int8:
vs = fmt.Sprintf("%d", v)
mt = "i"
case int16:
vs = fmt.Sprintf("%d", v)
mt = "i"
case int32:
vs = fmt.Sprintf("%d", v)
case int64:
vs = fmt.Sprintf("%d", v)
mt = "i"
case uint:
vs = fmt.Sprintf("%d", v)
mt = "I"
case uint8:
vs = fmt.Sprintf("%d", v)
mt = "I"
case uint16:
vs = fmt.Sprintf("%d", v)
mt = "I"
case uint32:
vs = fmt.Sprintf("%d", v)
mt = "I"
case int64:
mt = "l"
case uint64:
vs = fmt.Sprintf("%d", v)
case float32:
vs = fmt.Sprintf("%f", v)
case float64:
vs = fmt.Sprintf("%f", v)
mt = "L"
}
return vs
return mt
}
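
For reference (not part of the vendored source), the resmon type codes getGaugeType assigns, keyed off the Go type of the gauge value; m is assumed to be an initialized *CirconusMetrics and the metric names are illustrative:

m.Gauge("g`int32", int32(7))   // "_type": "i" (32-bit signed and smaller)
m.Gauge("g`uint32", uint32(7)) // "_type": "I" (32-bit unsigned and smaller)
m.Gauge("g`int64", int64(7))   // "_type": "l" (64-bit signed)
m.Gauge("g`uint64", uint64(7)) // "_type": "L" (64-bit unsigned)
m.Gauge("g`float", 3.14)       // "_type": "n" (floats fall through to the default)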

View File

@ -5,6 +5,7 @@
package circonusgometrics
import (
"fmt"
"sync"
"github.com/circonus-labs/circonusllhist"
@ -27,6 +28,17 @@ func (m *CirconusMetrics) RecordValue(metric string, val float64) {
m.SetHistogramValue(metric, val)
}
// RecordCountForValue adds count n for value to a histogram
func (m *CirconusMetrics) RecordCountForValue(metric string, val float64, n int64) {
hist := m.NewHistogram(metric)
m.hm.Lock()
hist.rw.Lock()
hist.hist.RecordValues(val, n)
hist.rw.Unlock()
m.hm.Unlock()
}
// SetHistogramValue adds a value to a histogram
func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) {
hist := m.NewHistogram(metric)
@ -38,6 +50,18 @@ func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) {
m.hm.Unlock()
}
// GetHistogramTest returns the current value for a histogram. (note: a function intended specifically for testing; disable automatic submission during testing.)
func (m *CirconusMetrics) GetHistogramTest(metric string) ([]string, error) {
m.hm.Lock()
defer m.hm.Unlock()
if hist, ok := m.histograms[metric]; ok {
return hist.hist.DecStrings(), nil
}
return []string{""}, fmt.Errorf("Histogram metric '%s' not found", metric)
}
// RemoveHistogram removes a histogram
func (m *CirconusMetrics) RemoveHistogram(metric string) {
m.hm.Lock()

View File

@ -6,8 +6,8 @@ package circonusgometrics
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
@ -17,17 +17,24 @@ import (
"time"
"github.com/circonus-labs/circonus-gometrics/api"
"github.com/hashicorp/go-retryablehttp"
retryablehttp "github.com/hashicorp/go-retryablehttp"
"github.com/pkg/errors"
)
func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[string]*api.CheckBundleMetric) {
func (m *CirconusMetrics) submit(output Metrics, newMetrics map[string]*api.CheckBundleMetric) {
// if there is nowhere to send metrics to, just return.
if !m.check.IsReady() {
m.Log.Printf("[WARN] check not ready, skipping metric submission")
return
}
// update check if there are any new metrics or, if metric tags have been added since last submit
m.check.UpdateCheck(newMetrics)
str, err := json.Marshal(output)
if err != nil {
m.Log.Printf("[ERROR] marshling output %+v", err)
m.Log.Printf("[ERROR] marshaling output %+v", err)
return
}
@ -37,15 +44,21 @@ func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[s
return
}
// OK response from circonus-agent does not
// indicate how many metrics were received
if numStats == -1 {
numStats = len(output)
}
if m.Debug {
m.Log.Printf("[DEBUG] %d stats sent\n", numStats)
}
}
func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
trap, err := m.check.GetTrap()
trap, err := m.check.GetSubmissionURL()
if err != nil {
return 0, err
return 0, errors.Wrap(err, "trap call")
}
dataReader := bytes.NewReader(payload)
@ -59,10 +72,14 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
// keep last HTTP error in the event of retry failure
var lastHTTPError error
retryPolicy := func(resp *http.Response, err error) (bool, error) {
retryPolicy := func(ctx context.Context, resp *http.Response, err error) (bool, error) {
if ctxErr := ctx.Err(); ctxErr != nil {
return false, ctxErr
}
if err != nil {
lastHTTPError = err
return true, err
return true, errors.Wrap(err, "retry policy")
}
// Check the response code. We retry on 500-range responses to allow
// the server time to recover, as 500's are typically not permanent
@ -92,20 +109,24 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
TLSClientConfig: trap.TLS,
DisableKeepAlives: true,
MaxIdleConnsPerHost: -1,
DisableCompression: true,
DisableCompression: false,
}
} else {
} else if trap.URL.Scheme == "http" {
client.HTTPClient.Transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
DisableKeepAlives: true,
MaxIdleConnsPerHost: -1,
DisableCompression: true,
DisableCompression: false,
}
} else if trap.IsSocket {
m.Log.Println("using socket transport")
client.HTTPClient.Transport = trap.SockTransport
} else {
return 0, errors.Errorf("unknown scheme (%s), skipping submission", trap.URL.Scheme)
}
client.RetryWaitMin = 1 * time.Second
client.RetryWaitMax = 5 * time.Second
@ -120,7 +141,8 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
client.CheckRetry = retryPolicy
attempts := -1
client.RequestLogHook = func(logger *log.Logger, req *http.Request, retryNumber int) {
client.RequestLogHook = func(logger retryablehttp.Logger, req *http.Request, retryNumber int) {
//client.RequestLogHook = func(logger *log.Logger, req *http.Request, retryNumber int) {
attempts = retryNumber
}
@ -132,10 +154,17 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
if attempts == client.RetryMax {
m.check.RefreshTrap()
}
return 0, err
return 0, errors.Wrap(err, "trap call")
}
defer resp.Body.Close()
// no content - expected result from
// circonus-agent when metrics accepted
if resp.StatusCode == http.StatusNoContent {
return -1, nil
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
m.Log.Printf("[ERROR] reading body, proceeding. %s\n", err)
@ -146,7 +175,7 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
m.Log.Printf("[ERROR] parsing body, proceeding. %v (%s)\n", err, body)
}
if resp.StatusCode != 200 {
if resp.StatusCode != http.StatusOK {
return 0, errors.New("[ERROR] bad response code: " + strconv.Itoa(resp.StatusCode))
}
switch v := response["stats"].(type) {

View File

@ -17,7 +17,6 @@ func (m *CirconusMetrics) TrackHTTPLatency(name string, handler func(http.Respon
start := time.Now().UnixNano()
handler(rw, req)
elapsed := time.Now().UnixNano() - start
//hist := m.NewHistogram("go`HTTP`" + req.Method + "`" + name + "`latency")
m.RecordValue("go`HTTP`"+req.Method+"`"+name+"`latency", float64(elapsed)/float64(time.Second))
}
}
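
A minimal usage sketch (not part of the vendored source), assuming an initialized *CirconusMetrics named metrics and the standard "net/http" and "log" imports; the route name "index" is illustrative:

http.HandleFunc("/", metrics.TrackHTTPLatency("index", func(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("ok"))
}))
log.Fatal(http.ListenAndServe(":8080", nil))

The wrapped handler records the elapsed time (in seconds) into the go`HTTP`<method>`index`latency histogram.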

View File

@ -33,7 +33,7 @@ func (m *CirconusMetrics) Reset() {
m.counters = make(map[string]uint64)
m.counterFuncs = make(map[string]func() uint64)
m.gauges = make(map[string]string)
m.gauges = make(map[string]interface{})
m.gaugeFuncs = make(map[string]func() int64)
m.histograms = make(map[string]*Histogram)
m.text = make(map[string]string)
@ -41,81 +41,95 @@ func (m *CirconusMetrics) Reset() {
}
// snapshot returns a copy of the values of all registered counters and gauges.
func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]string, h map[string]*circonusllhist.Histogram, t map[string]string) {
func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]interface{}, h map[string]*circonusllhist.Histogram, t map[string]string) {
c = m.snapCounters()
g = m.snapGauges()
h = m.snapHistograms()
t = m.snapText()
return
}
func (m *CirconusMetrics) snapCounters() map[string]uint64 {
m.cm.Lock()
defer m.cm.Unlock()
m.cfm.Lock()
defer m.cfm.Unlock()
m.gm.Lock()
defer m.gm.Unlock()
c := make(map[string]uint64, len(m.counters)+len(m.counterFuncs))
m.gfm.Lock()
defer m.gfm.Unlock()
m.hm.Lock()
defer m.hm.Unlock()
m.tm.Lock()
defer m.tm.Unlock()
m.tfm.Lock()
defer m.tfm.Unlock()
c = make(map[string]uint64, len(m.counters)+len(m.counterFuncs))
for n, v := range m.counters {
c[n] = v
}
if m.resetCounters && len(c) > 0 {
m.counters = make(map[string]uint64)
}
for n, f := range m.counterFuncs {
c[n] = f()
}
//g = make(map[string]int64, len(m.gauges)+len(m.gaugeFuncs))
g = make(map[string]string, len(m.gauges)+len(m.gaugeFuncs))
return c
}
func (m *CirconusMetrics) snapGauges() map[string]interface{} {
m.gm.Lock()
defer m.gm.Unlock()
m.gfm.Lock()
defer m.gfm.Unlock()
g := make(map[string]interface{}, len(m.gauges)+len(m.gaugeFuncs))
for n, v := range m.gauges {
g[n] = v
}
for n, f := range m.gaugeFuncs {
g[n] = m.gaugeValString(f())
if m.resetGauges && len(g) > 0 {
m.gauges = make(map[string]interface{})
}
h = make(map[string]*circonusllhist.Histogram, len(m.histograms))
for n, f := range m.gaugeFuncs {
g[n] = f()
}
return g
}
func (m *CirconusMetrics) snapHistograms() map[string]*circonusllhist.Histogram {
m.hm.Lock()
defer m.hm.Unlock()
h := make(map[string]*circonusllhist.Histogram, len(m.histograms))
for n, hist := range m.histograms {
hist.rw.Lock()
h[n] = hist.hist.CopyAndReset()
hist.rw.Unlock()
}
if m.resetHistograms && len(h) > 0 {
m.histograms = make(map[string]*Histogram)
}
return h
}
func (m *CirconusMetrics) snapText() map[string]string {
m.tm.Lock()
defer m.tm.Unlock()
m.tfm.Lock()
defer m.tfm.Unlock()
t := make(map[string]string, len(m.text)+len(m.textFuncs))
t = make(map[string]string, len(m.text)+len(m.textFuncs))
for n, v := range m.text {
t[n] = v
}
if m.resetText && len(t) > 0 {
m.text = make(map[string]string)
}
for n, f := range m.textFuncs {
t[n] = f()
}
if m.resetCounters {
m.counters = make(map[string]uint64)
m.counterFuncs = make(map[string]func() uint64)
}
if m.resetGauges {
m.gauges = make(map[string]string)
m.gaugeFuncs = make(map[string]func() int64)
}
if m.resetHistograms {
m.histograms = make(map[string]*Histogram)
}
if m.resetText {
m.text = make(map[string]string)
m.textFuncs = make(map[string]func() string)
}
return
return t
}

View File

@@ -8,17 +8,24 @@ package circonusllhist
import (
"bytes"
"encoding/base64"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"strconv"
"strings"
"sync"
"time"
)
const (
DEFAULT_HIST_SIZE = int16(100)
defaultHistSize = uint16(100)
)
var power_of_ten = [...]float64{
var powerOfTen = [...]float64{
1, 10, 100, 1000, 10000, 100000, 1e+06, 1e+07, 1e+08, 1e+09, 1e+10,
1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30,
@@ -49,28 +56,39 @@ var power_of_ten = [...]float64{
}
// A Bracket is a part of a cumulative distribution.
type Bin struct {
type bin struct {
count uint64
val int8
exp int8
count uint64
}
func NewBinRaw(val int8, exp int8, count uint64) *Bin {
return &Bin{
func newBinRaw(val int8, exp int8, count uint64) *bin {
return &bin{
count: count,
val: val,
exp: exp,
count: count,
}
}
func NewBin() *Bin {
return NewBinRaw(0, 0, 0)
func newBin() *bin {
return newBinRaw(0, 0, 0)
}
func NewBinFromFloat64(d float64) *Bin {
hb := NewBinRaw(0, 0, 0)
hb.SetFromFloat64(d)
func newBinFromFloat64(d float64) *bin {
hb := newBinRaw(0, 0, 0)
hb.setFromFloat64(d)
return hb
}
func (hb *Bin) SetFromFloat64(d float64) *Bin {
type fastL2 struct {
l1, l2 int
}
func (hb *bin) newFastL2() fastL2 {
return fastL2{l1: int(uint8(hb.exp)), l2: int(uint8(hb.val))}
}
func (hb *bin) setFromFloat64(d float64) *bin {
hb.val = -1
if math.IsInf(d, 0) || math.IsNaN(d) {
return hb
@@ -93,7 +111,7 @@ func (hb *Bin) SetFromFloat64(d float64) *Bin {
}
return hb
}
d = d / hb.PowerOfTen()
d = d / hb.powerOfTen()
d = d * 10
hb.val = int8(sign * int(math.Floor(d+1e-13)))
if hb.val == 100 || hb.val == -100 {
@@ -116,136 +134,308 @@ func (hb *Bin) SetFromFloat64(d float64) *Bin {
}
return hb
}
func (hb *Bin) PowerOfTen() float64 {
idx := int(hb.exp)
if idx < 0 {
idx = 256 + idx
}
return power_of_ten[idx]
func (hb *bin) powerOfTen() float64 {
idx := int(uint8(hb.exp))
return powerOfTen[idx]
}
func (hb *Bin) IsNaN() bool {
if hb.val > 99 || hb.val < -99 {
func (hb *bin) isNaN() bool {
// aval := abs(hb.val)
aval := hb.val
if aval < 0 {
aval = -aval
}
if 99 < aval { // in [100... ]: nan
return true
}
if 9 < aval { // in [10 - 99]: valid range
return false
}
if 0 < aval { // in [1 - 9 ]: nan
return true
}
if 0 == aval { // in [0] : zero bucket
return false
}
return false
}
func (hb *Bin) Val() int8 {
return hb.val
}
func (hb *Bin) Exp() int8 {
return hb.exp
}
func (hb *Bin) Count() uint64 {
return hb.count
}
func (hb *Bin) Value() float64 {
if hb.IsNaN() {
func (hb *bin) value() float64 {
if hb.isNaN() {
return math.NaN()
}
if hb.val < 10 && hb.val > -10 {
return 0.0
}
return (float64(hb.val) / 10.0) * hb.PowerOfTen()
return (float64(hb.val) / 10.0) * hb.powerOfTen()
}
func (hb *Bin) BinWidth() float64 {
if hb.IsNaN() {
func (hb *bin) binWidth() float64 {
if hb.isNaN() {
return math.NaN()
}
if hb.val < 10 && hb.val > -10 {
return 0.0
}
return hb.PowerOfTen() / 10.0
return hb.powerOfTen() / 10.0
}
func (hb *Bin) Midpoint() float64 {
if hb.IsNaN() {
func (hb *bin) midpoint() float64 {
if hb.isNaN() {
return math.NaN()
}
out := hb.Value()
out := hb.value()
if out == 0 {
return 0
}
interval := hb.BinWidth()
interval := hb.binWidth()
if out < 0 {
interval = interval * -1
}
return out + interval/2.0
}
func (hb *Bin) Left() float64 {
if hb.IsNaN() {
func (hb *bin) left() float64 {
if hb.isNaN() {
return math.NaN()
}
out := hb.Value()
out := hb.value()
if out >= 0 {
return out
}
return out - hb.BinWidth()
return out - hb.binWidth()
}
func (h1 *Bin) Compare(h2 *Bin) int {
if h1.val == h2.val && h1.exp == h2.exp {
return 0
func (h1 *bin) compare(h2 *bin) int {
var v1, v2 int
// 1) slide exp positive
// 2) shift by size of val multiple by (val != 0)
// 3) then add or subtract val accordingly
if h1.val >= 0 {
v1 = ((int(h1.exp)+256)<<8)*int(((int(h1.val)|(^int(h1.val)+1))>>8)&1) + int(h1.val)
} else {
v1 = ((int(h1.exp)+256)<<8)*int(((int(h1.val)|(^int(h1.val)+1))>>8)&1) - int(h1.val)
}
if h1.val == -1 {
return 1
if h2.val >= 0 {
v2 = ((int(h2.exp)+256)<<8)*int(((int(h2.val)|(^int(h2.val)+1))>>8)&1) + int(h2.val)
} else {
v2 = ((int(h2.exp)+256)<<8)*int(((int(h2.val)|(^int(h2.val)+1))>>8)&1) - int(h2.val)
}
if h2.val == -1 {
return -1
}
if h1.val == 0 {
if h2.val > 0 {
return 1
}
return -1
}
if h2.val == 0 {
if h1.val < 0 {
return 1
}
return -1
}
if h1.val < 0 && h2.val > 0 {
return 1
}
if h1.val > 0 && h2.val < 0 {
return -1
}
if h1.exp == h2.exp {
if h1.val < h2.val {
return 1
}
return -1
}
if h1.exp > h2.exp {
if h1.val < 0 {
return 1
}
return -1
}
if h1.exp < h2.exp {
if h1.val < 0 {
return -1
}
return 1
}
return 0
// return the difference
return v2 - v1
}
// This histogram structure tracks values to two decimal digits of precision
// with a bounded error that remains bounded upon composition
type Histogram struct {
mutex sync.Mutex
bvs []Bin
used int16
allocd int16
bvs []bin
used uint16
allocd uint16
lookup [256][]uint16
mutex sync.RWMutex
useLocks bool
}
const (
BVL1, BVL1MASK uint64 = iota, 0xff << (8 * iota)
BVL2, BVL2MASK
BVL3, BVL3MASK
BVL4, BVL4MASK
BVL5, BVL5MASK
BVL6, BVL6MASK
BVL7, BVL7MASK
BVL8, BVL8MASK
)
func getBytesRequired(val uint64) (len int8) {
if 0 != (BVL8MASK|BVL7MASK|BVL6MASK|BVL5MASK)&val {
if 0 != BVL8MASK&val {
return int8(BVL8)
}
if 0 != BVL7MASK&val {
return int8(BVL7)
}
if 0 != BVL6MASK&val {
return int8(BVL6)
}
if 0 != BVL5MASK&val {
return int8(BVL5)
}
} else {
if 0 != BVL4MASK&val {
return int8(BVL4)
}
if 0 != BVL3MASK&val {
return int8(BVL3)
}
if 0 != BVL2MASK&val {
return int8(BVL2)
}
}
return int8(BVL1)
}
func writeBin(out io.Writer, in bin, idx int) (err error) {
err = binary.Write(out, binary.BigEndian, in.val)
if err != nil {
return
}
err = binary.Write(out, binary.BigEndian, in.exp)
if err != nil {
return
}
var tgtType int8 = getBytesRequired(in.count)
err = binary.Write(out, binary.BigEndian, tgtType)
if err != nil {
return
}
var bcount = make([]uint8, 8)
b := bcount[0 : tgtType+1]
for i := tgtType; i >= 0; i-- {
b[i] = uint8(uint64(in.count>>(uint8(i)*8)) & 0xff)
}
err = binary.Write(out, binary.BigEndian, b)
if err != nil {
return
}
return
}
func readBin(in io.Reader) (out bin, err error) {
err = binary.Read(in, binary.BigEndian, &out.val)
if err != nil {
return
}
err = binary.Read(in, binary.BigEndian, &out.exp)
if err != nil {
return
}
var bvl uint8
err = binary.Read(in, binary.BigEndian, &bvl)
if err != nil {
return
}
if bvl > uint8(BVL8) {
return out, errors.New("encoding error: bvl value is greater than max allowable")
}
bcount := make([]byte, 8)
b := bcount[0 : bvl+1]
err = binary.Read(in, binary.BigEndian, b)
if err != nil {
return
}
var count uint64 = 0
for i := int(bvl + 1); i >= 0; i-- {
count |= (uint64(bcount[i]) << (uint8(i) * 8))
}
out.count = count
return
}
func Deserialize(in io.Reader) (h *Histogram, err error) {
h = New()
if h.bvs == nil {
h.bvs = make([]bin, 0, defaultHistSize)
}
var nbin int16
err = binary.Read(in, binary.BigEndian, &nbin)
if err != nil {
return
}
for ii := int16(0); ii < nbin; ii++ {
bb, err := readBin(in)
if err != nil {
return h, err
}
h.insertBin(&bb, int64(bb.count))
}
return h, nil
}
func (h *Histogram) Serialize(w io.Writer) error {
var nbin int16 = int16(len(h.bvs))
if err := binary.Write(w, binary.BigEndian, nbin); err != nil {
return err
}
for i := 0; i < len(h.bvs); i++ {
if err := writeBin(w, h.bvs[i], i); err != nil {
return err
}
}
return nil
}
func (h *Histogram) SerializeB64(w io.Writer) error {
buf := bytes.NewBuffer([]byte{})
h.Serialize(buf)
encoder := base64.NewEncoder(base64.StdEncoding, w)
if _, err := encoder.Write(buf.Bytes()); err != nil {
return err
}
encoder.Close()
return nil
}
// New returns a new Histogram
func New() *Histogram {
return &Histogram{
allocd: DEFAULT_HIST_SIZE,
used: 0,
bvs: make([]Bin, DEFAULT_HIST_SIZE),
allocd: defaultHistSize,
used: 0,
bvs: make([]bin, defaultHistSize),
useLocks: true,
}
}
// NewNoLocks returns a new Histogram that does not guard access with locks
func NewNoLocks() *Histogram {
return &Histogram{
allocd: defaultHistSize,
used: 0,
bvs: make([]bin, defaultHistSize),
useLocks: false,
}
}
// NewFromStrings returns a Histogram created from DecStrings strings
func NewFromStrings(strs []string, locks bool) (*Histogram, error) {
bin, err := stringsToBin(strs)
if err != nil {
return nil, err
}
return newFromBins(bin, locks), nil
}
// newFromBins returns a Histogram created from a slice of bins
func newFromBins(bins []bin, locks bool) *Histogram {
return &Histogram{
allocd: uint16(len(bins) + 10), // pad it with 10
used: uint16(len(bins)),
bvs: bins,
useLocks: locks,
}
}
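With the constructors above plus the recording and quantile methods added later in this file, the vendored histogram can be exercised end to end. A hedged sketch against the upstream import path github.com/circonus-labs/circonusllhist, using only methods visible in this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/circonus-labs/circonusllhist"
)

func main() {
	h := circonusllhist.New()

	// Record raw samples; each lands in a two-significant-digit log-linear bin.
	for _, v := range []float64{0.12, 0.34, 0.56, 1.2, 3.4} {
		_ = h.RecordValue(v)
	}
	// RecordDuration (added later in this file) stores the duration in seconds.
	_ = h.RecordDuration(1500 * time.Microsecond)

	// Approximate quantiles over the recorded bins.
	qs, err := h.ApproxQuantile([]float64{0.5, 0.99})
	if err != nil {
		panic(err)
	}
	fmt.Println("p50:", qs[0], "p99:", qs[1])
}
```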
@@ -266,9 +456,24 @@ func (h *Histogram) Mean() float64 {
// Reset forgets all bins in the histogram (they remain allocated)
func (h *Histogram) Reset() {
h.mutex.Lock()
if h.useLocks {
h.mutex.Lock()
defer h.mutex.Unlock()
}
for i := 0; i < 256; i++ {
if h.lookup[i] != nil {
for j := range h.lookup[i] {
h.lookup[i][j] = 0
}
}
}
h.used = 0
h.mutex.Unlock()
}
// RecordIntScale records an integer scalar value, returning an error if the
// value is out of range.
func (h *Histogram) RecordIntScale(val int64, scale int) error {
return h.RecordIntScales(val, scale, 1)
}
// RecordValue records the given value, returning an error if the value is out
@@ -277,6 +482,12 @@ func (h *Histogram) RecordValue(v float64) error {
return h.RecordValues(v, 1)
}
// RecordDuration records the given time.Duration in seconds, returning an error
// if the value is out of range.
func (h *Histogram) RecordDuration(v time.Duration) error {
return h.RecordIntScale(int64(v), -9)
}
// RecordCorrectedValue records the given value, correcting for stalls in the
// recording process. This only works for processes which are recording values
// at an expected interval (e.g., doing jitter analysis). Processes which are
@@ -304,17 +515,23 @@ func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error {
}
// find where a new bin should go
func (h *Histogram) InternalFind(hb *Bin) (bool, int16) {
func (h *Histogram) internalFind(hb *bin) (bool, uint16) {
if h.used == 0 {
return false, 0
}
f2 := hb.newFastL2()
if h.lookup[f2.l1] != nil {
if idx := h.lookup[f2.l1][f2.l2]; idx != 0 {
return true, idx - 1
}
}
rv := -1
idx := int16(0)
l := int16(0)
r := h.used - 1
idx := uint16(0)
l := int(0)
r := int(h.used - 1)
for l < r {
check := (r + l) / 2
rv = h.bvs[check].Compare(hb)
rv = h.bvs[check].compare(hb)
if rv == 0 {
l = check
r = check
@@ -325,9 +542,9 @@ func (h *Histogram) InternalFind(hb *Bin) (bool, int16) {
}
}
if rv != 0 {
rv = h.bvs[l].Compare(hb)
rv = h.bvs[l].compare(hb)
}
idx = l
idx = uint16(l)
if rv == 0 {
return true, idx
}
@@ -338,23 +555,22 @@ func (h *Histogram) InternalFind(hb *Bin) (bool, int16) {
return false, idx
}
func (h *Histogram) InsertBin(hb *Bin, count int64) uint64 {
h.mutex.Lock()
defer h.mutex.Unlock()
if count == 0 {
return 0
func (h *Histogram) insertBin(hb *bin, count int64) uint64 {
if h.useLocks {
h.mutex.Lock()
defer h.mutex.Unlock()
}
found, idx := h.InternalFind(hb)
found, idx := h.internalFind(hb)
if !found {
if h.used == h.allocd {
new_bvs := make([]Bin, h.allocd+DEFAULT_HIST_SIZE)
new_bvs := make([]bin, h.allocd+defaultHistSize)
if idx > 0 {
copy(new_bvs[0:], h.bvs[0:idx])
}
if idx < h.used {
copy(new_bvs[idx+1:], h.bvs[idx:])
}
h.allocd = h.allocd + DEFAULT_HIST_SIZE
h.allocd = h.allocd + defaultHistSize
h.bvs = new_bvs
} else {
copy(h.bvs[idx+1:], h.bvs[idx:h.used])
@@ -363,13 +579,20 @@ func (h *Histogram) InsertBin(hb *Bin, count int64) uint64 {
h.bvs[idx].exp = hb.exp
h.bvs[idx].count = uint64(count)
h.used++
for i := idx; i < h.used; i++ {
f2 := h.bvs[i].newFastL2()
if h.lookup[f2.l1] == nil {
h.lookup[f2.l1] = make([]uint16, 256)
}
h.lookup[f2.l1][f2.l2] = uint16(i) + 1
}
return h.bvs[idx].count
}
var newval uint64
if count < 0 {
newval = h.bvs[idx].count - uint64(-count)
} else {
if count >= 0 {
newval = h.bvs[idx].count + uint64(count)
} else {
newval = h.bvs[idx].count - uint64(-count)
}
if newval < h.bvs[idx].count { //rolled
newval = ^uint64(0)
@@ -378,23 +601,59 @@ func (h *Histogram) InsertBin(hb *Bin, count int64) uint64 {
return newval - h.bvs[idx].count
}
// RecordIntScales records n occurrences of the given value, returning an error if
// the value is out of range.
func (h *Histogram) RecordIntScales(val int64, scale int, n int64) error {
sign := int64(1)
if val == 0 {
scale = 0
} else {
scale++
if val < 0 {
val = 0 - val
sign = -1
}
if val < 10 {
val *= 10
scale -= 1
}
for val >= 100 {
val /= 10
scale++
}
}
if scale < -128 {
val = 0
scale = 0
} else if scale > 127 {
val = 0xff
scale = 0
}
val *= sign
hb := bin{val: int8(val), exp: int8(scale), count: 0}
h.insertBin(&hb, n)
return nil
}
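A worked trace of the normalization above, not part of the diff: RecordDuration(1.5ms) calls RecordIntScale(1500000, -9), and the loop reduces that to a two-digit mantissa plus exponent.

```go
package main

import "github.com/circonus-labs/circonusllhist"

func main() {
	h := circonusllhist.New()

	// val = 1500000, scale = -9 (1.5ms expressed in nanoseconds)
	//   scale++               -> scale = -8
	//   val /= 10, five times -> val = 15, scale = -3
	// stored bin: val = 15, exp = -3, i.e. 1.5 * 10^-3 seconds.
	_ = h.RecordIntScale(1500000, -9)
}
```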
// RecordValues records n occurrences of the given value, returning an error if
// the value is out of range.
func (h *Histogram) RecordValues(v float64, n int64) error {
var hb Bin
hb.SetFromFloat64(v)
h.InsertBin(&hb, n)
var hb bin
hb.setFromFloat64(v)
h.insertBin(&hb, n)
return nil
}
// ApproxMean returns the approximate mean of the recorded values
func (h *Histogram) ApproxMean() float64 {
h.mutex.Lock()
defer h.mutex.Unlock()
if h.useLocks {
h.mutex.RLock()
defer h.mutex.RUnlock()
}
divisor := 0.0
sum := 0.0
for i := int16(0); i < h.used; i++ {
midpoint := h.bvs[i].Midpoint()
for i := uint16(0); i < h.used; i++ {
midpoint := h.bvs[i].midpoint()
cardinality := float64(h.bvs[i].count)
divisor += cardinality
sum += midpoint * cardinality
@@ -407,11 +666,13 @@ func (h *Histogram) ApproxMean() float64 {
// ApproxSum returns the approximate sum of the recorded values
func (h *Histogram) ApproxSum() float64 {
h.mutex.Lock()
defer h.mutex.Unlock()
if h.useLocks {
h.mutex.RLock()
defer h.mutex.RUnlock()
}
sum := 0.0
for i := int16(0); i < h.used; i++ {
midpoint := h.bvs[i].Midpoint()
for i := uint16(0); i < h.used; i++ {
midpoint := h.bvs[i].midpoint()
cardinality := float64(h.bvs[i].count)
sum += midpoint * cardinality
}
@@ -419,10 +680,12 @@ func (h *Histogram) ApproxSum() float64 {
}
func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) {
h.mutex.Lock()
defer h.mutex.Unlock()
if h.useLocks {
h.mutex.RLock()
defer h.mutex.RUnlock()
}
q_out := make([]float64, len(q_in))
i_q, i_b := 0, int16(0)
i_q, i_b := 0, uint16(0)
total_cnt, bin_width, bin_left, lower_cnt, upper_cnt := 0.0, 0.0, 0.0, 0.0, 0.0
if len(q_in) == 0 {
return q_out, nil
@@ -435,7 +698,7 @@ func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) {
}
// Add up the bins
for i_b = 0; i_b < h.used; i_b++ {
if !h.bvs[i_b].IsNaN() {
if !h.bvs[i_b].isNaN() {
total_cnt += float64(h.bvs[i_b].count)
}
}
@@ -451,11 +714,11 @@ func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) {
}
for i_b = 0; i_b < h.used; i_b++ {
if h.bvs[i_b].IsNaN() {
if h.bvs[i_b].isNaN() {
continue
}
bin_width = h.bvs[i_b].BinWidth()
bin_left = h.bvs[i_b].Left()
bin_width = h.bvs[i_b].binWidth()
bin_left = h.bvs[i_b].left()
lower_cnt = upper_cnt
upper_cnt = lower_cnt + float64(h.bvs[i_b].count)
break
@@ -463,8 +726,8 @@ func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) {
for i_q = 0; i_q < len(q_in); i_q++ {
for i_b < (h.used-1) && upper_cnt < q_out[i_q] {
i_b++
bin_width = h.bvs[i_b].BinWidth()
bin_left = h.bvs[i_b].Left()
bin_width = h.bvs[i_b].binWidth()
bin_left = h.bvs[i_b].left()
lower_cnt = upper_cnt
upper_cnt = lower_cnt + float64(h.bvs[i_b].count)
}
@@ -485,8 +748,10 @@ func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) {
// ValueAtQuantile returns the recorded value at the given quantile (0..1).
func (h *Histogram) ValueAtQuantile(q float64) float64 {
h.mutex.Lock()
defer h.mutex.Unlock()
if h.useLocks {
h.mutex.RLock()
defer h.mutex.RUnlock()
}
q_in := make([]float64, 1)
q_in[0] = q
q_out, err := h.ApproxQuantile(q_in)
@@ -505,17 +770,21 @@ func (h *Histogram) SignificantFigures() int64 {
// Equals returns true if the two Histograms are equivalent, false if not.
func (h *Histogram) Equals(other *Histogram) bool {
h.mutex.Lock()
other.mutex.Lock()
defer h.mutex.Unlock()
defer other.mutex.Unlock()
if h.useLocks {
h.mutex.RLock()
defer h.mutex.RUnlock()
}
if other.useLocks {
other.mutex.RLock()
defer other.mutex.RUnlock()
}
switch {
case
h.used != other.used:
return false
default:
for i := int16(0); i < h.used; i++ {
if h.bvs[i].Compare(&other.bvs[i]) != 0 {
for i := uint16(0); i < h.used; i++ {
if h.bvs[i].compare(&other.bvs[i]) != 0 {
return false
}
if h.bvs[i].count != other.bvs[i].count {
@@ -526,30 +795,123 @@ func (h *Histogram) Equals(other *Histogram) bool {
return true
}
func (h *Histogram) CopyAndReset() *Histogram {
h.mutex.Lock()
defer h.mutex.Unlock()
newhist := &Histogram{
allocd: h.allocd,
used: h.used,
bvs: h.bvs,
// Copy creates and returns an exact copy of a histogram.
func (h *Histogram) Copy() *Histogram {
if h.useLocks {
h.mutex.Lock()
defer h.mutex.Unlock()
}
h.allocd = DEFAULT_HIST_SIZE
h.bvs = make([]Bin, DEFAULT_HIST_SIZE)
h.used = 0
newhist := New()
newhist.allocd = h.allocd
newhist.used = h.used
newhist.useLocks = h.useLocks
newhist.bvs = []bin{}
for _, v := range h.bvs {
newhist.bvs = append(newhist.bvs, v)
}
for i, u := range h.lookup {
for _, v := range u {
newhist.lookup[i] = append(newhist.lookup[i], v)
}
}
return newhist
}
// FullReset resets a histogram to default empty values.
func (h *Histogram) FullReset() {
if h.useLocks {
h.mutex.Lock()
defer h.mutex.Unlock()
}
h.allocd = defaultHistSize
h.bvs = make([]bin, defaultHistSize)
h.used = 0
h.lookup = [256][]uint16{}
}
// CopyAndReset creates and returns an exact copy of a histogram,
// then resets the original to default empty values.
func (h *Histogram) CopyAndReset() *Histogram {
newhist := h.Copy()
h.FullReset()
return newhist
}
func (h *Histogram) DecStrings() []string {
h.mutex.Lock()
defer h.mutex.Unlock()
if h.useLocks {
h.mutex.Lock()
defer h.mutex.Unlock()
}
out := make([]string, h.used)
for i, bin := range h.bvs[0:h.used] {
var buffer bytes.Buffer
buffer.WriteString("H[")
buffer.WriteString(fmt.Sprintf("%3.1e", bin.Value()))
buffer.WriteString(fmt.Sprintf("%3.1e", bin.value()))
buffer.WriteString("]=")
buffer.WriteString(fmt.Sprintf("%v", bin.count))
out[i] = buffer.String()
}
return out
}
// stringsToBin takes the output of DecStrings and deserializes it into a slice of bins
func stringsToBin(strs []string) ([]bin, error) {
bins := make([]bin, len(strs))
for i, str := range strs {
// H[0.0e+00]=1
// H[0.0e+00]= <1>
countString := strings.Split(str, "=")[1]
countInt, err := strconv.ParseInt(countString, 10, 64)
if err != nil {
return nil, err
}
// H[ <0.0> e+00]=1
valString := strings.Split(strings.Split(strings.Split(str, "=")[0], "e")[0], "[")[1]
valInt, err := strconv.ParseFloat(valString, 64)
if err != nil {
return nil, err
}
// H[0.0e <+00> ]=1
expString := strings.Split(strings.Split(strings.Split(str, "=")[0], "e")[1], "]")[0]
expInt, err := strconv.ParseInt(expString, 10, 8)
if err != nil {
return nil, err
}
bins[i] = *newBinRaw(int8(valInt*10), int8(expInt), uint64(countInt))
}
return bins, nil
}
// UnmarshalJSON expects the histogram to arrive as a base64-encoded serialized form
func (h *Histogram) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
data, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return err
}
h, err = Deserialize(bytes.NewBuffer(data))
return err
}
func (h *Histogram) MarshalJSON() ([]byte, error) {
buf := bytes.NewBuffer([]byte{})
err := h.SerializeB64(buf)
if err != nil {
return buf.Bytes(), err
}
return json.Marshal(buf.String())
}
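The new binary encoding round-trips through Serialize and Deserialize; SerializeB64 and MarshalJSON layer base64 and JSON on top of the same bytes. A small round-trip sketch using only the exported functions above:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/circonus-labs/circonusllhist"
)

func main() {
	h := circonusllhist.New()
	_ = h.RecordValue(0.42)

	// Write the bin table in the binary format defined above...
	var buf bytes.Buffer
	if err := h.Serialize(&buf); err != nil {
		panic(err)
	}

	// ...and read it back into a fresh histogram.
	h2, err := circonusllhist.Deserialize(&buf)
	if err != nil {
		panic(err)
	}
	// The copy answers the same approximate queries as the original.
	fmt.Println(h2.ValueAtQuantile(0.5))
}
```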

67
vendor/github.com/hashicorp/consul/api/README.md generated vendored Normal file
View File

@@ -0,0 +1,67 @@
Consul API client
=================
This `api` package provides programmatic access to the full Consul API.
Currently, all of the Consul APIs included in version 0.6.0 are supported.
Documentation
=============
The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api)
Usage
=====
Below is an example of using the Consul client:
```go
package main
import "github.com/hashicorp/consul/api"
import "fmt"
func main() {
// Get a new client
client, err := api.NewClient(api.DefaultConfig())
if err != nil {
panic(err)
}
// Get a handle to the KV API
kv := client.KV()
// PUT a new KV pair
p := &api.KVPair{Key: "REDIS_MAXCLIENTS", Value: []byte("1000")}
_, err = kv.Put(p, nil)
if err != nil {
panic(err)
}
// Lookup the pair
pair, _, err := kv.Get("REDIS_MAXCLIENTS", nil)
if err != nil {
panic(err)
}
fmt.Printf("KV: %v %s\n", pair.Key, pair.Value)
}
```
To run this example, start a Consul server:
```bash
consul agent -dev
```
Copy the code above into a file such as `main.go`.
Install and run. You'll see a key (`REDIS_MAXCLIENTS`) and value (`1000`) printed.
```bash
$ go get
$ go run main.go
KV: REDIS_MAXCLIENTS 1000
```
After running the code, you can also view the values in the Consul UI on your local machine at http://localhost:8500/ui/dc1/kv

1116
vendor/github.com/hashicorp/consul/api/acl.go generated vendored Normal file

File diff suppressed because it is too large

1035
vendor/github.com/hashicorp/consul/api/agent.go generated vendored Normal file

File diff suppressed because it is too large

966
vendor/github.com/hashicorp/consul/api/api.go generated vendored Normal file
View File

@@ -0,0 +1,966 @@
package api
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-rootcerts"
)
const (
// HTTPAddrEnvName defines an environment variable name which sets
// the HTTP address if there is no -http-addr specified.
HTTPAddrEnvName = "CONSUL_HTTP_ADDR"
// HTTPTokenEnvName defines an environment variable name which sets
// the HTTP token.
HTTPTokenEnvName = "CONSUL_HTTP_TOKEN"
// HTTPTokenFileEnvName defines an environment variable name which sets
// the HTTP token file.
HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE"
// HTTPAuthEnvName defines an environment variable name which sets
// the HTTP authentication header.
HTTPAuthEnvName = "CONSUL_HTTP_AUTH"
// HTTPSSLEnvName defines an environment variable name which sets
// whether or not to use HTTPS.
HTTPSSLEnvName = "CONSUL_HTTP_SSL"
// HTTPCAFile defines an environment variable name which sets the
// CA file to use for talking to Consul over TLS.
HTTPCAFile = "CONSUL_CACERT"
// HTTPCAPath defines an environment variable name which sets the
// path to a directory of CA certs to use for talking to Consul over TLS.
HTTPCAPath = "CONSUL_CAPATH"
// HTTPClientCert defines an environment variable name which sets the
// client cert file to use for talking to Consul over TLS.
HTTPClientCert = "CONSUL_CLIENT_CERT"
// HTTPClientKey defines an environment variable name which sets the
// client key file to use for talking to Consul over TLS.
HTTPClientKey = "CONSUL_CLIENT_KEY"
// HTTPTLSServerName defines an environment variable name which sets the
// server name to use as the SNI host when connecting via TLS
HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME"
// HTTPSSLVerifyEnvName defines an environment variable name which sets
// whether or not to disable certificate checking.
HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY"
// GRPCAddrEnvName defines an environment variable name which sets the gRPC
// address for consul connect envoy. Note this isn't actually used by the api
// client in this package but is defined here for consistency with all the
// other ENV names we use.
GRPCAddrEnvName = "CONSUL_GRPC_ADDR"
)
// QueryOptions are used to parameterize a query
type QueryOptions struct {
// Providing a datacenter overwrites the DC provided
// by the Config
Datacenter string
// AllowStale allows any Consul server (non-leader) to service
// a read. This allows for lower latency and higher throughput
AllowStale bool
// RequireConsistent forces the read to be fully consistent.
// This is more expensive but prevents ever performing a stale
// read.
RequireConsistent bool
// UseCache requests that the agent cache results locally. See
// https://www.consul.io/api/index.html#agent-caching for more details on the
// semantics.
UseCache bool
// MaxAge limits how old a cached value will be returned if UseCache is true.
// If there is a cached response that is older than the MaxAge, it is treated
// as a cache miss and a new fetch invoked. If the fetch fails, the error is
// returned. Clients that wish to allow for stale results on error can set
// StaleIfError to a longer duration to change this behavior. It is ignored
// if the endpoint supports background refresh caching. See
// https://www.consul.io/api/index.html#agent-caching for more details.
MaxAge time.Duration
// StaleIfError specifies how stale the client will accept a cached response
// if the servers are unavailable to fetch a fresh one. Only makes sense when
// UseCache is true and MaxAge is set to a lower, non-zero value. It is
// ignored if the endpoint supports background refresh caching. See
// https://www.consul.io/api/index.html#agent-caching for more details.
StaleIfError time.Duration
// WaitIndex is used to enable a blocking query. Waits
// until the timeout or the next index is reached
WaitIndex uint64
// WaitHash is used by some endpoints instead of WaitIndex to perform blocking
// on state based on a hash of the response rather than a monotonic index.
// This is required when the state being blocked on is not stored in Raft, for
// example agent-local proxy configuration.
WaitHash string
// WaitTime is used to bound the duration of a wait.
// Defaults to that of the Config, but can be overridden.
WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
// Near is used to provide a node name that will sort the results
// in ascending order based on the estimated round trip time from
// that node. Setting this to "_agent" will use the agent's node
// for the sort.
Near string
// NodeMeta is used to filter results by nodes with the given
// metadata key/value pairs. Currently, only one key/value pair can
// be provided for filtering.
NodeMeta map[string]string
// RelayFactor is used in keyring operations to cause responses to be
// relayed back to the sender through N other random nodes. Must be
// a value from 0 to 5 (inclusive).
RelayFactor uint8
// Connect filters prepared query execution to only include Connect-capable
// services. This currently affects prepared query execution.
Connect bool
// ctx is an optional context pass through to the underlying HTTP
// request layer. Use Context() and WithContext() to manage this.
ctx context.Context
// Filter requests filtering data prior to it being returned. The string
// is a go-bexpr compatible expression.
Filter string
}
func (o *QueryOptions) Context() context.Context {
if o != nil && o.ctx != nil {
return o.ctx
}
return context.Background()
}
func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions {
o2 := new(QueryOptions)
if o != nil {
*o2 = *o
}
o2.ctx = ctx
return o2
}
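WaitIndex and WaitTime are the handles for Consul blocking queries: pass the LastIndex from the previous response back in and the call blocks until the data changes or the wait expires. A hedged long-poll sketch over the KV endpoint shown in the README earlier in this commit (the key name is illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	kv := client.KV()

	var waitIndex uint64
	for {
		// Blocks until app/config changes or five minutes pass, whichever is first.
		pair, meta, err := kv.Get("app/config", &api.QueryOptions{
			WaitIndex: waitIndex,
			WaitTime:  5 * time.Minute,
		})
		if err != nil {
			time.Sleep(time.Second) // back off briefly on transient errors
			continue
		}
		waitIndex = meta.LastIndex
		if pair != nil {
			fmt.Printf("app/config changed: %s\n", pair.Value)
		}
	}
}
```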
// WriteOptions are used to parameterize a write
type WriteOptions struct {
// Providing a datacenter overwrites the DC provided
// by the Config
Datacenter string
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
// RelayFactor is used in keyring operations to cause responses to be
// relayed back to the sender through N other random nodes. Must be
// a value from 0 to 5 (inclusive).
RelayFactor uint8
// ctx is an optional context pass through to the underlying HTTP
// request layer. Use Context() and WithContext() to manage this.
ctx context.Context
}
func (o *WriteOptions) Context() context.Context {
if o != nil && o.ctx != nil {
return o.ctx
}
return context.Background()
}
func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions {
o2 := new(WriteOptions)
if o != nil {
*o2 = *o
}
o2.ctx = ctx
return o2
}
// QueryMeta is used to return meta data about a query
type QueryMeta struct {
// LastIndex. This can be used as a WaitIndex to perform
// a blocking query
LastIndex uint64
// LastContentHash. This can be used as a WaitHash to perform a blocking query
// for endpoints that support hash-based blocking. Endpoints that do not
// support it will return an empty hash.
LastContentHash string
// Time of last contact from the leader for the
// server servicing the request
LastContact time.Duration
// Is there a known leader
KnownLeader bool
// How long did the request take
RequestTime time.Duration
// Is address translation enabled for HTTP responses on this agent
AddressTranslationEnabled bool
// CacheHit is true if the result was served from agent-local cache.
CacheHit bool
// CacheAge is set if request was ?cached and indicates how stale the cached
// response is.
CacheAge time.Duration
}
// WriteMeta is used to return meta data about a write
type WriteMeta struct {
// How long did the request take
RequestTime time.Duration
}
// HttpBasicAuth is used to authenticate the HTTP client with HTTP Basic Authentication
type HttpBasicAuth struct {
// Username to use for HTTP Basic Authentication
Username string
// Password to use for HTTP Basic Authentication
Password string
}
// Config is used to configure the creation of a client
type Config struct {
// Address is the address of the Consul server
Address string
// Scheme is the URI scheme for the Consul server
Scheme string
// Datacenter to use. If not provided, the default agent datacenter is used.
Datacenter string
// Transport is the Transport to use for the http client.
Transport *http.Transport
// HttpClient is the client to use. Default will be
// used if not provided.
HttpClient *http.Client
// HttpAuth is the auth info to use for http access.
HttpAuth *HttpBasicAuth
// WaitTime limits how long a Watch will block. If not provided,
// the agent default values will be used.
WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
// TokenFile is a file containing the current token to use for this client.
// If provided it is read once at startup and never again.
TokenFile string
TLSConfig TLSConfig
}
// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
// Consul using TLS.
type TLSConfig struct {
// Address is the optional address of the Consul server. The port, if any
// will be removed from here and this will be set to the ServerName of the
// resulting config.
Address string
// CAFile is the optional path to the CA certificate used for Consul
// communication, defaults to the system bundle if not specified.
CAFile string
// CAPath is the optional path to a directory of CA certificates to use for
// Consul communication, defaults to the system bundle if not specified.
CAPath string
// CertFile is the optional path to the certificate for Consul
// communication. If this is set then you need to also set KeyFile.
CertFile string
// KeyFile is the optional path to the private key for Consul communication.
// If this is set then you need to also set CertFile.
KeyFile string
// InsecureSkipVerify if set to true will disable TLS host verification.
InsecureSkipVerify bool
}
// DefaultConfig returns a default configuration for the client. By default this
// will pool and reuse idle connections to Consul. If you have a long-lived
// client object, this is the desired behavior and should make the most efficient
// use of the connections to Consul. If you don't reuse a client object, which
// is not recommended, then you may notice idle connections building up over
// time. To avoid this, use the DefaultNonPooledConfig() instead.
func DefaultConfig() *Config {
return defaultConfig(cleanhttp.DefaultPooledTransport)
}
// DefaultNonPooledConfig returns a default configuration for the client which
// does not pool connections. This isn't a recommended configuration because it
// will reconnect to Consul on every request, but this is useful to avoid the
// accumulation of idle connections if you make many client objects during the
// lifetime of your application.
func DefaultNonPooledConfig() *Config {
return defaultConfig(cleanhttp.DefaultTransport)
}
// defaultConfig returns the default configuration for the client, using the
// given function to make the transport.
func defaultConfig(transportFn func() *http.Transport) *Config {
config := &Config{
Address: "127.0.0.1:8500",
Scheme: "http",
Transport: transportFn(),
}
if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
config.Address = addr
}
if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" {
config.TokenFile = tokenFile
}
if token := os.Getenv(HTTPTokenEnvName); token != "" {
config.Token = token
}
if auth := os.Getenv(HTTPAuthEnvName); auth != "" {
var username, password string
if strings.Contains(auth, ":") {
split := strings.SplitN(auth, ":", 2)
username = split[0]
password = split[1]
} else {
username = auth
}
config.HttpAuth = &HttpBasicAuth{
Username: username,
Password: password,
}
}
if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
enabled, err := strconv.ParseBool(ssl)
if err != nil {
log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err)
}
if enabled {
config.Scheme = "https"
}
}
if v := os.Getenv(HTTPTLSServerName); v != "" {
config.TLSConfig.Address = v
}
if v := os.Getenv(HTTPCAFile); v != "" {
config.TLSConfig.CAFile = v
}
if v := os.Getenv(HTTPCAPath); v != "" {
config.TLSConfig.CAPath = v
}
if v := os.Getenv(HTTPClientCert); v != "" {
config.TLSConfig.CertFile = v
}
if v := os.Getenv(HTTPClientKey); v != "" {
config.TLSConfig.KeyFile = v
}
if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" {
doVerify, err := strconv.ParseBool(v)
if err != nil {
log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err)
}
if !doVerify {
config.TLSConfig.InsecureSkipVerify = true
}
}
return config
}
// SetupTLSConfig is used to generate a TLSClientConfig that's useful for talking to
// Consul using TLS.
func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) {
tlsClientConfig := &tls.Config{
InsecureSkipVerify: tlsConfig.InsecureSkipVerify,
}
if tlsConfig.Address != "" {
server := tlsConfig.Address
hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]")
if hasPort {
var err error
server, _, err = net.SplitHostPort(server)
if err != nil {
return nil, err
}
}
tlsClientConfig.ServerName = server
}
if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" {
tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile)
if err != nil {
return nil, err
}
tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
}
if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" {
rootConfig := &rootcerts.Config{
CAFile: tlsConfig.CAFile,
CAPath: tlsConfig.CAPath,
}
if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil {
return nil, err
}
}
return tlsClientConfig, nil
}
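Most callers never invoke SetupTLSConfig directly; they populate Config.TLSConfig and let NewClient wire the resulting tls.Config onto the transport. A sketch with placeholder address and certificate paths:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	cfg := api.DefaultConfig()
	cfg.Address = "consul.example.com:8501" // placeholder address
	cfg.Scheme = "https"
	cfg.TLSConfig = api.TLSConfig{
		CAFile:   "/etc/consul.d/ca.pem", // placeholder paths
		CertFile: "/etc/consul.d/client.pem",
		KeyFile:  "/etc/consul.d/client-key.pem",
	}

	client, err := api.NewClient(cfg)
	if err != nil {
		panic(err)
	}
	// Any read works as a smoke test; Datacenters is defined later in this commit.
	dcs, err := client.Catalog().Datacenters()
	if err != nil {
		panic(err)
	}
	fmt.Println("datacenters:", dcs)
}
```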
func (c *Config) GenerateEnv() []string {
env := make([]string, 0, 10)
env = append(env,
fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address),
fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token),
fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile),
fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"),
fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile),
fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath),
fmt.Sprintf("%s=%s", HTTPClientCert, c.TLSConfig.CertFile),
fmt.Sprintf("%s=%s", HTTPClientKey, c.TLSConfig.KeyFile),
fmt.Sprintf("%s=%s", HTTPTLSServerName, c.TLSConfig.Address),
fmt.Sprintf("%s=%t", HTTPSSLVerifyEnvName, !c.TLSConfig.InsecureSkipVerify))
if c.HttpAuth != nil {
env = append(env, fmt.Sprintf("%s=%s:%s", HTTPAuthEnvName, c.HttpAuth.Username, c.HttpAuth.Password))
} else {
env = append(env, fmt.Sprintf("%s=", HTTPAuthEnvName))
}
return env
}
// Client provides a client to the Consul API
type Client struct {
config Config
}
// NewClient returns a new client
func NewClient(config *Config) (*Client, error) {
// bootstrap the config
defConfig := DefaultConfig()
if len(config.Address) == 0 {
config.Address = defConfig.Address
}
if len(config.Scheme) == 0 {
config.Scheme = defConfig.Scheme
}
if config.Transport == nil {
config.Transport = defConfig.Transport
}
if config.TLSConfig.Address == "" {
config.TLSConfig.Address = defConfig.TLSConfig.Address
}
if config.TLSConfig.CAFile == "" {
config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile
}
if config.TLSConfig.CAPath == "" {
config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath
}
if config.TLSConfig.CertFile == "" {
config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile
}
if config.TLSConfig.KeyFile == "" {
config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile
}
if !config.TLSConfig.InsecureSkipVerify {
config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify
}
if config.HttpClient == nil {
var err error
config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig)
if err != nil {
return nil, err
}
}
parts := strings.SplitN(config.Address, "://", 2)
if len(parts) == 2 {
switch parts[0] {
case "http":
config.Scheme = "http"
case "https":
config.Scheme = "https"
case "unix":
trans := cleanhttp.DefaultTransport()
trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", parts[1])
}
config.HttpClient = &http.Client{
Transport: trans,
}
default:
return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0])
}
config.Address = parts[1]
}
// If the TokenFile is set, always use that, even if a Token is configured.
// This is because when TokenFile is set it is read into the Token field.
// We want any derived clients to have to re-read the token file.
if config.TokenFile != "" {
data, err := ioutil.ReadFile(config.TokenFile)
if err != nil {
return nil, fmt.Errorf("Error loading token file: %s", err)
}
if token := strings.TrimSpace(string(data)); token != "" {
config.Token = token
}
}
if config.Token == "" {
config.Token = defConfig.Token
}
return &Client{config: *config}, nil
}
// NewHttpClient returns an http client configured with the given Transport and TLS
// config.
func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) {
client := &http.Client{
Transport: transport,
}
// TODO (slackpad) - Once we get some run time on the HTTP/2 support we
// should turn it on by default if TLS is enabled. We would basically
// just need to call http2.ConfigureTransport(transport) here. We also
// don't want to introduce another external dependency on
// golang.org/x/net/http2 at this time. For a complete recipe for how
// to enable HTTP/2 support on a transport suitable for the API client
// library see agent/http_test.go:TestHTTPServer_H2.
if transport.TLSClientConfig == nil {
tlsClientConfig, err := SetupTLSConfig(&tlsConf)
if err != nil {
return nil, err
}
transport.TLSClientConfig = tlsClientConfig
}
return client, nil
}
// request is used to help build up a request
type request struct {
config *Config
method string
url *url.URL
params url.Values
body io.Reader
header http.Header
obj interface{}
ctx context.Context
}
// setQueryOptions is used to annotate the request with
// additional query options
func (r *request) setQueryOptions(q *QueryOptions) {
if q == nil {
return
}
if q.Datacenter != "" {
r.params.Set("dc", q.Datacenter)
}
if q.AllowStale {
r.params.Set("stale", "")
}
if q.RequireConsistent {
r.params.Set("consistent", "")
}
if q.WaitIndex != 0 {
r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
}
if q.WaitTime != 0 {
r.params.Set("wait", durToMsec(q.WaitTime))
}
if q.WaitHash != "" {
r.params.Set("hash", q.WaitHash)
}
if q.Token != "" {
r.header.Set("X-Consul-Token", q.Token)
}
if q.Near != "" {
r.params.Set("near", q.Near)
}
if q.Filter != "" {
r.params.Set("filter", q.Filter)
}
if len(q.NodeMeta) > 0 {
for key, value := range q.NodeMeta {
r.params.Add("node-meta", key+":"+value)
}
}
if q.RelayFactor != 0 {
r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
}
if q.Connect {
r.params.Set("connect", "true")
}
if q.UseCache && !q.RequireConsistent {
r.params.Set("cached", "")
cc := []string{}
if q.MaxAge > 0 {
cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds()))
}
if q.StaleIfError > 0 {
cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds()))
}
if len(cc) > 0 {
r.header.Set("Cache-Control", strings.Join(cc, ", "))
}
}
r.ctx = q.ctx
}
// durToMsec converts a duration to a millisecond specified string. If the
// user selected a positive value that rounds to 0 ms, then we will use 1 ms
// so they get a short delay, otherwise Consul will translate the 0 ms into
// a huge default delay.
func durToMsec(dur time.Duration) string {
ms := dur / time.Millisecond
if dur > 0 && ms == 0 {
ms = 1
}
return fmt.Sprintf("%dms", ms)
}
// serverError is a string we look for to detect 500 errors.
const serverError = "Unexpected response code: 500"
// IsRetryableError returns true for 500 errors from the Consul servers, and
// network connection errors. These are usually retryable at a later time.
// This applies to reads but NOT to writes. This may return true for errors
// on writes that may have still gone through, so do not use this to retry
// any write operations.
func IsRetryableError(err error) bool {
if err == nil {
return false
}
if _, ok := err.(net.Error); ok {
return true
}
// TODO (slackpad) - Make a real error type here instead of using
// a string check.
return strings.Contains(err.Error(), serverError)
}
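As the comment stresses, IsRetryableError is only safe for reads. A hedged retry wrapper around a catalog read, for illustration only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/api"
)

// listNodes retries transient failures (500s and network errors) a few times.
// Writes must not be retried this way, since they may already have gone through.
func listNodes(client *api.Client) ([]*api.Node, error) {
	var lastErr error
	for attempt := 1; attempt <= 3; attempt++ {
		nodes, _, err := client.Catalog().Nodes(nil)
		if err == nil {
			return nodes, nil
		}
		if !api.IsRetryableError(err) {
			return nil, err
		}
		lastErr = err
		time.Sleep(time.Duration(attempt) * 500 * time.Millisecond)
	}
	return nil, fmt.Errorf("catalog nodes: %v", lastErr)
}

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	nodes, err := listNodes(client)
	if err != nil {
		panic(err)
	}
	fmt.Println("nodes:", len(nodes))
}
```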
// setWriteOptions is used to annotate the request with
// additional write options
func (r *request) setWriteOptions(q *WriteOptions) {
if q == nil {
return
}
if q.Datacenter != "" {
r.params.Set("dc", q.Datacenter)
}
if q.Token != "" {
r.header.Set("X-Consul-Token", q.Token)
}
if q.RelayFactor != 0 {
r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
}
r.ctx = q.ctx
}
// toHTTP converts the request to an HTTP request
func (r *request) toHTTP() (*http.Request, error) {
// Encode the query parameters
r.url.RawQuery = r.params.Encode()
// Check if we should encode the body
if r.body == nil && r.obj != nil {
b, err := encodeBody(r.obj)
if err != nil {
return nil, err
}
r.body = b
}
// Create the HTTP request
req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
if err != nil {
return nil, err
}
req.URL.Host = r.url.Host
req.URL.Scheme = r.url.Scheme
req.Host = r.url.Host
req.Header = r.header
// Setup auth
if r.config.HttpAuth != nil {
req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
}
if r.ctx != nil {
return req.WithContext(r.ctx), nil
}
return req, nil
}
// newRequest is used to create a new request
func (c *Client) newRequest(method, path string) *request {
r := &request{
config: &c.config,
method: method,
url: &url.URL{
Scheme: c.config.Scheme,
Host: c.config.Address,
Path: path,
},
params: make(map[string][]string),
header: make(http.Header),
}
if c.config.Datacenter != "" {
r.params.Set("dc", c.config.Datacenter)
}
if c.config.WaitTime != 0 {
r.params.Set("wait", durToMsec(r.config.WaitTime))
}
if c.config.Token != "" {
r.header.Set("X-Consul-Token", r.config.Token)
}
return r
}
// doRequest runs a request with our client
func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
req, err := r.toHTTP()
if err != nil {
return 0, nil, err
}
start := time.Now()
resp, err := c.config.HttpClient.Do(req)
diff := time.Since(start)
return diff, resp, err
}
// query is used to do a GET request against an endpoint
// and deserialize the response into an interface using
// standard Consul conventions.
func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
r := c.newRequest("GET", endpoint)
r.setQueryOptions(q)
rtt, resp, err := c.doRequest(r)
if err != nil {
return nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if err := decodeBody(resp, out); err != nil {
return nil, err
}
return qm, nil
}
// write is used to do a PUT request against an endpoint
// and serialize/deserialized using the standard Consul conventions.
func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
r := c.newRequest("PUT", endpoint)
r.setWriteOptions(q)
r.obj = in
rtt, resp, err := requireOK(c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
if out != nil {
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
} else if _, err := ioutil.ReadAll(resp.Body); err != nil {
return nil, err
}
return wm, nil
}
// parseQueryMeta is used to help parse query meta-data
//
// TODO(rb): bug? the error from this function is never handled
func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
header := resp.Header
// Parse the X-Consul-Index (if it's set - hash based blocking queries don't
// set this)
if indexStr := header.Get("X-Consul-Index"); indexStr != "" {
index, err := strconv.ParseUint(indexStr, 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
}
q.LastIndex = index
}
q.LastContentHash = header.Get("X-Consul-ContentHash")
// Parse the X-Consul-LastContact
last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
}
q.LastContact = time.Duration(last) * time.Millisecond
// Parse the X-Consul-KnownLeader
switch header.Get("X-Consul-KnownLeader") {
case "true":
q.KnownLeader = true
default:
q.KnownLeader = false
}
// Parse X-Consul-Translate-Addresses
switch header.Get("X-Consul-Translate-Addresses") {
case "true":
q.AddressTranslationEnabled = true
default:
q.AddressTranslationEnabled = false
}
// Parse Cache info
if cacheStr := header.Get("X-Cache"); cacheStr != "" {
q.CacheHit = strings.EqualFold(cacheStr, "HIT")
}
if ageStr := header.Get("Age"); ageStr != "" {
age, err := strconv.ParseUint(ageStr, 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse Age Header: %v", err)
}
q.CacheAge = time.Duration(age) * time.Second
}
return nil
}
// decodeBody is used to JSON decode a body
func decodeBody(resp *http.Response, out interface{}) error {
dec := json.NewDecoder(resp.Body)
return dec.Decode(out)
}
// encodeBody is used to encode a request body
func encodeBody(obj interface{}) (io.Reader, error) {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)
if err := enc.Encode(obj); err != nil {
return nil, err
}
return buf, nil
}
// requireOK is used to wrap doRequest and check for a 200
func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
if e != nil {
if resp != nil {
resp.Body.Close()
}
return d, nil, e
}
if resp.StatusCode != 200 {
return d, nil, generateUnexpectedResponseCodeError(resp)
}
return d, resp, nil
}
func (req *request) filterQuery(filter string) {
if filter == "" {
return
}
req.params.Set("filter", filter)
}
// generateUnexpectedResponseCodeError consumes the rest of the body, closes
// the body stream and generates an error indicating the status code was
// unexpected.
func generateUnexpectedResponseCodeError(resp *http.Response) error {
var buf bytes.Buffer
io.Copy(&buf, resp.Body)
resp.Body.Close()
return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
}
func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) {
if e != nil {
if resp != nil {
resp.Body.Close()
}
return false, d, nil, e
}
switch resp.StatusCode {
case 200:
return true, d, resp, nil
case 404:
return false, d, resp, nil
default:
return false, d, nil, generateUnexpectedResponseCodeError(resp)
}
}

244
vendor/github.com/hashicorp/consul/api/catalog.go generated vendored Normal file
View File

@@ -0,0 +1,244 @@
package api
type Weights struct {
Passing int
Warning int
}
type Node struct {
ID string
Node string
Address string
Datacenter string
TaggedAddresses map[string]string
Meta map[string]string
CreateIndex uint64
ModifyIndex uint64
}
type CatalogService struct {
ID string
Node string
Address string
Datacenter string
TaggedAddresses map[string]string
NodeMeta map[string]string
ServiceID string
ServiceName string
ServiceAddress string
ServiceTags []string
ServiceMeta map[string]string
ServicePort int
ServiceWeights Weights
ServiceEnableTagOverride bool
// DEPRECATED (ProxyDestination) - remove the next comment!
// We forgot to ever add ServiceProxyDestination here so no need to deprecate!
ServiceProxy *AgentServiceConnectProxyConfig
CreateIndex uint64
Checks HealthChecks
ModifyIndex uint64
}
type CatalogNode struct {
Node *Node
Services map[string]*AgentService
}
type CatalogRegistration struct {
ID string
Node string
Address string
TaggedAddresses map[string]string
NodeMeta map[string]string
Datacenter string
Service *AgentService
Check *AgentCheck
Checks HealthChecks
SkipNodeUpdate bool
}
type CatalogDeregistration struct {
Node string
Address string // Obsolete.
Datacenter string
ServiceID string
CheckID string
}
// Catalog can be used to query the Catalog endpoints
type Catalog struct {
c *Client
}
// Catalog returns a handle to the catalog endpoints
func (c *Client) Catalog() *Catalog {
return &Catalog{c}
}
func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/catalog/register")
r.setWriteOptions(q)
r.obj = reg
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/catalog/deregister")
r.setWriteOptions(q)
r.obj = dereg
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// Datacenters is used to query for all the known datacenters
func (c *Catalog) Datacenters() ([]string, error) {
r := c.c.newRequest("GET", "/v1/catalog/datacenters")
_, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []string
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Nodes is used to query all the known nodes
func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/nodes")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*Node
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Services is used to query for all known services
func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/services")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out map[string][]string
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Service is used to query catalog entries for a given service
func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
var tags []string
if tag != "" {
tags = []string{tag}
}
return c.service(service, tags, q, false)
}
// Supports multiple tags for filtering
func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
return c.service(service, tags, q, false)
}
// Connect is used to query catalog entries for a given Connect-enabled service
func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
var tags []string
if tag != "" {
tags = []string{tag}
}
return c.service(service, tags, q, true)
}
// Supports multiple tags for filtering
func (c *Catalog) ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
return c.service(service, tags, q, true)
}
func (c *Catalog) service(service string, tags []string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) {
path := "/v1/catalog/service/" + service
if connect {
path = "/v1/catalog/connect/" + service
}
r := c.c.newRequest("GET", path)
r.setQueryOptions(q)
if len(tags) > 0 {
for _, tag := range tags {
r.params.Add("tag", tag)
}
}
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*CatalogService
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Node is used to query for service information about a single node
func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out *CatalogNode
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
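A quick sketch of a catalog read using the Service helper above; the service and tag names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// All registered instances of "web" carrying the "primary" tag.
	services, meta, err := client.Catalog().Service("web", "primary", nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("index %d, %d instances\n", meta.LastIndex, len(services))
	for _, s := range services {
		fmt.Printf("%s -> %s:%d\n", s.Node, s.ServiceAddress, s.ServicePort)
	}
}
```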

255
vendor/github.com/hashicorp/consul/api/config_entry.go generated vendored Normal file
View File

@@ -0,0 +1,255 @@
package api
import (
"bytes"
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
"github.com/mitchellh/mapstructure"
)
const (
ServiceDefaults string = "service-defaults"
ProxyDefaults string = "proxy-defaults"
ProxyConfigGlobal string = "global"
)
type ConfigEntry interface {
GetKind() string
GetName() string
GetCreateIndex() uint64
GetModifyIndex() uint64
}
type ServiceConfigEntry struct {
Kind string
Name string
Protocol string
CreateIndex uint64
ModifyIndex uint64
}
func (s *ServiceConfigEntry) GetKind() string {
return s.Kind
}
func (s *ServiceConfigEntry) GetName() string {
return s.Name
}
func (s *ServiceConfigEntry) GetCreateIndex() uint64 {
return s.CreateIndex
}
func (s *ServiceConfigEntry) GetModifyIndex() uint64 {
return s.ModifyIndex
}
type ProxyConfigEntry struct {
Kind string
Name string
Config map[string]interface{}
CreateIndex uint64
ModifyIndex uint64
}
func (p *ProxyConfigEntry) GetKind() string {
return p.Kind
}
func (p *ProxyConfigEntry) GetName() string {
return p.Name
}
func (p *ProxyConfigEntry) GetCreateIndex() uint64 {
return p.CreateIndex
}
func (p *ProxyConfigEntry) GetModifyIndex() uint64 {
return p.ModifyIndex
}
type rawEntryListResponse struct {
kind string
Entries []map[string]interface{}
}
func makeConfigEntry(kind, name string) (ConfigEntry, error) {
switch kind {
case ServiceDefaults:
return &ServiceConfigEntry{Name: name}, nil
case ProxyDefaults:
return &ProxyConfigEntry{Name: name}, nil
default:
return nil, fmt.Errorf("invalid config entry kind: %s", kind)
}
}
func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) {
var entry ConfigEntry
kindVal, ok := raw["Kind"]
if !ok {
kindVal, ok = raw["kind"]
}
if !ok {
return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level")
}
if kindStr, ok := kindVal.(string); ok {
newEntry, err := makeConfigEntry(kindStr, "")
if err != nil {
return nil, err
}
entry = newEntry
} else {
return nil, fmt.Errorf("Kind value in payload is not a string")
}
decodeConf := &mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
Result: &entry,
WeaklyTypedInput: true,
}
decoder, err := mapstructure.NewDecoder(decodeConf)
if err != nil {
return nil, err
}
return entry, decoder.Decode(raw)
}
func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) {
var raw map[string]interface{}
if err := json.Unmarshal(data, &raw); err != nil {
return nil, err
}
return DecodeConfigEntry(raw)
}
// ConfigEntries can be used to query the Config endpoints
type ConfigEntries struct {
c *Client
}
// ConfigEntries returns a handle to the Config endpoints
func (c *Client) ConfigEntries() *ConfigEntries {
return &ConfigEntries{c}
}
func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) {
if kind == "" || name == "" {
return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty")
}
entry, err := makeConfigEntry(kind, name)
if err != nil {
return nil, nil, err
}
r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name))
r.setQueryOptions(q)
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if err := decodeBody(resp, entry); err != nil {
return nil, nil, err
}
return entry, qm, nil
}
func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) {
if kind == "" {
return nil, nil, fmt.Errorf("The kind parameter must not be empty")
}
r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind))
r.setQueryOptions(q)
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var raw []map[string]interface{}
if err := decodeBody(resp, &raw); err != nil {
return nil, nil, err
}
var entries []ConfigEntry
for _, rawEntry := range raw {
entry, err := DecodeConfigEntry(rawEntry)
if err != nil {
return nil, nil, err
}
entries = append(entries, entry)
}
return entries, qm, nil
}
func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) {
return conf.set(entry, nil, w)
}
func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) {
return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w)
}
func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) {
r := conf.c.newRequest("PUT", "/v1/config")
r.setWriteOptions(w)
for param, value := range params {
r.params.Set(param, value)
}
r.obj = entry
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(buf.String(), "true")
wm := &WriteMeta{RequestTime: rtt}
return res, wm, nil
}
func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) {
if kind == "" || name == "" {
return nil, fmt.Errorf("Both kind and name parameters must not be empty")
}
r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name))
r.setWriteOptions(w)
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}
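A sketch (not part of the vendored file) of writing and reading back a service-defaults entry with the ConfigEntries client; the "web" service name is illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }
    entries := client.ConfigEntries()

    // Set upserts a config entry; here a service-defaults entry for an
    // illustrative "web" service.
    ok, _, err := entries.Set(&api.ServiceConfigEntry{
        Kind:     api.ServiceDefaults,
        Name:     "web",
        Protocol: "http",
    }, nil)
    if err != nil || !ok {
        log.Fatalf("set failed: ok=%v err=%v", ok, err)
    }

    // Get returns the decoded entry for the given kind/name pair.
    entry, _, err := entries.Get(api.ServiceDefaults, "web", nil)
    if err != nil {
        log.Fatal(err)
    }
    if svc, ok := entry.(*api.ServiceConfigEntry); ok {
        fmt.Println(svc.Name, svc.Protocol)
    }
}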

vendor/github.com/hashicorp/consul/api/connect.go generated vendored Normal file

@ -0,0 +1,12 @@
package api
// Connect can be used to work with endpoints related to Connect, the
// feature for securely connecting services within Consul.
type Connect struct {
c *Client
}
// Connect returns a handle to the connect-related endpoints
func (c *Client) Connect() *Connect {
return &Connect{c}
}

vendor/github.com/hashicorp/consul/api/connect_ca.go generated vendored Normal file

@ -0,0 +1,174 @@
package api
import (
"fmt"
"time"
"github.com/mitchellh/mapstructure"
)
// CAConfig is the structure for the Connect CA configuration.
type CAConfig struct {
// Provider is the CA provider implementation to use.
Provider string
// Configuration is arbitrary configuration for the provider. This
// should only contain primitive values and containers (such as lists
// and maps).
Config map[string]interface{}
CreateIndex uint64
ModifyIndex uint64
}
// CommonCAProviderConfig is the common options available to all CA providers.
type CommonCAProviderConfig struct {
LeafCertTTL time.Duration
SkipValidate bool
CSRMaxPerSecond float32
CSRMaxConcurrent int
}
// ConsulCAProviderConfig is the config for the built-in Consul CA provider.
type ConsulCAProviderConfig struct {
CommonCAProviderConfig `mapstructure:",squash"`
PrivateKey string
RootCert string
RotationPeriod time.Duration
}
// ParseConsulCAConfig takes a raw config map and returns a parsed
// ConsulCAProviderConfig.
func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) {
var config ConsulCAProviderConfig
decodeConf := &mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
Result: &config,
WeaklyTypedInput: true,
}
decoder, err := mapstructure.NewDecoder(decodeConf)
if err != nil {
return nil, err
}
if err := decoder.Decode(raw); err != nil {
return nil, fmt.Errorf("error decoding config: %s", err)
}
return &config, nil
}
// CARootList is the structure for the results of listing roots.
type CARootList struct {
ActiveRootID string
TrustDomain string
Roots []*CARoot
}
// CARoot represents a root CA certificate that is trusted.
type CARoot struct {
// ID is a globally unique ID (UUID) representing this CA root.
ID string
// Name is a human-friendly name for this CA root. This value is
// opaque to Consul and is not used for anything internally.
Name string
// RootCertPEM is the PEM-encoded public certificate.
RootCertPEM string `json:"RootCert"`
// Active is true if this is the current active CA. This must only
// be true for exactly one CA. For any method that modifies roots in the
// state store, tests should be written to verify that multiple roots
// cannot be active.
Active bool
CreateIndex uint64
ModifyIndex uint64
}
// LeafCert is a certificate that has been issued by a Connect CA.
type LeafCert struct {
// SerialNumber is the unique serial number for this certificate.
// This is encoded in standard hex separated by :.
SerialNumber string
// CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private
// key for that cert, respectively. This should not be stored in the
// state store, but is present in the sign API response.
CertPEM string `json:",omitempty"`
PrivateKeyPEM string `json:",omitempty"`
// Service is the name of the service for which the cert was issued.
// ServiceURI is the cert URI value.
Service string
ServiceURI string
// ValidAfter and ValidBefore are the validity periods for the
// certificate.
ValidAfter time.Time
ValidBefore time.Time
CreateIndex uint64
ModifyIndex uint64
}
// CARoots queries the list of available roots.
func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/ca/roots")
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out CARootList
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// CAGetConfig returns the current CA configuration.
func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/ca/configuration")
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out CAConfig
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// CASetConfig sets the current CA configuration.
func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, error) {
r := h.c.newRequest("PUT", "/v1/connect/ca/configuration")
r.setWriteOptions(q)
r.obj = conf
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
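An illustrative sketch (not part of the vendored file) that inspects the Connect CA roots and the active CA provider configuration.

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }
    connect := client.Connect()

    // CARoots lists the trusted root certificates for the cluster.
    roots, _, err := connect.CARoots(nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("trust domain:", roots.TrustDomain)
    for _, root := range roots.Roots {
        fmt.Println(root.ID, "active:", root.Active)
    }

    // CAGetConfig shows which CA provider is configured.
    conf, _, err := connect.CAGetConfig(nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("provider:", conf.Provider)
}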


@ -0,0 +1,302 @@
package api
import (
"bytes"
"fmt"
"io"
"time"
)
// Intention defines an intention for the Connect Service Graph. This defines
// the allowed or denied behavior of a connection between two services using
// Connect.
type Intention struct {
// ID is the UUID-based ID for the intention, always generated by Consul.
ID string
// Description is a human-friendly description of this intention.
// It is opaque to Consul and is only stored and transferred in API
// requests.
Description string
// SourceNS, SourceName are the namespace and name, respectively, of
// the source service. Either of these may be the wildcard "*", but only
// the full value can be a wildcard. Partial wildcards are not allowed.
// The source may also be a non-Consul service, as specified by SourceType.
//
// DestinationNS, DestinationName is the same, but for the destination
// service. The same rules apply. The destination is always a Consul
// service.
SourceNS, SourceName string
DestinationNS, DestinationName string
// SourceType is the type of the value for the source.
SourceType IntentionSourceType
// Action is whether this is a whitelist or blacklist intention.
Action IntentionAction
// DefaultAddr, DefaultPort of the local listening proxy (if any) to
// make this connection.
DefaultAddr string
DefaultPort int
// Meta is arbitrary metadata associated with the intention. This is
// opaque to Consul but is served in API responses.
Meta map[string]string
// Precedence is the order that the intention will be applied, with
// larger numbers being applied first. This is a read-only field, on
// any intention update it is updated.
Precedence int
// CreatedAt and UpdatedAt keep track of when this record was created
// or modified.
CreatedAt, UpdatedAt time.Time
CreateIndex uint64
ModifyIndex uint64
}
// String returns human-friendly output describing this intention.
func (i *Intention) String() string {
return fmt.Sprintf("%s => %s (%s)",
i.SourceString(),
i.DestinationString(),
i.Action)
}
// SourceString returns the namespace/name format for the source, or
// just "name" if the namespace is the default namespace.
func (i *Intention) SourceString() string {
return i.partString(i.SourceNS, i.SourceName)
}
// DestinationString returns the namespace/name format for the destination, or
// just "name" if the namespace is the default namespace.
func (i *Intention) DestinationString() string {
return i.partString(i.DestinationNS, i.DestinationName)
}
func (i *Intention) partString(ns, n string) string {
// For now we omit the default namespace from the output. In the future
// we might want to look at this and show this in a multi-namespace world.
if ns != "" && ns != IntentionDefaultNamespace {
n = ns + "/" + n
}
return n
}
// IntentionDefaultNamespace is the default namespace value.
const IntentionDefaultNamespace = "default"
// IntentionAction is the action that the intention represents. This
// can be "allow" or "deny" to whitelist or blacklist intentions.
type IntentionAction string
const (
IntentionActionAllow IntentionAction = "allow"
IntentionActionDeny IntentionAction = "deny"
)
// IntentionSourceType is the type of the source within an intention.
type IntentionSourceType string
const (
// IntentionSourceConsul is a service within the Consul catalog.
IntentionSourceConsul IntentionSourceType = "consul"
)
// IntentionMatch are the arguments for the intention match API.
type IntentionMatch struct {
By IntentionMatchType
Names []string
}
// IntentionMatchType is the target for a match request. For example,
// matching by source will look for all intentions that match the given
// source value.
type IntentionMatchType string
const (
IntentionMatchSource IntentionMatchType = "source"
IntentionMatchDestination IntentionMatchType = "destination"
)
// IntentionCheck are the arguments for the intention check API. For
// more documentation see the IntentionCheck function.
type IntentionCheck struct {
// Source and Destination are the source and destination values to
// check. The destination is always a Consul service, but the source
// may be other values as defined by the SourceType.
Source, Destination string
// SourceType is the type of the value for the source.
SourceType IntentionSourceType
}
// Intentions returns the list of intentions.
func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/intentions")
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*Intention
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// IntentionGet retrieves a single intention.
func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/intentions/"+id)
r.setQueryOptions(q)
rtt, resp, err := h.c.doRequest(r)
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if resp.StatusCode == 404 {
return nil, qm, nil
} else if resp.StatusCode != 200 {
var buf bytes.Buffer
io.Copy(&buf, resp.Body)
return nil, nil, fmt.Errorf(
"Unexpected response %d: %s", resp.StatusCode, buf.String())
}
var out Intention
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// IntentionDelete deletes a single intention.
func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) {
r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
return qm, nil
}
// IntentionMatch returns the list of intentions that match a given source
// or destination. The returned intentions are ordered by precedence where
// result[0] is the highest precedence (if that matches, then that rule overrides
// all other rules).
//
// Matching can be done for multiple names at the same time. The resulting
// map is keyed by the given names. Casing is preserved.
func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[string][]*Intention, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/intentions/match")
r.setQueryOptions(q)
r.params.Set("by", string(args.By))
for _, name := range args.Names {
r.params.Add("name", name)
}
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out map[string][]*Intention
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// IntentionCheck returns whether a given source/destination would be allowed
// or not given the current set of intentions and the configuration of Consul.
func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/intentions/check")
r.setQueryOptions(q)
r.params.Set("source", args.Source)
r.params.Set("destination", args.Destination)
if args.SourceType != "" {
r.params.Set("source-type", string(args.SourceType))
}
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out struct{ Allowed bool }
if err := decodeBody(resp, &out); err != nil {
return false, nil, err
}
return out.Allowed, qm, nil
}
// IntentionCreate will create a new intention. The ID in the given
// structure must be empty and a generated ID will be returned on
// success.
func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) {
r := c.c.newRequest("POST", "/v1/connect/intentions")
r.setWriteOptions(q)
r.obj = ixn
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// IntentionUpdate will update an existing intention. The ID in the given
// structure must be non-empty.
func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID)
r.setWriteOptions(q)
r.obj = ixn
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
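A sketch (not part of the vendored file) that creates an allow intention and then asks whether a connection would be permitted; the "web" and "db" names are illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }
    connect := client.Connect()

    // IntentionCreate returns the generated ID of the new intention.
    id, _, err := connect.IntentionCreate(&api.Intention{
        SourceName:      "web",
        DestinationName: "db",
        Action:          api.IntentionActionAllow,
    }, nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("created intention", id)

    // IntentionCheck evaluates the source/destination pair against the
    // current set of intentions.
    allowed, _, err := connect.IntentionCheck(&api.IntentionCheck{
        Source:      "web",
        Destination: "db",
    }, nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("web -> db allowed:", allowed)
}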

vendor/github.com/hashicorp/consul/api/coordinate.go generated vendored Normal file

@ -0,0 +1,106 @@
package api
import (
"github.com/hashicorp/serf/coordinate"
)
// CoordinateEntry represents a node and its associated network coordinate.
type CoordinateEntry struct {
Node string
Segment string
Coord *coordinate.Coordinate
}
// CoordinateDatacenterMap has the coordinates for servers in a given datacenter
// and area. Network coordinates are only compatible within the same area.
type CoordinateDatacenterMap struct {
Datacenter string
AreaID string
Coordinates []CoordinateEntry
}
// Coordinate can be used to query the coordinate endpoints
type Coordinate struct {
c *Client
}
// Coordinate returns a handle to the coordinate endpoints
func (c *Client) Coordinate() *Coordinate {
return &Coordinate{c}
}
// Datacenters is used to return the coordinates of all the servers in the WAN
// pool.
func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
_, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*CoordinateDatacenterMap
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Nodes is used to return the coordinates of all the nodes in the LAN pool.
func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/coordinate/nodes")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*CoordinateEntry
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Update inserts or updates the LAN coordinate of a node.
func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/coordinate/update")
r.setWriteOptions(q)
r.obj = coord
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// Node is used to return the coordinates of a single node in the LAN pool.
func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*CoordinateEntry
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
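An illustrative sketch (not part of the vendored file) that reads LAN node coordinates and the WAN datacenter map.

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }
    coord := client.Coordinate()

    // Nodes returns the network coordinates of every node in the LAN pool.
    nodes, _, err := coord.Nodes(nil)
    if err != nil {
        log.Fatal(err)
    }
    for _, entry := range nodes {
        fmt.Println(entry.Node, "segment:", entry.Segment)
    }

    // Datacenters returns server coordinates grouped by datacenter and area.
    dcs, err := coord.Datacenters()
    if err != nil {
        log.Fatal(err)
    }
    for _, dc := range dcs {
        fmt.Println(dc.Datacenter, "servers:", len(dc.Coordinates))
    }
}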

vendor/github.com/hashicorp/consul/api/debug.go generated vendored Normal file

@ -0,0 +1,106 @@
package api
import (
"fmt"
"io/ioutil"
"strconv"
)
// Debug can be used to query the /debug/pprof endpoints to gather
// profiling information about the target agent.
//
// The agent must have enable_debug set to true for profiling to be enabled
// and for these endpoints to function.
type Debug struct {
c *Client
}
// Debug returns a handle that exposes the internal debug endpoints.
func (c *Client) Debug() *Debug {
return &Debug{c}
}
// Heap returns a pprof heap dump
func (d *Debug) Heap() ([]byte, error) {
r := d.c.newRequest("GET", "/debug/pprof/heap")
_, resp, err := d.c.doRequest(r)
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
defer resp.Body.Close()
// We return a raw response because we're just passing through a response
// from the pprof handlers
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("error decoding body: %s", err)
}
return body, nil
}
// Profile returns a pprof CPU profile for the specified number of seconds
func (d *Debug) Profile(seconds int) ([]byte, error) {
r := d.c.newRequest("GET", "/debug/pprof/profile")
// Capture a profile for the specified number of seconds
r.params.Set("seconds", strconv.Itoa(seconds))
_, resp, err := d.c.doRequest(r)
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
defer resp.Body.Close()
// We return a raw response because we're just passing through a response
// from the pprof handlers
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("error decoding body: %s", err)
}
return body, nil
}
// Trace returns an execution trace
func (d *Debug) Trace(seconds int) ([]byte, error) {
r := d.c.newRequest("GET", "/debug/pprof/trace")
// Capture a trace for the specified number of seconds
r.params.Set("seconds", strconv.Itoa(seconds))
_, resp, err := d.c.doRequest(r)
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
defer resp.Body.Close()
// We return a raw response because we're just passing through a response
// from the pprof handlers
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("error decoding body: %s", err)
}
return body, nil
}
// Goroutine returns a pprof goroutine profile
func (d *Debug) Goroutine() ([]byte, error) {
r := d.c.newRequest("GET", "/debug/pprof/goroutine")
_, resp, err := d.c.doRequest(r)
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
defer resp.Body.Close()
// We return a raw response because we're just passing through a response
// from the pprof handlers
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("error decoding body: %s", err)
}
return body, nil
}
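A sketch (not part of the vendored file) that captures a short CPU profile; it assumes the agent runs with enable_debug set to true, and the output file name is arbitrary.

package main

import (
    "io/ioutil"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    // Profile blocks for the requested number of seconds and returns the
    // raw pprof payload.
    profile, err := client.Debug().Profile(5)
    if err != nil {
        log.Fatal(err)
    }

    // Write the payload somewhere "go tool pprof" can read it.
    if err := ioutil.WriteFile("consul-cpu.pprof", profile, 0644); err != nil {
        log.Fatal(err)
    }
    log.Printf("wrote %d bytes of profile data", len(profile))
}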

vendor/github.com/hashicorp/consul/api/event.go generated vendored Normal file

@ -0,0 +1,104 @@
package api
import (
"bytes"
"strconv"
)
// Event can be used to query the Event endpoints
type Event struct {
c *Client
}
// UserEvent represents an event that was fired by the user
type UserEvent struct {
ID string
Name string
Payload []byte
NodeFilter string
ServiceFilter string
TagFilter string
Version int
LTime uint64
}
// Event returns a handle to the event endpoints
func (c *Client) Event() *Event {
return &Event{c}
}
// Fire is used to fire a new user event. Only the Name, Payload and Filters
// are respected. This returns the ID or an associated error. Cross DC requests
// are supported.
func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
r.setWriteOptions(q)
if params.NodeFilter != "" {
r.params.Set("node", params.NodeFilter)
}
if params.ServiceFilter != "" {
r.params.Set("service", params.ServiceFilter)
}
if params.TagFilter != "" {
r.params.Set("tag", params.TagFilter)
}
if params.Payload != nil {
r.body = bytes.NewReader(params.Payload)
}
rtt, resp, err := requireOK(e.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out UserEvent
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// List is used to get the most recent events an agent has received.
// This list can be optionally filtered by the name. This endpoint supports
// quasi-blocking queries. The index is not monotonic, nor does it provide
// LastContact or KnownLeader.
func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
r := e.c.newRequest("GET", "/v1/event/list")
r.setQueryOptions(q)
if name != "" {
r.params.Set("name", name)
}
rtt, resp, err := requireOK(e.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*UserEvent
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// IDToIndex is a bit of a hack. This simulates the index generation to
// convert an event ID into a WaitIndex.
func (e *Event) IDToIndex(uuid string) uint64 {
lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
upper := uuid[19:23] + uuid[24:36]
lowVal, err := strconv.ParseUint(lower, 16, 64)
if err != nil {
panic("Failed to convert " + lower)
}
highVal, err := strconv.ParseUint(upper, 16, 64)
if err != nil {
panic("Failed to convert " + upper)
}
return lowVal ^ highVal
}
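An illustrative sketch (not part of the vendored file) that fires a user event and reuses its ID as a wait index for a quasi-blocking List call; the "deploy" event name is illustrative.

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }
    event := client.Event()

    // Fire sends a custom user event; only Name, Payload and the filters
    // on the UserEvent struct are used.
    id, _, err := event.Fire(&api.UserEvent{
        Name:    "deploy",
        Payload: []byte("v1.2.3"),
    }, nil)
    if err != nil {
        log.Fatal(err)
    }

    // IDToIndex turns the event ID into a pseudo WaitIndex that can be fed
    // back into QueryOptions for quasi-blocking List queries.
    idx := event.IDToIndex(id)
    events, _, err := event.List("deploy", &api.QueryOptions{
        WaitIndex: idx,
        WaitTime:  5 * time.Second,
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("events seen:", len(events))
}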

vendor/github.com/hashicorp/consul/api/go.mod generated vendored Normal file

@ -0,0 +1,16 @@
module github.com/hashicorp/consul/api
go 1.12
replace github.com/hashicorp/consul/sdk => ../sdk
require (
github.com/hashicorp/consul/sdk v0.1.1
github.com/hashicorp/go-cleanhttp v0.5.1
github.com/hashicorp/go-rootcerts v1.0.0
github.com/hashicorp/go-uuid v1.0.1
github.com/hashicorp/serf v0.8.2
github.com/mitchellh/mapstructure v1.1.2
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c
github.com/stretchr/testify v1.3.0
)

vendor/github.com/hashicorp/consul/api/go.sum generated vendored Normal file

@ -0,0 +1,76 @@
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

vendor/github.com/hashicorp/consul/api/health.go generated vendored Normal file

@ -0,0 +1,330 @@
package api
import (
"encoding/json"
"fmt"
"strings"
"time"
)
const (
// HealthAny is special, and is used as a wild card,
// not as a specific state.
HealthAny = "any"
HealthPassing = "passing"
HealthWarning = "warning"
HealthCritical = "critical"
HealthMaint = "maintenance"
)
const (
// NodeMaint is the special key set by a node in maintenance mode.
NodeMaint = "_node_maintenance"
// ServiceMaintPrefix is the prefix for a service in maintenance mode.
ServiceMaintPrefix = "_service_maintenance:"
)
// HealthCheck is used to represent a single check
type HealthCheck struct {
Node string
CheckID string
Name string
Status string
Notes string
Output string
ServiceID string
ServiceName string
ServiceTags []string
Definition HealthCheckDefinition
CreateIndex uint64
ModifyIndex uint64
}
// HealthCheckDefinition is used to store the details about
// a health check's execution.
type HealthCheckDefinition struct {
HTTP string
Header map[string][]string
Method string
TLSSkipVerify bool
TCP string
IntervalDuration time.Duration `json:"-"`
TimeoutDuration time.Duration `json:"-"`
DeregisterCriticalServiceAfterDuration time.Duration `json:"-"`
// DEPRECATED in Consul 1.4.1. Use the above time.Duration fields instead.
Interval ReadableDuration
Timeout ReadableDuration
DeregisterCriticalServiceAfter ReadableDuration
}
func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) {
type Alias HealthCheckDefinition
out := &struct {
Interval string
Timeout string
DeregisterCriticalServiceAfter string
*Alias
}{
Interval: d.Interval.String(),
Timeout: d.Timeout.String(),
DeregisterCriticalServiceAfter: d.DeregisterCriticalServiceAfter.String(),
Alias: (*Alias)(d),
}
if d.IntervalDuration != 0 {
out.Interval = d.IntervalDuration.String()
} else if d.Interval != 0 {
out.Interval = d.Interval.String()
}
if d.TimeoutDuration != 0 {
out.Timeout = d.TimeoutDuration.String()
} else if d.Timeout != 0 {
out.Timeout = d.Timeout.String()
}
if d.DeregisterCriticalServiceAfterDuration != 0 {
out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfterDuration.String()
} else if d.DeregisterCriticalServiceAfter != 0 {
out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfter.String()
}
return json.Marshal(out)
}
func (d *HealthCheckDefinition) UnmarshalJSON(data []byte) error {
type Alias HealthCheckDefinition
aux := &struct {
Interval string
Timeout string
DeregisterCriticalServiceAfter string
*Alias
}{
Alias: (*Alias)(d),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
// Parse the values into both the time.Duration and old ReadableDuration fields.
var err error
if aux.Interval != "" {
if d.IntervalDuration, err = time.ParseDuration(aux.Interval); err != nil {
return err
}
d.Interval = ReadableDuration(d.IntervalDuration)
}
if aux.Timeout != "" {
if d.TimeoutDuration, err = time.ParseDuration(aux.Timeout); err != nil {
return err
}
d.Timeout = ReadableDuration(d.TimeoutDuration)
}
if aux.DeregisterCriticalServiceAfter != "" {
if d.DeregisterCriticalServiceAfterDuration, err = time.ParseDuration(aux.DeregisterCriticalServiceAfter); err != nil {
return err
}
d.DeregisterCriticalServiceAfter = ReadableDuration(d.DeregisterCriticalServiceAfterDuration)
}
return nil
}
// HealthChecks is a collection of HealthCheck structs.
type HealthChecks []*HealthCheck
// AggregatedStatus returns the "best" status for the list of health checks.
// Because a given entry may have many service and node-level health checks
// attached, this function determines the best representative of the status as
// a single string using the following heuristic:
//
// maintenance > critical > warning > passing
//
func (c HealthChecks) AggregatedStatus() string {
var passing, warning, critical, maintenance bool
for _, check := range c {
id := string(check.CheckID)
if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
maintenance = true
continue
}
switch check.Status {
case HealthPassing:
passing = true
case HealthWarning:
warning = true
case HealthCritical:
critical = true
default:
return ""
}
}
switch {
case maintenance:
return HealthMaint
case critical:
return HealthCritical
case warning:
return HealthWarning
case passing:
return HealthPassing
default:
return HealthPassing
}
}
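A small sketch (not part of the vendored file) of the heuristic above: one passing and one critical check aggregate to "critical".

package main

import (
    "fmt"

    "github.com/hashicorp/consul/api"
)

func main() {
    checks := api.HealthChecks{
        {CheckID: "serfHealth", Status: api.HealthPassing},
        {CheckID: "service:web", Status: api.HealthCritical},
    }
    // critical outranks passing, so the aggregate is "critical".
    fmt.Println(checks.AggregatedStatus())
}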
// ServiceEntry is used for the health service endpoint
type ServiceEntry struct {
Node *Node
Service *AgentService
Checks HealthChecks
}
// Health can be used to query the Health endpoints
type Health struct {
c *Client
}
// Health returns a handle to the health endpoints
func (c *Client) Health() *Health {
return &Health{c}
}
// Node is used to query for checks belonging to a given node
func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out HealthChecks
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Checks is used to return the checks associated with a service
func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/checks/"+service)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out HealthChecks
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Service is used to query health information along with service info
// for a given service. It can optionally do server-side filtering on a tag
// or nodes with passing health checks only.
func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
var tags []string
if tag != "" {
tags = []string{tag}
}
return h.service(service, tags, passingOnly, q, false)
}
func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
return h.service(service, tags, passingOnly, q, false)
}
// Connect is equivalent to Service except that it will only return services
// which are Connect-enabled and will return the connection address for Connect
// clients to use, which may be a proxy in front of the named service. If
// passingOnly is true only instances where both the service and any proxy are
// healthy will be returned.
func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
var tags []string
if tag != "" {
tags = []string{tag}
}
return h.service(service, tags, passingOnly, q, true)
}
func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
return h.service(service, tags, passingOnly, q, true)
}
func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) {
path := "/v1/health/service/" + service
if connect {
path = "/v1/health/connect/" + service
}
r := h.c.newRequest("GET", path)
r.setQueryOptions(q)
if len(tags) > 0 {
for _, tag := range tags {
r.params.Add("tag", tag)
}
}
if passingOnly {
r.params.Set(HealthPassing, "1")
}
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*ServiceEntry
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// State is used to retrieve all the checks in a given state.
// The wildcard "any" state can also be used for all checks.
func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
switch state {
case HealthAny:
case HealthWarning:
case HealthCritical:
case HealthPassing:
default:
return nil, nil, fmt.Errorf("Unsupported state: %v", state)
}
r := h.c.newRequest("GET", "/v1/health/state/"+state)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out HealthChecks
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
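A sketch (not part of the vendored file) that lists only healthy instances of a service and prints their addresses; the "web" service name is illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    // passingOnly=true filters out instances with warning or critical checks.
    entries, _, err := client.Health().Service("web", "", true, nil)
    if err != nil {
        log.Fatal(err)
    }
    for _, entry := range entries {
        fmt.Printf("%s (%s) -> port %d, status %s\n",
            entry.Node.Node,
            entry.Node.Address,
            entry.Service.Port,
            entry.Checks.AggregatedStatus())
    }
}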

vendor/github.com/hashicorp/consul/api/kv.go generated vendored Normal file

@ -0,0 +1,286 @@
package api
import (
"bytes"
"fmt"
"io"
"net/http"
"strconv"
"strings"
)
// KVPair is used to represent a single K/V entry
type KVPair struct {
// Key is the name of the key. It is also part of the URL path when accessed
// via the API.
Key string
// CreateIndex holds the index corresponding the creation of this KVPair. This
// is a read-only field.
CreateIndex uint64
// ModifyIndex is used for the Check-And-Set operations and can also be fed
// back into the WaitIndex of the QueryOptions in order to perform blocking
// queries.
ModifyIndex uint64
// LockIndex holds the index corresponding to a lock on this key, if any. This
// is a read-only field.
LockIndex uint64
// Flags are any user-defined flags on the key. It is up to the implementer
// to check these values, since Consul does not treat them specially.
Flags uint64
// Value is the value for the key. This can be any value, but it will be
// base64 encoded upon transport.
Value []byte
// Session is a string representing the ID of the session. Any other
// interactions with this key over the same session must specify the same
// session ID.
Session string
}
// KVPairs is a list of KVPair objects
type KVPairs []*KVPair
// KV is used to manipulate the K/V API
type KV struct {
c *Client
}
// KV is used to return a handle to the K/V apis
func (c *Client) KV() *KV {
return &KV{c}
}
// Get is used to lookup a single key. The returned pointer
// to the KVPair will be nil if the key does not exist.
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
resp, qm, err := k.getInternal(key, nil, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// List is used to lookup all keys under a prefix
func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// Keys is used to list all the keys under a prefix. Optionally,
// a separator can be used to limit the responses.
func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
params := map[string]string{"keys": ""}
if separator != "" {
params["separator"] = separator
}
resp, qm, err := k.getInternal(prefix, params, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []string
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/"))
r.setQueryOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
rtt, resp, err := k.c.doRequest(r)
if err != nil {
return nil, nil, err
}
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if resp.StatusCode == 404 {
resp.Body.Close()
return nil, qm, nil
} else if resp.StatusCode != 200 {
resp.Body.Close()
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
}
return resp, qm, nil
}
// Put is used to write a new value. Only the
// Key, Flags and Value are respected.
func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
params := make(map[string]string, 1)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
_, wm, err := k.put(p.Key, params, p.Value, q)
return wm, err
}
// CAS is used for a Check-And-Set operation. The Key,
// ModifyIndex, Flags and Value are respected. Returns true
// on success or false on failures.
func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
return k.put(p.Key, params, p.Value, q)
}
// Acquire is used for a lock acquisition operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["acquire"] = p.Session
return k.put(p.Key, params, p.Value, q)
}
// Release is used for a lock release operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["release"] = p.Session
return k.put(p.Key, params, p.Value, q)
}
func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
if len(key) > 0 && key[0] == '/' {
return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key)
}
r := k.c.newRequest("PUT", "/v1/kv/"+key)
r.setWriteOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
r.body = bytes.NewReader(body)
rtt, resp, err := requireOK(k.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(buf.String(), "true")
return res, qm, nil
}
// Delete is used to delete a single key
func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
_, qm, err := k.deleteInternal(key, nil, w)
return qm, err
}
// DeleteCAS is used for a Delete Check-And-Set operation. The Key
// and ModifyIndex are respected. Returns true on success or false on failures.
func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := map[string]string{
"cas": strconv.FormatUint(p.ModifyIndex, 10),
}
return k.deleteInternal(p.Key, params, q)
}
// DeleteTree is used to delete all keys under a prefix
func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
_, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w)
return qm, err
}
func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) {
r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/"))
r.setWriteOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
rtt, resp, err := requireOK(k.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(buf.String(), "true")
return res, qm, nil
}
// The Txn function has been deprecated from the KV object; please see the Txn
// object for more information about Transactions.
func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
var ops TxnOps
for _, op := range txn {
ops = append(ops, &TxnOp{KV: op})
}
respOk, txnResp, qm, err := k.c.txn(ops, q)
if err != nil {
return false, nil, nil, err
}
// Convert from the internal format.
kvResp := KVTxnResponse{
Errors: txnResp.Errors,
}
for _, result := range txnResp.Results {
kvResp.Results = append(kvResp.Results, result.KV)
}
return respOk, &kvResp, qm, nil
}
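A sketch (not part of the vendored file) of the basic KV workflow: Put, Get, then an optimistic update with CAS keyed on the ModifyIndex that was read; the key name is illustrative.

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }
    kv := client.KV()

    // Put writes the value unconditionally.
    if _, err := kv.Put(&api.KVPair{Key: "app/config", Value: []byte("v1")}, nil); err != nil {
        log.Fatal(err)
    }

    // Get returns a nil pair (not an error) when the key does not exist.
    pair, _, err := kv.Get("app/config", nil)
    if err != nil || pair == nil {
        log.Fatalf("get failed: pair=%v err=%v", pair, err)
    }

    // CAS only succeeds if the key is still at the ModifyIndex we read.
    pair.Value = []byte("v2")
    ok, _, err := kv.CAS(pair, nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("check-and-set applied:", ok)
}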

vendor/github.com/hashicorp/consul/api/lock.go generated vendored Normal file

@ -0,0 +1,386 @@
package api
import (
"fmt"
"sync"
"time"
)
const (
// DefaultLockSessionName is the Session Name we assign if none is provided
DefaultLockSessionName = "Consul API Lock"
// DefaultLockSessionTTL is the default session TTL if no Session is provided
// when creating a new Lock. This is used because we do not have any
// other check to depend upon.
DefaultLockSessionTTL = "15s"
// DefaultLockWaitTime is how long we block for at a time to check if lock
// acquisition is possible. This affects the minimum time it takes to cancel
// a Lock acquisition.
DefaultLockWaitTime = 15 * time.Second
// DefaultLockRetryTime is how long we wait after a failed lock acquisition
// before attempting to do the lock again. This is so that once a lock-delay
// is in effect, we do not hot loop retrying the acquisition.
DefaultLockRetryTime = 5 * time.Second
// DefaultMonitorRetryTime is how long we wait after a failed monitor check
// of a lock (500 response code). This allows the monitor to ride out brief
// periods of unavailability, subject to the MonitorRetries setting in the
// lock options which is by default set to 0, disabling this feature. This
// affects locks and semaphores.
DefaultMonitorRetryTime = 2 * time.Second
// LockFlagValue is a magic flag we set to indicate a key
// is being used for a lock. It is used to detect a potential
// conflict with a semaphore.
LockFlagValue = 0x2ddccbc058a50c18
)
var (
// ErrLockHeld is returned if we attempt to double lock
ErrLockHeld = fmt.Errorf("Lock already held")
// ErrLockNotHeld is returned if we attempt to unlock a lock
// that we do not hold.
ErrLockNotHeld = fmt.Errorf("Lock not held")
// ErrLockInUse is returned if we attempt to destroy a lock
// that is in use.
ErrLockInUse = fmt.Errorf("Lock in use")
// ErrLockConflict is returned if the flags on a key
// used for a lock do not match expectation
ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
)
// Lock is used to implement client-side leader election. It follows the
// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
type Lock struct {
c *Client
opts *LockOptions
isHeld bool
sessionRenew chan struct{}
lockSession string
l sync.Mutex
}
// LockOptions is used to parameterize the Lock behavior.
type LockOptions struct {
Key string // Must be set and have write permissions
Value []byte // Optional, value to associate with the lock
Session string // Optional, created if not specified
SessionOpts *SessionEntry // Optional, options to use when creating a session
SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given)
SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given)
MonitorRetries int // Optional, defaults to 0 which means no retries
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime
LockTryOnce bool // Optional, defaults to false which means try forever
}
// LockKey returns a handle to a lock struct which can be used
// to acquire and release the mutex. The key used must have
// write permissions.
func (c *Client) LockKey(key string) (*Lock, error) {
opts := &LockOptions{
Key: key,
}
return c.LockOpts(opts)
}
// LockOpts returns a handle to a lock struct which can be used
// to acquire and release the mutex. The key used must have
// write permissions.
func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
if opts.Key == "" {
return nil, fmt.Errorf("missing key")
}
if opts.SessionName == "" {
opts.SessionName = DefaultLockSessionName
}
if opts.SessionTTL == "" {
opts.SessionTTL = DefaultLockSessionTTL
} else {
if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
}
}
if opts.MonitorRetryTime == 0 {
opts.MonitorRetryTime = DefaultMonitorRetryTime
}
if opts.LockWaitTime == 0 {
opts.LockWaitTime = DefaultLockWaitTime
}
l := &Lock{
c: c,
opts: opts,
}
return l, nil
}
// Lock attempts to acquire the lock and blocks while doing so.
// Providing a non-nil stopCh can be used to abort the lock attempt.
// Returns a channel that is closed if our lock is lost or an error.
// This channel could be closed at any time due to session invalidation,
// communication errors, operator intervention, etc. It is NOT safe to
// assume that the lock is held until Unlock() unless the Session is specifically
// created without any associated health checks. By default Consul sessions
// prefer liveness over safety and an application must be able to handle
// the lock being lost.
func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
// Hold the lock as we try to acquire
l.l.Lock()
defer l.l.Unlock()
// Check if we already hold the lock
if l.isHeld {
return nil, ErrLockHeld
}
// Check if we need to create a session first
l.lockSession = l.opts.Session
if l.lockSession == "" {
s, err := l.createSession()
if err != nil {
return nil, fmt.Errorf("failed to create session: %v", err)
}
l.sessionRenew = make(chan struct{})
l.lockSession = s
session := l.c.Session()
go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
// If we fail to acquire the lock, cleanup the session
defer func() {
if !l.isHeld {
close(l.sessionRenew)
l.sessionRenew = nil
}
}()
}
// Setup the query options
kv := l.c.KV()
qOpts := &QueryOptions{
WaitTime: l.opts.LockWaitTime,
}
start := time.Now()
attempts := 0
WAIT:
// Check if we should quit
select {
case <-stopCh:
return nil, nil
default:
}
// Handle the one-shot mode.
if l.opts.LockTryOnce && attempts > 0 {
elapsed := time.Since(start)
if elapsed > l.opts.LockWaitTime {
return nil, nil
}
// Query wait time should not exceed the lock wait time
qOpts.WaitTime = l.opts.LockWaitTime - elapsed
}
attempts++
// Look for an existing lock, blocking until not taken
pair, meta, err := kv.Get(l.opts.Key, qOpts)
if err != nil {
return nil, fmt.Errorf("failed to read lock: %v", err)
}
if pair != nil && pair.Flags != LockFlagValue {
return nil, ErrLockConflict
}
locked := false
if pair != nil && pair.Session == l.lockSession {
goto HELD
}
if pair != nil && pair.Session != "" {
qOpts.WaitIndex = meta.LastIndex
goto WAIT
}
// Try to acquire the lock
pair = l.lockEntry(l.lockSession)
locked, _, err = kv.Acquire(pair, nil)
if err != nil {
return nil, fmt.Errorf("failed to acquire lock: %v", err)
}
// Handle the case of not getting the lock
if !locked {
// Determine why the lock failed
qOpts.WaitIndex = 0
pair, meta, err = kv.Get(l.opts.Key, qOpts)
if pair != nil && pair.Session != "" {
// If the session is not empty, this means that a wait can safely happen
// using a long poll
qOpts.WaitIndex = meta.LastIndex
goto WAIT
} else {
// If the session is empty and the lock failed to acquire, then it means
// a lock-delay is in effect and a timed wait must be used
select {
case <-time.After(DefaultLockRetryTime):
goto WAIT
case <-stopCh:
return nil, nil
}
}
}
HELD:
// Watch to ensure we maintain leadership
leaderCh := make(chan struct{})
go l.monitorLock(l.lockSession, leaderCh)
// Set that we own the lock
l.isHeld = true
// Locked! All done
return leaderCh, nil
}
// Unlock releases the lock. It is an error to call this
// if the lock is not currently held.
func (l *Lock) Unlock() error {
// Hold the lock as we try to release
l.l.Lock()
defer l.l.Unlock()
// Ensure the lock is actually held
if !l.isHeld {
return ErrLockNotHeld
}
// Set that we no longer own the lock
l.isHeld = false
// Stop the session renew
if l.sessionRenew != nil {
defer func() {
close(l.sessionRenew)
l.sessionRenew = nil
}()
}
// Get the lock entry, and clear the lock session
lockEnt := l.lockEntry(l.lockSession)
l.lockSession = ""
// Release the lock explicitly
kv := l.c.KV()
_, _, err := kv.Release(lockEnt, nil)
if err != nil {
return fmt.Errorf("failed to release lock: %v", err)
}
return nil
}
// Destroy is used to clean up the lock entry. It is not necessary
// to invoke it. It will fail if the lock is in use.
func (l *Lock) Destroy() error {
// Hold the lock as we try to release
l.l.Lock()
defer l.l.Unlock()
// Check if we already hold the lock
if l.isHeld {
return ErrLockHeld
}
// Look for an existing lock
kv := l.c.KV()
pair, _, err := kv.Get(l.opts.Key, nil)
if err != nil {
return fmt.Errorf("failed to read lock: %v", err)
}
// Nothing to do if the lock does not exist
if pair == nil {
return nil
}
// Check for possible flag conflict
if pair.Flags != LockFlagValue {
return ErrLockConflict
}
// Check if it is in use
if pair.Session != "" {
return ErrLockInUse
}
// Attempt the delete
didRemove, _, err := kv.DeleteCAS(pair, nil)
if err != nil {
return fmt.Errorf("failed to remove lock: %v", err)
}
if !didRemove {
return ErrLockInUse
}
return nil
}
// createSession is used to create a new managed session
func (l *Lock) createSession() (string, error) {
session := l.c.Session()
se := l.opts.SessionOpts
if se == nil {
se = &SessionEntry{
Name: l.opts.SessionName,
TTL: l.opts.SessionTTL,
}
}
id, _, err := session.Create(se, nil)
if err != nil {
return "", err
}
return id, nil
}
// lockEntry returns a formatted KVPair for the lock
func (l *Lock) lockEntry(session string) *KVPair {
return &KVPair{
Key: l.opts.Key,
Value: l.opts.Value,
Session: session,
Flags: LockFlagValue,
}
}
// monitorLock is a long-running routine to monitor lock ownership.
// It closes the stopCh if we lose our lock.
func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
defer close(stopCh)
kv := l.c.KV()
opts := &QueryOptions{RequireConsistent: true}
WAIT:
retries := l.opts.MonitorRetries
RETRY:
pair, meta, err := kv.Get(l.opts.Key, opts)
if err != nil {
// If configured we can try to ride out a brief Consul unavailability
// by doing retries. Note that we have to attempt the retry in a non-
// blocking fashion so that we have a clean place to reset the retry
// counter if service is restored.
if retries > 0 && IsRetryableError(err) {
time.Sleep(l.opts.MonitorRetryTime)
retries--
opts.WaitIndex = 0
goto RETRY
}
return
}
if pair != nil && pair.Session == session {
opts.WaitIndex = meta.LastIndex
goto WAIT
}
}
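The lock flow above can be summarized with a short usage sketch. This is illustrative only (not part of the vendored file); it assumes the api package context of the lock code above, and the function name and key path are invented for the example.

// exampleLockUsage sketches the typical LockKey / Lock / Unlock flow.
func exampleLockUsage(c *Client) error {
    lock, err := c.LockKey("service/example/leader")
    if err != nil {
        return err
    }

    // Lock blocks until the lock is acquired, an error occurs, or the
    // (nil here) stop channel fires.
    lostCh, err := lock.Lock(nil)
    if err != nil {
        return err
    }
    if lostCh == nil {
        // A nil channel with a nil error means the attempt was aborted.
        return fmt.Errorf("lock not acquired")
    }
    defer lock.Unlock()

    // Do protected work here; lostCh closes if the lock is ever lost.
    select {
    case <-lostCh:
        return fmt.Errorf("lock was lost")
    default:
        return nil
    }
}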

11
vendor/github.com/hashicorp/consul/api/operator.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
package api
// Operator can be used to perform low-level operator tasks for Consul.
type Operator struct {
c *Client
}
// Operator returns a handle to the operator endpoints.
func (c *Client) Operator() *Operator {
return &Operator{c}
}

194
vendor/github.com/hashicorp/consul/api/operator_area.go generated vendored Normal file
View File

@ -0,0 +1,194 @@
package api
// The /v1/operator/area endpoints are available only in Consul Enterprise and
// interact with its network area subsystem. Network areas are used to link
// together Consul servers in different Consul datacenters. With network areas,
// Consul datacenters can be linked together in ways other than a fully-connected
// mesh, as is required for Consul's WAN.
import (
"net"
"time"
)
// Area defines a network area.
type Area struct {
// ID is the identifier for an area (a UUID). This must be left empty
// when creating a new area.
ID string
// PeerDatacenter is the peer Consul datacenter that will make up the
// other side of this network area. Network areas always involve a pair
// of datacenters: the datacenter where the area was created, and the
// peer datacenter. This is required.
PeerDatacenter string
// RetryJoin specifies the addresses of Consul servers to join, such as
// IP addresses or hostnames with an optional port number. This is optional.
RetryJoin []string
// UseTLS specifies whether gossip over this area should be encrypted with TLS
// if possible.
UseTLS bool
}
// AreaJoinResponse is returned when a join occurs and gives the result for each
// address.
type AreaJoinResponse struct {
// The address that was joined.
Address string
// Whether or not the join was a success.
Joined bool
// If we couldn't join, this is the message with information.
Error string
}
// SerfMember is a generic structure for reporting information about members in
// a Serf cluster. This is only used by the area endpoints right now, but this
// could be expanded to other endpoints in the future.
type SerfMember struct {
// ID is the node identifier (a UUID).
ID string
// Name is the node name.
Name string
// Addr has the IP address.
Addr net.IP
// Port is the RPC port.
Port uint16
// Datacenter is the DC name.
Datacenter string
// Role is "client", "server", or "unknown".
Role string
// Build has the version of the Consul agent.
Build string
// Protocol is the protocol of the Consul agent.
Protocol int
// Status is the Serf health status "none", "alive", "leaving", "left",
// or "failed".
Status string
// RTT is the estimated round trip time from the server handling the
// request to this member. This will be negative if no RTT estimate
// is available.
RTT time.Duration
}
// AreaCreate will create a new network area. The ID in the given structure must
// be empty and a generated ID will be returned on success.
func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) {
r := op.c.newRequest("POST", "/v1/operator/area")
r.setWriteOptions(q)
r.obj = area
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// AreaUpdate will update the configuration of the network area with the given ID.
func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) {
r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID)
r.setWriteOptions(q)
r.obj = area
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// AreaGet returns a single network area.
func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) {
var out []*Area
qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// AreaList returns all the available network areas.
func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) {
var out []*Area
qm, err := op.c.query("/v1/operator/area", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// AreaDelete deletes the given network area.
func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) {
r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID)
r.setWriteOptions(q)
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// AreaJoin attempts to join the given set of join addresses to the given
// network area. See the Area structure for details about join addresses.
func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) {
r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join")
r.setWriteOptions(q)
r.obj = addresses
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out []*AreaJoinResponse
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, wm, nil
}
// AreaMembers lists the Serf information about the members in the given area.
func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) {
var out []*SerfMember
qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
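As an illustrative sketch (not part of the vendored file, and Consul Enterprise only), the area endpoints above compose as follows; the function name and parameters are invented for the example, and the api package context of this file is assumed.

// exampleAreaSetup sketches creating a network area to a peer datacenter and
// then joining a remote server into it.
func exampleAreaSetup(c *Client, peerDC, serverAddr string) (string, error) {
    op := c.Operator()

    // Create the area; the ID must be left empty and is generated by Consul.
    areaID, _, err := op.AreaCreate(&Area{PeerDatacenter: peerDC}, nil)
    if err != nil {
        return "", err
    }

    // Join the given server address into the new area.
    if _, _, err := op.AreaJoin(areaID, []string{serverAddr}, nil); err != nil {
        return "", err
    }
    return areaID, nil
}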

View File

@ -0,0 +1,219 @@
package api
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
"time"
)
// AutopilotConfiguration is used for querying/setting the Autopilot configuration.
// Autopilot helps manage operator tasks related to Consul servers like removing
// failed servers from the Raft quorum.
type AutopilotConfiguration struct {
// CleanupDeadServers controls whether to remove dead servers from the Raft
// peer list when a new server joins
CleanupDeadServers bool
// LastContactThreshold is the limit on the amount of time a server can go
// without leader contact before being considered unhealthy.
LastContactThreshold *ReadableDuration
// MaxTrailingLogs is the number of entries in the Raft log that a server can
// be behind before being considered unhealthy.
MaxTrailingLogs uint64
// ServerStabilizationTime is the minimum amount of time a server must be
// in a stable, healthy state before it can be added to the cluster. Only
// applicable with Raft protocol version 3 or higher.
ServerStabilizationTime *ReadableDuration
// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
// servers into zones for redundancy. If left blank, this feature will be disabled.
RedundancyZoneTag string
// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
// strategy of waiting until enough newer-versioned servers have been added to the
// cluster before promoting them to voters.
DisableUpgradeMigration bool
// (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
// performing upgrade migrations. If left blank, the Consul version will be used.
UpgradeVersionTag string
// CreateIndex holds the index corresponding to the creation of this configuration.
// This is a read-only field.
CreateIndex uint64
// ModifyIndex will be set to the index of the last update when retrieving the
// Autopilot configuration. Resubmitting a configuration with
// AutopilotCASConfiguration will perform a check-and-set operation which ensures
// there hasn't been a subsequent update since the configuration was retrieved.
ModifyIndex uint64
}
// ServerHealth is the health (from the leader's point of view) of a server.
type ServerHealth struct {
// ID is the raft ID of the server.
ID string
// Name is the node name of the server.
Name string
// Address is the address of the server.
Address string
// The status of the SerfHealth check for the server.
SerfStatus string
// Version is the Consul version of the server.
Version string
// Leader is whether this server is currently the leader.
Leader bool
// LastContact is the time since this node's last contact with the leader.
LastContact *ReadableDuration
// LastTerm is the highest leader term this server has a record of in its Raft log.
LastTerm uint64
// LastIndex is the last log index this server has a record of in its Raft log.
LastIndex uint64
// Healthy is whether or not the server is healthy according to the current
// Autopilot config.
Healthy bool
// Voter is whether this is a voting server.
Voter bool
// StableSince is the last time this server's Healthy value changed.
StableSince time.Time
}
// OperatorHealthReply is a representation of the overall health of the cluster
type OperatorHealthReply struct {
// Healthy is true if all the servers in the cluster are healthy.
Healthy bool
// FailureTolerance is the number of healthy servers that could be lost without
// an outage occurring.
FailureTolerance int
// Servers holds the health of each server.
Servers []ServerHealth
}
// ReadableDuration is a duration type that is serialized to JSON in human readable format.
type ReadableDuration time.Duration
func NewReadableDuration(dur time.Duration) *ReadableDuration {
d := ReadableDuration(dur)
return &d
}
func (d *ReadableDuration) String() string {
return d.Duration().String()
}
func (d *ReadableDuration) Duration() time.Duration {
if d == nil {
return time.Duration(0)
}
return time.Duration(*d)
}
func (d *ReadableDuration) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil
}
func (d *ReadableDuration) UnmarshalJSON(raw []byte) error {
if d == nil {
return fmt.Errorf("cannot unmarshal to nil pointer")
}
str := string(raw)
if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' {
return fmt.Errorf("must be enclosed with quotes: %s", str)
}
dur, err := time.ParseDuration(str[1 : len(str)-1])
if err != nil {
return err
}
*d = ReadableDuration(dur)
return nil
}
// AutopilotGetConfiguration is used to query the current Autopilot configuration.
func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) {
r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out AutopilotConfiguration
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}
// AutopilotSetConfiguration is used to set the current Autopilot configuration.
func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error {
r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
r.setWriteOptions(q)
r.obj = conf
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// AutopilotCASConfiguration is used to perform a Check-And-Set update on the
// Autopilot configuration. The ModifyIndex value will be respected. Returns
// true on success or false on failure.
func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
r.setWriteOptions(q)
r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
r.obj = conf
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return false, err
}
defer resp.Body.Close()
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(buf.String(), "true")
return res, nil
}
// AutopilotServerHealth is used to query the health of the servers in the
// cluster, as determined by the current Autopilot configuration.
func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out OperatorHealthReply
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}
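A minimal sketch (illustrative, not part of the vendored file) of a safe read-modify-write of the Autopilot configuration using the CAS endpoint above; the function name is invented and the api package context of this file is assumed.

// exampleAutopilotToggle flips CleanupDeadServers with a check-and-set so a
// concurrent update between the read and the write is not silently overwritten.
func exampleAutopilotToggle(c *Client, cleanup bool) (bool, error) {
    op := c.Operator()

    conf, err := op.AutopilotGetConfiguration(nil)
    if err != nil {
        return false, err
    }

    conf.CleanupDeadServers = cleanup
    // The ModifyIndex carried over from the read is respected by the CAS call.
    return op.AutopilotCASConfiguration(conf, nil)
}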

View File

@ -0,0 +1,89 @@
package api
// keyringRequest is used for performing Keyring operations
type keyringRequest struct {
Key string
}
// KeyringResponse is returned when listing the gossip encryption keys
type KeyringResponse struct {
// Whether this response is for a WAN ring
WAN bool
// The datacenter name this request corresponds to
Datacenter string
// Segment has the network segment this request corresponds to.
Segment string
// Messages has information or errors from serf
Messages map[string]string `json:",omitempty"`
// A map of the encryption keys to the number of nodes they're installed on
Keys map[string]int
// The total number of nodes in this ring
NumNodes int
}
// KeyringInstall is used to install a new gossip encryption key into the cluster
func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
r := op.c.newRequest("POST", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// KeyringList is used to list the gossip keys installed in the cluster
func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
r := op.c.newRequest("GET", "/v1/operator/keyring")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*KeyringResponse
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// KeyringRemove is used to remove a gossip encryption key from the cluster
func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// KeyringUse is used to change the active gossip encryption key
func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
r := op.c.newRequest("PUT", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
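The keyring endpoints above are typically combined to rotate the gossip encryption key; the sketch below is illustrative only (not part of the vendored file), with an invented function name and the api package context assumed.

// exampleKeyringRotate installs a new gossip key, makes it the active key,
// and then removes the old one from the cluster.
func exampleKeyringRotate(c *Client, oldKey, newKey string) error {
    op := c.Operator()

    if err := op.KeyringInstall(newKey, nil); err != nil {
        return err
    }
    if err := op.KeyringUse(newKey, nil); err != nil {
        return err
    }
    return op.KeyringRemove(oldKey, nil)
}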

View File

@ -0,0 +1,89 @@
package api
// RaftServer has information about a server in the Raft configuration.
type RaftServer struct {
// ID is the unique ID for the server. These are currently the same
// as the address, but they will be changed to a real GUID in a future
// release of Consul.
ID string
// Node is the node name of the server, as known by Consul, or
// "(unknown)" if the node name is not known.
Node string
// Address is the IP:port of the server, used for Raft communications.
Address string
// Leader is true if this server is the current cluster leader.
Leader bool
// ProtocolVersion is the Raft protocol version used by the server
ProtocolVersion string
// Voter is true if this server has a vote in the cluster. This might
// be false if the server is staging and still coming online, or if
// it's a non-voting server, which will be added in a future release of
// Consul.
Voter bool
}
// RaftConfiguration is returned when querying for the current Raft configuration.
type RaftConfiguration struct {
// Servers has the list of servers in the Raft configuration.
Servers []*RaftServer
// Index has the Raft index of this configuration.
Index uint64
}
// RaftGetConfiguration is used to query the current Raft peer set.
func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out RaftConfiguration
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}
// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
// quorum but no longer known to Serf or the catalog) by address in the form of
// "IP:port".
func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
r.setWriteOptions(q)
r.params.Set("address", string(address))
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
// quorum but no longer known to Serf or the catalog) by ID.
func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
r.setWriteOptions(q)
r.params.Set("id", string(id))
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
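As an illustrative sketch (not part of the vendored file), the Raft endpoints above can be combined to confirm a peer is present before removing it; the function name is invented and the api package context of this file is assumed.

// exampleRemoveStalePeer looks up a server by ID in the current Raft
// configuration and removes it if present.
func exampleRemoveStalePeer(c *Client, staleID string) error {
    op := c.Operator()

    conf, err := op.RaftGetConfiguration(nil)
    if err != nil {
        return err
    }

    for _, server := range conf.Servers {
        if server.ID == staleID {
            return op.RaftRemovePeerByID(server.ID, nil)
        }
    }
    // The ID is not in the Raft configuration; nothing to remove.
    return nil
}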

View File

@ -0,0 +1,11 @@
package api
// SegmentList returns all the available LAN segments.
func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) {
var out []string
qm, err := op.c.query("/v1/operator/segment", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}

View File

@ -0,0 +1,217 @@
package api
// QueryDatacenterOptions sets options about how we fail over if there are no
// healthy nodes in the local datacenter.
type QueryDatacenterOptions struct {
// NearestN is set to the number of remote datacenters to try, based on
// network coordinates.
NearestN int
// Datacenters is a fixed list of datacenters to try after NearestN. We
// never try a datacenter multiple times, so those are subtracted from
// this list before proceeding.
Datacenters []string
}
// QueryDNSOptions controls settings when query results are served over DNS.
type QueryDNSOptions struct {
// TTL is the time to live for the served DNS results.
TTL string
}
// ServiceQuery is used to query for a set of healthy nodes offering a specific
// service.
type ServiceQuery struct {
// Service is the service to query.
Service string
// Near allows baking in the name of a node to automatically distance-
// sort from. The magic "_agent" value is supported, which sorts near
// the agent which initiated the request by default.
Near string
// Failover controls what we do if there are no healthy nodes in the
// local datacenter.
Failover QueryDatacenterOptions
// IgnoreCheckIDs is an optional list of health check IDs to ignore when
// considering which nodes are healthy. It is useful as an emergency measure
// to temporarily override some health check that is producing false negatives
// for example.
IgnoreCheckIDs []string
// If OnlyPassing is true then we will only include nodes with passing
// health checks (critical AND warning checks will cause a node to be
// discarded)
OnlyPassing bool
// Tags are a set of required and/or disallowed tags. If a tag is in
// this list it must be present. If the tag is preceded with "!" then
// it is disallowed.
Tags []string
// NodeMeta is a map of required node metadata fields. If a key/value
// pair is in this map it must be present on the node in order for the
// service entry to be returned.
NodeMeta map[string]string
// ServiceMeta is a map of required service metadata fields. If a key/value
// pair is in this map it must be present on the node in order for the
// service entry to be returned.
ServiceMeta map[string]string
// Connect if true will filter the prepared query results to only
// include Connect-capable services. These include both native services
// and proxies for matching services. Note that if a proxy matches,
// the constraints in the query above (Near, OnlyPassing, etc.) apply
// to the _proxy_ and not the service being proxied. In practice, proxies
// should be directly next to their services so this isn't an issue.
Connect bool
}
// QueryTemplate carries the arguments for creating a templated query.
type QueryTemplate struct {
// Type specifies the type of the query template. Currently only
// "name_prefix_match" is supported. This field is required.
Type string
// Regexp allows specifying a regex pattern to match against the name
// of the query being executed.
Regexp string
}
// PreparedQueryDefinition defines a complete prepared query.
type PreparedQueryDefinition struct {
// ID is this UUID-based ID for the query, always generated by Consul.
ID string
// Name is an optional friendly name for the query supplied by the
// user. NOTE - if this feature is used then it will reduce the security
// of any read ACL associated with this query/service since this name
// can be used to locate nodes without supplying any ACL.
Name string
// Session is an optional session to tie this query's lifetime to. If
// this is omitted then the query will not expire.
Session string
// Token is the ACL token used when the query was created, and it is
// used when a query is subsequently executed. This token, or a token
// with management privileges, must be used to change the query later.
Token string
// Service defines a service query (leaving things open for other types
// later).
Service ServiceQuery
// DNS has options that control how the results of this query are
// served over DNS.
DNS QueryDNSOptions
// Template is used to pass through the arguments for creating a
// prepared query with an attached template. If a template is given,
// interpolations are possible in other struct fields.
Template QueryTemplate
}
// PreparedQueryExecuteResponse has the results of executing a query.
type PreparedQueryExecuteResponse struct {
// Service is the service that was queried.
Service string
// Nodes has the nodes that were output by the query.
Nodes []ServiceEntry
// DNS has the options for serving these results over DNS.
DNS QueryDNSOptions
// Datacenter is the datacenter that these results came from.
Datacenter string
// Failovers is a count of how many times we had to query a remote
// datacenter.
Failovers int
}
// PreparedQuery can be used to query the prepared query endpoints.
type PreparedQuery struct {
c *Client
}
// PreparedQuery returns a handle to the prepared query endpoints.
func (c *Client) PreparedQuery() *PreparedQuery {
return &PreparedQuery{c}
}
// Create makes a new prepared query. The ID of the new query is returned.
func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) {
r := c.c.newRequest("POST", "/v1/query")
r.setWriteOptions(q)
r.obj = query
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Update makes updates to an existing prepared query.
func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) {
return c.c.write("/v1/query/"+query.ID, query, nil, q)
}
// List is used to fetch all the prepared queries (always requires a management
// token).
func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
var out []*PreparedQueryDefinition
qm, err := c.c.query("/v1/query", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Get is used to fetch a specific prepared query.
func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
var out []*PreparedQueryDefinition
qm, err := c.c.query("/v1/query/"+queryID, &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Delete is used to delete a specific prepared query.
func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("DELETE", "/v1/query/"+queryID)
r.setWriteOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// Execute is used to execute a specific prepared query. You can execute using
// a query ID or name.
func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
var out *PreparedQueryExecuteResponse
qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
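A minimal sketch (illustrative, not part of the vendored file) tying the endpoints above together: create a prepared query for a service and execute it by the returned ID. The function name and query name are invented, and the api package context of this file is assumed.

// examplePreparedQuery creates a query that returns only passing instances of
// the given service and then executes it.
func examplePreparedQuery(c *Client, service string) (*PreparedQueryExecuteResponse, error) {
    pq := c.PreparedQuery()

    id, _, err := pq.Create(&PreparedQueryDefinition{
        Name: service + "-passing",
        Service: ServiceQuery{
            Service:     service,
            OnlyPassing: true,
        },
    }, nil)
    if err != nil {
        return nil, err
    }

    resp, _, err := pq.Execute(id, nil)
    return resp, err
}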

24
vendor/github.com/hashicorp/consul/api/raw.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package api
// Raw can be used to do raw queries against custom endpoints
type Raw struct {
c *Client
}
// Raw returns a handle to query endpoints
func (c *Client) Raw() *Raw {
return &Raw{c}
}
// Query is used to do a GET request against an endpoint
// and deserialize the response into an interface using
// standard Consul conventions.
func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
return raw.c.query(endpoint, out, q)
}
// Write is used to do a PUT request against an endpoint
// and serialize/deserialize using the standard Consul conventions.
func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
return raw.c.write(endpoint, in, out, q)
}
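Illustrative sketch (not part of the vendored file): using Raw to query an arbitrary endpoint and decode the response generically. The function name is an assumption for the example, and the api package context of this file is assumed.

// exampleRawQuery issues a GET against the given endpoint and decodes the
// JSON response into a generic map.
func exampleRawQuery(c *Client, endpoint string) (map[string]interface{}, error) {
    var out map[string]interface{}
    if _, err := c.Raw().Query(endpoint, &out, nil); err != nil {
        return nil, err
    }
    return out, nil
}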

514
vendor/github.com/hashicorp/consul/api/semaphore.go generated vendored Normal file
View File

@ -0,0 +1,514 @@
package api
import (
"encoding/json"
"fmt"
"path"
"sync"
"time"
)
const (
// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
DefaultSemaphoreSessionName = "Consul API Semaphore"
// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
// when creating a new Semaphore. This is used because we do not have any
// other check to depend upon.
DefaultSemaphoreSessionTTL = "15s"
// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
// acquisition is possible. This affects the minimum time it takes to cancel
// a Semaphore acquisition.
DefaultSemaphoreWaitTime = 15 * time.Second
// DefaultSemaphoreKey is the key used within the prefix
// for coordination between all the contenders.
DefaultSemaphoreKey = ".lock"
// SemaphoreFlagValue is a magic flag we set to indicate a key
// is being used for a semaphore. It is used to detect a potential
// conflict with a lock.
SemaphoreFlagValue = 0xe0f69a2baa414de0
)
var (
// ErrSemaphoreHeld is returned if we attempt to double lock
ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
// that we do not hold.
ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
// that is in use.
ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
// ErrSemaphoreConflict is returned if the flags on a key
// used for a semaphore do not match expectation
ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
)
// Semaphore is used to implement a distributed semaphore
// using the Consul KV primitives.
type Semaphore struct {
c *Client
opts *SemaphoreOptions
isHeld bool
sessionRenew chan struct{}
lockSession string
l sync.Mutex
}
// SemaphoreOptions is used to parameterize the Semaphore
type SemaphoreOptions struct {
Prefix string // Must be set and have write permissions
Limit int // Must be set, and be positive
Value []byte // Optional, value to associate with the contender entry
Session string // Optional, created if not specified
SessionName string // Optional, defaults to DefaultSemaphoreSessionName
SessionTTL string // Optional, defaults to DefaultSemaphoreSessionTTL
MonitorRetries int // Optional, defaults to 0 which means no retries
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
SemaphoreTryOnce bool // Optional, defaults to false which means try forever
}
// semaphoreLock is written under the DefaultSemaphoreKey and
// is used to coordinate between all the contenders.
type semaphoreLock struct {
// Limit is the integer limit of holders. This is used to
// verify that all the holders agree on the value.
Limit int
// Holders is a list of all the semaphore holders.
// It maps the session ID to true. It is used as a set effectively.
Holders map[string]bool
}
// SemaphorePrefix is used to create a Semaphore which will operate
// at the given KV prefix and uses the given limit for the semaphore.
// The prefix must have write privileges, and the limit must be agreed
// upon by all contenders.
func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
opts := &SemaphoreOptions{
Prefix: prefix,
Limit: limit,
}
return c.SemaphoreOpts(opts)
}
// SemaphoreOpts is used to create a Semaphore with the given options.
// The prefix must have write privileges, and the limit must be agreed
// upon by all contenders. If a Session is not provided, one will be created.
func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
if opts.Prefix == "" {
return nil, fmt.Errorf("missing prefix")
}
if opts.Limit <= 0 {
return nil, fmt.Errorf("semaphore limit must be positive")
}
if opts.SessionName == "" {
opts.SessionName = DefaultSemaphoreSessionName
}
if opts.SessionTTL == "" {
opts.SessionTTL = DefaultSemaphoreSessionTTL
} else {
if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
}
}
if opts.MonitorRetryTime == 0 {
opts.MonitorRetryTime = DefaultMonitorRetryTime
}
if opts.SemaphoreWaitTime == 0 {
opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
}
s := &Semaphore{
c: c,
opts: opts,
}
return s, nil
}
// Acquire attempts to reserve a slot in the semaphore, blocking until
// success, until interrupted via the stopCh, or until an error is encountered.
// Providing a non-nil stopCh can be used to abort the attempt.
// On success, a channel is returned that represents our slot.
// This channel could be closed at any time due to session invalidation,
// communication errors, operator intervention, etc. It is NOT safe to
// assume that the slot is held until Release() unless the Session is specifically
// created without any associated health checks. By default Consul sessions
// prefer liveness over safety and an application must be able to handle
// the session being lost.
func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
// Hold the lock as we try to acquire
s.l.Lock()
defer s.l.Unlock()
// Check if we already hold the semaphore
if s.isHeld {
return nil, ErrSemaphoreHeld
}
// Check if we need to create a session first
s.lockSession = s.opts.Session
if s.lockSession == "" {
sess, err := s.createSession()
if err != nil {
return nil, fmt.Errorf("failed to create session: %v", err)
}
s.sessionRenew = make(chan struct{})
s.lockSession = sess
session := s.c.Session()
go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
// If we fail to acquire the lock, cleanup the session
defer func() {
if !s.isHeld {
close(s.sessionRenew)
s.sessionRenew = nil
}
}()
}
// Create the contender entry
kv := s.c.KV()
made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil)
if err != nil || !made {
return nil, fmt.Errorf("failed to make contender entry: %v", err)
}
// Setup the query options
qOpts := &QueryOptions{
WaitTime: s.opts.SemaphoreWaitTime,
}
start := time.Now()
attempts := 0
WAIT:
// Check if we should quit
select {
case <-stopCh:
return nil, nil
default:
}
// Handle the one-shot mode.
if s.opts.SemaphoreTryOnce && attempts > 0 {
elapsed := time.Since(start)
if elapsed > s.opts.SemaphoreWaitTime {
return nil, nil
}
// Query wait time should not exceed the semaphore wait time
qOpts.WaitTime = s.opts.SemaphoreWaitTime - elapsed
}
attempts++
// Read the prefix
pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
if err != nil {
return nil, fmt.Errorf("failed to read prefix: %v", err)
}
// Decode the lock
lockPair := s.findLock(pairs)
if lockPair.Flags != SemaphoreFlagValue {
return nil, ErrSemaphoreConflict
}
lock, err := s.decodeLock(lockPair)
if err != nil {
return nil, err
}
// Verify we agree with the limit
if lock.Limit != s.opts.Limit {
return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)",
lock.Limit, s.opts.Limit)
}
// Prune the dead holders
s.pruneDeadHolders(lock, pairs)
// Check if the lock is held
if len(lock.Holders) >= lock.Limit {
qOpts.WaitIndex = meta.LastIndex
goto WAIT
}
// Create a new lock with us as a holder
lock.Holders[s.lockSession] = true
newLock, err := s.encodeLock(lock, lockPair.ModifyIndex)
if err != nil {
return nil, err
}
// Attempt the acquisition
didSet, _, err := kv.CAS(newLock, nil)
if err != nil {
return nil, fmt.Errorf("failed to update lock: %v", err)
}
if !didSet {
// Update failed, could have been a race with another contender,
// retry the operation
goto WAIT
}
// Watch to ensure we maintain ownership of the slot
lockCh := make(chan struct{})
go s.monitorLock(s.lockSession, lockCh)
// Set that we own the lock
s.isHeld = true
// Acquired! All done
return lockCh, nil
}
// Release is used to voluntarily give up our semaphore slot. It is
// an error to call this if the semaphore has not been acquired.
func (s *Semaphore) Release() error {
// Hold the lock as we try to release
s.l.Lock()
defer s.l.Unlock()
// Ensure the lock is actually held
if !s.isHeld {
return ErrSemaphoreNotHeld
}
// Set that we no longer own the lock
s.isHeld = false
// Stop the session renew
if s.sessionRenew != nil {
defer func() {
close(s.sessionRenew)
s.sessionRenew = nil
}()
}
// Get and clear the lock session
lockSession := s.lockSession
s.lockSession = ""
// Remove ourselves as a lock holder
kv := s.c.KV()
key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
READ:
pair, _, err := kv.Get(key, nil)
if err != nil {
return err
}
if pair == nil {
pair = &KVPair{}
}
lock, err := s.decodeLock(pair)
if err != nil {
return err
}
// Create a new lock without us as a holder
if _, ok := lock.Holders[lockSession]; ok {
delete(lock.Holders, lockSession)
newLock, err := s.encodeLock(lock, pair.ModifyIndex)
if err != nil {
return err
}
// Swap the locks
didSet, _, err := kv.CAS(newLock, nil)
if err != nil {
return fmt.Errorf("failed to update lock: %v", err)
}
if !didSet {
goto READ
}
}
// Destroy the contender entry
contenderKey := path.Join(s.opts.Prefix, lockSession)
if _, err := kv.Delete(contenderKey, nil); err != nil {
return err
}
return nil
}
// Destroy is used to clean up the semaphore entry. It is not necessary
// to invoke it. It will fail if the semaphore is in use.
func (s *Semaphore) Destroy() error {
// Hold the lock as we try to acquire
s.l.Lock()
defer s.l.Unlock()
// Check if we already hold the semaphore
if s.isHeld {
return ErrSemaphoreHeld
}
// List for the semaphore
kv := s.c.KV()
pairs, _, err := kv.List(s.opts.Prefix, nil)
if err != nil {
return fmt.Errorf("failed to read prefix: %v", err)
}
// Find the lock pair, bail if it doesn't exist
lockPair := s.findLock(pairs)
if lockPair.ModifyIndex == 0 {
return nil
}
if lockPair.Flags != SemaphoreFlagValue {
return ErrSemaphoreConflict
}
// Decode the lock
lock, err := s.decodeLock(lockPair)
if err != nil {
return err
}
// Prune the dead holders
s.pruneDeadHolders(lock, pairs)
// Check if there are any holders
if len(lock.Holders) > 0 {
return ErrSemaphoreInUse
}
// Attempt the delete
didRemove, _, err := kv.DeleteCAS(lockPair, nil)
if err != nil {
return fmt.Errorf("failed to remove semaphore: %v", err)
}
if !didRemove {
return ErrSemaphoreInUse
}
return nil
}
// createSession is used to create a new managed session
func (s *Semaphore) createSession() (string, error) {
session := s.c.Session()
se := &SessionEntry{
Name: s.opts.SessionName,
TTL: s.opts.SessionTTL,
Behavior: SessionBehaviorDelete,
}
id, _, err := session.Create(se, nil)
if err != nil {
return "", err
}
return id, nil
}
// contenderEntry returns a formatted KVPair for the contender
func (s *Semaphore) contenderEntry(session string) *KVPair {
return &KVPair{
Key: path.Join(s.opts.Prefix, session),
Value: s.opts.Value,
Session: session,
Flags: SemaphoreFlagValue,
}
}
// findLock is used to find the KV Pair which is used for coordination
func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
for _, pair := range pairs {
if pair.Key == key {
return pair
}
}
return &KVPair{Flags: SemaphoreFlagValue}
}
// decodeLock is used to decode a semaphoreLock from an
// entry in Consul
func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) {
// Handle if there is no lock
if pair == nil || pair.Value == nil {
return &semaphoreLock{
Limit: s.opts.Limit,
Holders: make(map[string]bool),
}, nil
}
l := &semaphoreLock{}
if err := json.Unmarshal(pair.Value, l); err != nil {
return nil, fmt.Errorf("lock decoding failed: %v", err)
}
return l, nil
}
// encodeLock is used to encode a semaphoreLock into a KVPair
// that can be PUT
func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) {
enc, err := json.Marshal(l)
if err != nil {
return nil, fmt.Errorf("lock encoding failed: %v", err)
}
pair := &KVPair{
Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey),
Value: enc,
Flags: SemaphoreFlagValue,
ModifyIndex: oldIndex,
}
return pair, nil
}
// pruneDeadHolders is used to remove all the dead lock holders
func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
// Gather all the live holders
alive := make(map[string]struct{}, len(pairs))
for _, pair := range pairs {
if pair.Session != "" {
alive[pair.Session] = struct{}{}
}
}
// Remove any holders that are dead
for holder := range lock.Holders {
if _, ok := alive[holder]; !ok {
delete(lock.Holders, holder)
}
}
}
// monitorLock is a long-running routine to monitor semaphore ownership.
// It closes the stopCh if we lose our slot.
func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
defer close(stopCh)
kv := s.c.KV()
opts := &QueryOptions{RequireConsistent: true}
WAIT:
retries := s.opts.MonitorRetries
RETRY:
pairs, meta, err := kv.List(s.opts.Prefix, opts)
if err != nil {
// If configured we can try to ride out a brief Consul unavailability
// by doing retries. Note that we have to attempt the retry in a non-
// blocking fashion so that we have a clean place to reset the retry
// counter if service is restored.
if retries > 0 && IsRetryableError(err) {
time.Sleep(s.opts.MonitorRetryTime)
retries--
opts.WaitIndex = 0
goto RETRY
}
return
}
lockPair := s.findLock(pairs)
lock, err := s.decodeLock(lockPair)
if err != nil {
return
}
s.pruneDeadHolders(lock, pairs)
if _, ok := lock.Holders[session]; ok {
opts.WaitIndex = meta.LastIndex
goto WAIT
}
}
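The semaphore flow above mirrors the lock flow; a minimal usage sketch follows (illustrative only, not part of the vendored file, with an invented function name and prefix, and the api package context of this file assumed).

// exampleSemaphoreUsage acquires one of three slots under a prefix, does some
// work, and releases the slot.
func exampleSemaphoreUsage(c *Client) error {
    sema, err := c.SemaphorePrefix("service/example/semaphore/", 3)
    if err != nil {
        return err
    }

    slotCh, err := sema.Acquire(nil)
    if err != nil {
        return err
    }
    if slotCh == nil {
        // A nil channel with a nil error means the attempt was aborted.
        return fmt.Errorf("semaphore slot not acquired")
    }
    defer sema.Release()

    // Do bounded work here; slotCh closes if the slot is ever lost.
    select {
    case <-slotCh:
        return fmt.Errorf("semaphore slot was lost")
    default:
        return nil
    }
}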

224
vendor/github.com/hashicorp/consul/api/session.go generated vendored Normal file
View File

@ -0,0 +1,224 @@
package api
import (
"errors"
"fmt"
"time"
)
const (
// SessionBehaviorRelease is the default behavior and causes
// all associated locks to be released on session invalidation.
SessionBehaviorRelease = "release"
// SessionBehaviorDelete is new in Consul 0.5 and changes the
// behavior to delete all associated locks on session invalidation.
// It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
SessionBehaviorDelete = "delete"
)
var ErrSessionExpired = errors.New("session expired")
// SessionEntry represents a session in consul
type SessionEntry struct {
CreateIndex uint64
ID string
Name string
Node string
Checks []string
LockDelay time.Duration
Behavior string
TTL string
}
// Session can be used to query the Session endpoints
type Session struct {
c *Client
}
// Session returns a handle to the session endpoints
func (c *Client) Session() *Session {
return &Session{c}
}
// CreateNoChecks is like Create but is used specifically to create
// a session with no associated health checks.
func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
body := make(map[string]interface{})
body["Checks"] = []string{}
if se != nil {
if se.Name != "" {
body["Name"] = se.Name
}
if se.Node != "" {
body["Node"] = se.Node
}
if se.LockDelay != 0 {
body["LockDelay"] = durToMsec(se.LockDelay)
}
if se.Behavior != "" {
body["Behavior"] = se.Behavior
}
if se.TTL != "" {
body["TTL"] = se.TTL
}
}
return s.create(body, q)
}
// Create makes a new session. Providing a session entry can
// customize the session. It can also be nil to use defaults.
func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
var obj interface{}
if se != nil {
body := make(map[string]interface{})
obj = body
if se.Name != "" {
body["Name"] = se.Name
}
if se.Node != "" {
body["Node"] = se.Node
}
if se.LockDelay != 0 {
body["LockDelay"] = durToMsec(se.LockDelay)
}
if len(se.Checks) > 0 {
body["Checks"] = se.Checks
}
if se.Behavior != "" {
body["Behavior"] = se.Behavior
}
if se.TTL != "" {
body["TTL"] = se.TTL
}
}
return s.create(obj, q)
}
func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
var out struct{ ID string }
wm, err := s.c.write("/v1/session/create", obj, &out, q)
if err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Destroy invalidates a given session
func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
if err != nil {
return nil, err
}
return wm, nil
}
// Renew renews the TTL on a given session
func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
r.setWriteOptions(q)
rtt, resp, err := s.c.doRequest(r)
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
if resp.StatusCode == 404 {
return nil, wm, nil
} else if resp.StatusCode != 200 {
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
}
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, fmt.Errorf("Failed to read response: %v", err)
}
if len(entries) > 0 {
return entries[0], wm, nil
}
return nil, wm, nil
}
// RenewPeriodic is used to periodically invoke Session.Renew on a
// session until a doneCh is closed. This is meant to be used in a long running
// goroutine to ensure a session stays valid.
func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error {
ctx := q.Context()
ttl, err := time.ParseDuration(initialTTL)
if err != nil {
return err
}
waitDur := ttl / 2
lastRenewTime := time.Now()
var lastErr error
for {
if time.Since(lastRenewTime) > ttl {
return lastErr
}
select {
case <-time.After(waitDur):
entry, _, err := s.Renew(id, q)
if err != nil {
waitDur = time.Second
lastErr = err
continue
}
if entry == nil {
return ErrSessionExpired
}
// Handle the server updating the TTL
ttl, _ = time.ParseDuration(entry.TTL)
waitDur = ttl / 2
lastRenewTime = time.Now()
case <-doneCh:
// Attempt a session destroy
s.Destroy(id, q)
return nil
case <-ctx.Done():
// Bail immediately since attempting the destroy would
// use the canceled context in q, which would just bail.
return ctx.Err()
}
}
}
// Info looks up a single session
func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
var entries []*SessionEntry
qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
if err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// Node gets sessions for a node
func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
var entries []*SessionEntry
qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
if err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// List gets all active sessions
func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
var entries []*SessionEntry
qm, err := s.c.query("/v1/session/list", &entries, q)
if err != nil {
return nil, nil, err
}
return entries, qm, nil
}
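A minimal sketch (illustrative, not part of the vendored file) of creating a TTL session and keeping it renewed with RenewPeriodic until a done channel closes; the function name, session name, and TTL value are assumptions, and the api package context of this file is assumed.

// exampleSessionWithRenewal creates a TTL-based session and renews it in the
// background until doneCh is closed, at which point RenewPeriodic destroys it.
func exampleSessionWithRenewal(c *Client, doneCh <-chan struct{}) (string, error) {
    session := c.Session()

    id, _, err := session.Create(&SessionEntry{
        Name:     "example-session",
        TTL:      "15s",
        Behavior: SessionBehaviorDelete,
    }, nil)
    if err != nil {
        return "", err
    }

    // RenewPeriodic renews at roughly TTL/2 and falls back to one-second
    // retries on transient errors.
    go session.RenewPeriodic("15s", id, nil, doneCh)

    return id, nil
}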

47
vendor/github.com/hashicorp/consul/api/snapshot.go generated vendored Normal file
View File

@ -0,0 +1,47 @@
package api
import (
"io"
)
// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
// Consul's internal state and restore snapshots for disaster recovery.
type Snapshot struct {
c *Client
}
// Snapshot returns a handle that exposes the snapshot endpoints.
func (c *Client) Snapshot() *Snapshot {
return &Snapshot{c}
}
// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
// data to save. If this doesn't return an error, then it's the responsibility
// of the caller to close it. Only a subset of the QueryOptions are supported:
// Datacenter, AllowStale, and Token.
func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/snapshot")
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
return resp.Body, qm, nil
}
// Restore streams in an existing snapshot and attempts to restore it.
func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
r := s.c.newRequest("PUT", "/v1/snapshot")
r.body = in
r.setWriteOptions(q)
_, _, err := requireOK(s.c.doRequest(r))
if err != nil {
return err
}
return nil
}
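Illustrative sketch (not part of the vendored file): streaming a snapshot into a caller-provided writer; the function name is invented and the api package context of this file is assumed.

// exampleSnapshotSave streams a snapshot of Consul's state into w and closes
// the snapshot body when done, as required of the caller.
func exampleSnapshotSave(c *Client, w io.Writer) error {
    snap, _, err := c.Snapshot().Save(nil)
    if err != nil {
        return err
    }
    defer snap.Close()

    _, err = io.Copy(w, snap)
    return err
}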

43
vendor/github.com/hashicorp/consul/api/status.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
package api
// Status can be used to query the Status endpoints
type Status struct {
c *Client
}
// Status returns a handle to the status endpoints
func (c *Client) Status() *Status {
return &Status{c}
}
// Leader is used to query for a known leader
func (s *Status) Leader() (string, error) {
r := s.c.newRequest("GET", "/v1/status/leader")
_, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return "", err
}
defer resp.Body.Close()
var leader string
if err := decodeBody(resp, &leader); err != nil {
return "", err
}
return leader, nil
}
// Peers is used to query for the known Raft peers
func (s *Status) Peers() ([]string, error) {
r := s.c.newRequest("GET", "/v1/status/peers")
_, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var peers []string
if err := decodeBody(resp, &peers); err != nil {
return nil, err
}
return peers, nil
}
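A minimal sketch (illustrative, not part of the vendored file) combining the two status calls above; the function name is invented and the api package context of this file is assumed.

// exampleStatusCheck reads the current leader address and the known Raft
// peers in one pass.
func exampleStatusCheck(c *Client) (string, []string, error) {
    status := c.Status()

    leader, err := status.Leader()
    if err != nil {
        return "", nil, err
    }

    peers, err := status.Peers()
    if err != nil {
        return "", nil, err
    }
    return leader, peers, nil
}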

230
vendor/github.com/hashicorp/consul/api/txn.go generated vendored Normal file
View File

@ -0,0 +1,230 @@
package api
import (
"bytes"
"fmt"
"io"
"net/http"
)
// Txn is used to manipulate the Txn API
type Txn struct {
c *Client
}
// Txn is used to return a handle to the transaction API
func (c *Client) Txn() *Txn {
return &Txn{c}
}
// TxnOp is the internal format we send to Consul. Currently K/V, node,
// service, and check operations are supported.
type TxnOp struct {
KV *KVTxnOp
Node *NodeTxnOp
Service *ServiceTxnOp
Check *CheckTxnOp
}
// TxnOps is a list of transaction operations.
type TxnOps []*TxnOp
// TxnResult is the internal format we receive from Consul.
type TxnResult struct {
KV *KVPair
Node *Node
Service *CatalogService
Check *HealthCheck
}
// TxnResults is a list of TxnResult objects.
type TxnResults []*TxnResult
// TxnError is used to return information about an operation in a transaction.
type TxnError struct {
OpIndex int
What string
}
// TxnErrors is a list of TxnError objects.
type TxnErrors []*TxnError
// TxnResponse is the internal format we receive from Consul.
type TxnResponse struct {
Results TxnResults
Errors TxnErrors
}
// KVOp constants give possible operations available in a transaction.
type KVOp string
const (
KVSet KVOp = "set"
KVDelete KVOp = "delete"
KVDeleteCAS KVOp = "delete-cas"
KVDeleteTree KVOp = "delete-tree"
KVCAS KVOp = "cas"
KVLock KVOp = "lock"
KVUnlock KVOp = "unlock"
KVGet KVOp = "get"
KVGetTree KVOp = "get-tree"
KVCheckSession KVOp = "check-session"
KVCheckIndex KVOp = "check-index"
KVCheckNotExists KVOp = "check-not-exists"
)
// KVTxnOp defines a single operation inside a transaction.
type KVTxnOp struct {
Verb KVOp
Key string
Value []byte
Flags uint64
Index uint64
Session string
}
// KVTxnOps defines a set of operations to be performed inside a single
// transaction.
type KVTxnOps []*KVTxnOp
// KVTxnResponse has the outcome of a transaction.
type KVTxnResponse struct {
Results []*KVPair
Errors TxnErrors
}
// NodeOp constants give possible operations available in a transaction.
type NodeOp string
const (
NodeGet NodeOp = "get"
NodeSet NodeOp = "set"
NodeCAS NodeOp = "cas"
NodeDelete NodeOp = "delete"
NodeDeleteCAS NodeOp = "delete-cas"
)
// NodeTxnOp defines a single operation inside a transaction.
type NodeTxnOp struct {
Verb NodeOp
Node Node
}
// ServiceOp constants give possible operations available in a transaction.
type ServiceOp string
const (
ServiceGet ServiceOp = "get"
ServiceSet ServiceOp = "set"
ServiceCAS ServiceOp = "cas"
ServiceDelete ServiceOp = "delete"
ServiceDeleteCAS ServiceOp = "delete-cas"
)
// ServiceTxnOp defines a single operation inside a transaction.
type ServiceTxnOp struct {
Verb ServiceOp
Node string
Service AgentService
}
// CheckOp constants give possible operations available in a transaction.
type CheckOp string
const (
CheckGet CheckOp = "get"
CheckSet CheckOp = "set"
CheckCAS CheckOp = "cas"
CheckDelete CheckOp = "delete"
CheckDeleteCAS CheckOp = "delete-cas"
)
// CheckTxnOp defines a single operation inside a transaction.
type CheckTxnOp struct {
Verb CheckOp
Check HealthCheck
}
// Txn is used to apply multiple Consul operations in a single, atomic transaction.
//
// Note that Go will perform the required base64 encoding on the values
// automatically because the type is a byte slice. Transactions are defined as a
// list of operations to perform, using the different fields in the TxnOp structure
// to define operations. If any operation fails, none of the changes are applied
// to the state store.
//
// Even though this is generally a write operation, we take a QueryOptions input
// and return a QueryMeta output. If the transaction contains only read ops, then
// Consul will fast-path it to a different endpoint internally which supports
// consistency controls, but not blocking. If there are write operations then
// the request will always be routed through raft and any consistency settings
// will be ignored.
//
// Here's an example:
//
// ops := TxnOps{
//     &TxnOp{
//         KV: &KVTxnOp{
//             Verb:    KVLock,
//             Key:     "test/lock",
//             Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
//             Value:   []byte("hello"),
//         },
//     },
//     &TxnOp{
//         KV: &KVTxnOp{
//             Verb: KVGet,
//             Key:  "another/key",
//         },
//     },
//     &TxnOp{
//         Check: &CheckTxnOp{
//             Verb: CheckSet,
//             Check: HealthCheck{
//                 Node:    "foo",
//                 CheckID: "redis:a",
//                 Name:    "Redis Health Check",
//                 Status:  "passing",
//             },
//         },
//     },
// }
// ok, response, _, err := client.Txn().Txn(ops, nil)
//
// If there is a problem making the transaction request then an error will be
// returned. Otherwise, the ok value will be true if the transaction succeeded
// or false if it was rolled back. The response is a structured return value which
// will have the outcome of the transaction. Its Results member will have entries
// for each operation. For KV operations, Deleted keys will have a nil entry in the
// results, and to save space, the Value of each key in the Results will be nil
// unless the operation is a KVGet. If the transaction was rolled back, the Errors
// member will have entries referencing the index of the operation that failed
// along with an error message.
func (t *Txn) Txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
return t.c.txn(txn, q)
}
func (c *Client) txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
r := c.newRequest("PUT", "/v1/txn")
r.setQueryOptions(q)
r.obj = txn
rtt, resp, err := c.doRequest(r)
if err != nil {
return false, nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
var txnResp TxnResponse
if err := decodeBody(resp, &txnResp); err != nil {
return false, nil, nil, err
}
return resp.StatusCode == http.StatusOK, &txnResp, qm, nil
}
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
}
return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
}

349
vendor/github.com/hashicorp/consul/api/watch/funcs.go generated vendored Normal file
View File

@ -0,0 +1,349 @@
package watch
import (
"context"
"fmt"
consulapi "github.com/hashicorp/consul/api"
)
// watchFactory is a function that can create a new WatchFunc
// from a parameter configuration
type watchFactory func(params map[string]interface{}) (WatcherFunc, error)
// watchFuncFactory maps each type to a factory function
var watchFuncFactory map[string]watchFactory
func init() {
watchFuncFactory = map[string]watchFactory{
"key": keyWatch,
"keyprefix": keyPrefixWatch,
"services": servicesWatch,
"nodes": nodesWatch,
"service": serviceWatch,
"checks": checksWatch,
"event": eventWatch,
"connect_roots": connectRootsWatch,
"connect_leaf": connectLeafWatch,
"connect_proxy_config": connectProxyConfigWatch,
"agent_service": agentServiceWatch,
}
}
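Each key in this map is a "type" value accepted by Parse (defined in watch.go below), and the factory consumes the remaining parameters. As a rough sketch of how a factory is reached in practice, assuming a local agent at 127.0.0.1:8500 and an illustrative key name:

package main

import (
    "log"

    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/api/watch"
)

func main() {
    // "type" selects the factory from watchFuncFactory; the remaining
    // params are consumed by that factory (here: keyWatch).
    plan, err := watch.Parse(map[string]interface{}{
        "type": "key",
        "key":  "config/feature-flag",
    })
    if err != nil {
        log.Fatal(err)
    }
    plan.Handler = func(idx uint64, raw interface{}) {
        pair, ok := raw.(*api.KVPair)
        if !ok || pair == nil {
            log.Printf("key deleted or not yet present (index %d)", idx)
            return
        }
        log.Printf("key %q changed to %q at index %d", pair.Key, pair.Value, idx)
    }
    // Blocks until Stop is called; the agent address is an assumption.
    if err := plan.Run("127.0.0.1:8500"); err != nil {
        log.Fatal(err)
    }
}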
// keyWatch is used to return a key watching function
func keyWatch(params map[string]interface{}) (WatcherFunc, error) {
stale := false
if err := assignValueBool(params, "stale", &stale); err != nil {
return nil, err
}
var key string
if err := assignValue(params, "key", &key); err != nil {
return nil, err
}
if key == "" {
return nil, fmt.Errorf("Must specify a single key to watch")
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
kv := p.client.KV()
opts := makeQueryOptionsWithContext(p, stale)
defer p.cancelFunc()
pair, meta, err := kv.Get(key, &opts)
if err != nil {
return nil, nil, err
}
if pair == nil {
return WaitIndexVal(meta.LastIndex), nil, err
}
return WaitIndexVal(meta.LastIndex), pair, err
}
return fn, nil
}
// keyPrefixWatch is used to return a key prefix watching function
func keyPrefixWatch(params map[string]interface{}) (WatcherFunc, error) {
stale := false
if err := assignValueBool(params, "stale", &stale); err != nil {
return nil, err
}
var prefix string
if err := assignValue(params, "prefix", &prefix); err != nil {
return nil, err
}
if prefix == "" {
return nil, fmt.Errorf("Must specify a single prefix to watch")
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
kv := p.client.KV()
opts := makeQueryOptionsWithContext(p, stale)
defer p.cancelFunc()
pairs, meta, err := kv.List(prefix, &opts)
if err != nil {
return nil, nil, err
}
return WaitIndexVal(meta.LastIndex), pairs, err
}
return fn, nil
}
// servicesWatch is used to watch the list of available services
func servicesWatch(params map[string]interface{}) (WatcherFunc, error) {
stale := false
if err := assignValueBool(params, "stale", &stale); err != nil {
return nil, err
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
catalog := p.client.Catalog()
opts := makeQueryOptionsWithContext(p, stale)
defer p.cancelFunc()
services, meta, err := catalog.Services(&opts)
if err != nil {
return nil, nil, err
}
return WaitIndexVal(meta.LastIndex), services, err
}
return fn, nil
}
// nodesWatch is used to watch the list of available nodes
func nodesWatch(params map[string]interface{}) (WatcherFunc, error) {
stale := false
if err := assignValueBool(params, "stale", &stale); err != nil {
return nil, err
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
catalog := p.client.Catalog()
opts := makeQueryOptionsWithContext(p, stale)
defer p.cancelFunc()
nodes, meta, err := catalog.Nodes(&opts)
if err != nil {
return nil, nil, err
}
return WaitIndexVal(meta.LastIndex), nodes, err
}
return fn, nil
}
// serviceWatch is used to watch a specific service for changes
func serviceWatch(params map[string]interface{}) (WatcherFunc, error) {
stale := false
if err := assignValueBool(params, "stale", &stale); err != nil {
return nil, err
}
var (
service string
tags []string
)
if err := assignValue(params, "service", &service); err != nil {
return nil, err
}
if service == "" {
return nil, fmt.Errorf("Must specify a single service to watch")
}
if err := assignValueStringSlice(params, "tag", &tags); err != nil {
return nil, err
}
passingOnly := false
if err := assignValueBool(params, "passingonly", &passingOnly); err != nil {
return nil, err
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
health := p.client.Health()
opts := makeQueryOptionsWithContext(p, stale)
defer p.cancelFunc()
nodes, meta, err := health.ServiceMultipleTags(service, tags, passingOnly, &opts)
if err != nil {
return nil, nil, err
}
return WaitIndexVal(meta.LastIndex), nodes, err
}
return fn, nil
}
// checksWatch is used to watch the checks of a given service, or all checks in a given state
func checksWatch(params map[string]interface{}) (WatcherFunc, error) {
stale := false
if err := assignValueBool(params, "stale", &stale); err != nil {
return nil, err
}
var service, state string
if err := assignValue(params, "service", &service); err != nil {
return nil, err
}
if err := assignValue(params, "state", &state); err != nil {
return nil, err
}
if service != "" && state != "" {
return nil, fmt.Errorf("Cannot specify service and state")
}
if service == "" && state == "" {
state = "any"
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
health := p.client.Health()
opts := makeQueryOptionsWithContext(p, stale)
defer p.cancelFunc()
var checks []*consulapi.HealthCheck
var meta *consulapi.QueryMeta
var err error
if state != "" {
checks, meta, err = health.State(state, &opts)
} else {
checks, meta, err = health.Checks(service, &opts)
}
if err != nil {
return nil, nil, err
}
return WaitIndexVal(meta.LastIndex), checks, err
}
return fn, nil
}
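A small sketch of the two mutually exclusive parameter forms this factory accepts; the service name and state are illustrative:

package main

import (
    "log"

    "github.com/hashicorp/consul/api/watch"
)

func main() {
    // Watch every check registered for one service...
    byService, err := watch.Parse(map[string]interface{}{
        "type":    "checks",
        "service": "redis",
    })
    if err != nil {
        log.Fatal(err)
    }
    _ = byService

    // ...or every check in a given state. Supplying both parameters is
    // rejected by checksWatch, and omitting both defaults the state to "any".
    byState, err := watch.Parse(map[string]interface{}{
        "type":  "checks",
        "state": "critical",
    })
    if err != nil {
        log.Fatal(err)
    }
    _ = byState
}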
// eventWatch is used to watch for events, optionally filtering on name
func eventWatch(params map[string]interface{}) (WatcherFunc, error) {
// The stale setting doesn't apply to events.
var name string
if err := assignValue(params, "name", &name); err != nil {
return nil, err
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
event := p.client.Event()
opts := makeQueryOptionsWithContext(p, false)
defer p.cancelFunc()
events, meta, err := event.List(name, &opts)
if err != nil {
return nil, nil, err
}
// Prune to only the new events
for i := 0; i < len(events); i++ {
if WaitIndexVal(event.IDToIndex(events[i].ID)).Equal(p.lastParamVal) {
events = events[i+1:]
break
}
}
return WaitIndexVal(meta.LastIndex), events, err
}
return fn, nil
}
// connectRootsWatch is used to watch for changes to Connect Root certificates.
func connectRootsWatch(params map[string]interface{}) (WatcherFunc, error) {
// We don't support stale since roots are cached locally in the agent.
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
agent := p.client.Agent()
opts := makeQueryOptionsWithContext(p, false)
defer p.cancelFunc()
roots, meta, err := agent.ConnectCARoots(&opts)
if err != nil {
return nil, nil, err
}
return WaitIndexVal(meta.LastIndex), roots, err
}
return fn, nil
}
// connectLeafWatch is used to watch for changes to Connect Leaf certificates
// for given local service id.
func connectLeafWatch(params map[string]interface{}) (WatcherFunc, error) {
// We don't support stale since certs are cached locally in the agent.
var serviceName string
if err := assignValue(params, "service", &serviceName); err != nil {
return nil, err
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
agent := p.client.Agent()
opts := makeQueryOptionsWithContext(p, false)
defer p.cancelFunc()
leaf, meta, err := agent.ConnectCALeaf(serviceName, &opts)
if err != nil {
return nil, nil, err
}
return WaitIndexVal(meta.LastIndex), leaf, err
}
return fn, nil
}
// connectProxyConfigWatch is used to watch for changes to Connect managed proxy
// configuration. Note that this state is agent-local so the watch mechanism
// uses `hash` rather than `index` for deciding whether to block.
func connectProxyConfigWatch(params map[string]interface{}) (WatcherFunc, error) {
// We don't support consistency modes since it's agent local data
var proxyServiceID string
if err := assignValue(params, "proxy_service_id", &proxyServiceID); err != nil {
return nil, err
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
agent := p.client.Agent()
opts := makeQueryOptionsWithContext(p, false)
defer p.cancelFunc()
config, _, err := agent.ConnectProxyConfig(proxyServiceID, &opts)
if err != nil {
return nil, nil, err
}
// Return string ContentHash since we don't have Raft indexes to block on.
return WaitHashVal(config.ContentHash), config, err
}
return fn, nil
}
// agentServiceWatch is used to watch for changes to a single service instance
// on the local agent. Note that this state is agent-local so the watch
// mechanism uses `hash` rather than `index` for deciding whether to block.
func agentServiceWatch(params map[string]interface{}) (WatcherFunc, error) {
// We don't support consistency modes since it's agent local data
var serviceID string
if err := assignValue(params, "service_id", &serviceID); err != nil {
return nil, err
}
fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
agent := p.client.Agent()
opts := makeQueryOptionsWithContext(p, false)
defer p.cancelFunc()
svc, _, err := agent.Service(serviceID, &opts)
if err != nil {
return nil, nil, err
}
// Return string ContentHash since we don't have Raft indexes to block on.
return WaitHashVal(svc.ContentHash), svc, err
}
return fn, nil
}
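Because this watch blocks on a content hash rather than a Raft index, the legacy Handler is not a good fit; a HybridHandler is needed. A minimal sketch, assuming a service with the illustrative ID "web" registered on the local agent:

package main

import (
    "log"

    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/api/watch"
)

func main() {
    plan, err := watch.Parse(map[string]interface{}{
        "type":       "agent_service",
        "service_id": "web", // illustrative local service ID
    })
    if err != nil {
        log.Fatal(err)
    }
    // The handler receives a WaitHashVal here, not a WaitIndexVal.
    plan.HybridHandler = func(val watch.BlockingParamVal, raw interface{}) {
        svc, ok := raw.(*api.AgentService)
        if !ok || svc == nil {
            return
        }
        log.Printf("service %q changed (hash %v)", svc.Service, val)
    }
    if err := plan.Run("127.0.0.1:8500"); err != nil {
        log.Fatal(err)
    }
}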
func makeQueryOptionsWithContext(p *Plan, stale bool) consulapi.QueryOptions {
ctx, cancel := context.WithCancel(context.Background())
p.setCancelFunc(cancel)
opts := consulapi.QueryOptions{AllowStale: stale}
switch param := p.lastParamVal.(type) {
case WaitIndexVal:
opts.WaitIndex = uint64(param)
case WaitHashVal:
opts.WaitHash = string(param)
}
return *opts.WithContext(ctx)
}

vendor/github.com/hashicorp/consul/api/watch/plan.go (generated, vendored; new file)
@ -0,0 +1,167 @@
package watch
import (
"context"
"fmt"
"log"
"os"
"reflect"
"time"
consulapi "github.com/hashicorp/consul/api"
)
const (
// retryInterval is the base retry value
retryInterval = 5 * time.Second
// maxBackoffTime is the maximum backoff time; it caps the retry delay so it
// cannot grow without bound.
maxBackoffTime = 180 * time.Second
)
// Run is used to run a watch plan against the agent at the given address,
// using the default client configuration.
func (p *Plan) Run(address string) error {
return p.RunWithConfig(address, nil)
}
// RunWithConfig is used to run a watch plan, using the given address and
// Consul client configuration (or the default configuration if conf is nil).
func (p *Plan) RunWithConfig(address string, conf *consulapi.Config) error {
// Setup the client
p.address = address
if conf == nil {
conf = consulapi.DefaultConfig()
}
conf.Address = address
conf.Datacenter = p.Datacenter
conf.Token = p.Token
client, err := consulapi.NewClient(conf)
if err != nil {
return fmt.Errorf("Failed to connect to agent: %v", err)
}
// Create the logger
output := p.LogOutput
if output == nil {
output = os.Stderr
}
logger := log.New(output, "", log.LstdFlags)
return p.RunWithClientAndLogger(client, logger)
}
// RunWithClientAndLogger runs a watch plan using an external client and
// log.Logger instance. Using this, the plan's Datacenter, Token and LogOutput
// fields are ignored and the passed client is expected to be configured as
// needed.
func (p *Plan) RunWithClientAndLogger(client *consulapi.Client,
logger *log.Logger) error {
p.client = client
// Loop until we are canceled
failures := 0
OUTER:
for !p.shouldStop() {
// Invoke the handler
blockParamVal, result, err := p.Watcher(p)
// Check if we should terminate since the function
// could have blocked for a while
if p.shouldStop() {
break
}
// Handle an error in the watch function
if err != nil {
// Back off: the delay grows as retryInterval * failures^2, capped at maxBackoffTime
failures++
if blockParamVal == nil {
p.lastParamVal = nil
} else {
p.lastParamVal = blockParamVal.Next(p.lastParamVal)
}
retry := retryInterval * time.Duration(failures*failures)
if retry > maxBackoffTime {
retry = maxBackoffTime
}
logger.Printf("[ERR] consul.watch: Watch (type: %s) errored: %v, retry in %v",
p.Type, err, retry)
select {
case <-time.After(retry):
continue OUTER
case <-p.stopCh:
return nil
}
}
// Clear the failures
failures = 0
// If the index is unchanged do nothing
if p.lastParamVal != nil && p.lastParamVal.Equal(blockParamVal) {
continue
}
// Update the index, look for change
oldParamVal := p.lastParamVal
p.lastParamVal = blockParamVal.Next(oldParamVal)
if oldParamVal != nil && reflect.DeepEqual(p.lastResult, result) {
continue
}
// Handle the updated result
p.lastResult = result
// If a hybrid handler exists use that
if p.HybridHandler != nil {
p.HybridHandler(blockParamVal, result)
} else if p.Handler != nil {
idx, ok := blockParamVal.(WaitIndexVal)
if !ok {
logger.Printf("[ERR] consul.watch: Handler only supports index-based " +
" watches but non index-based watch run. Skipping Handler.")
}
p.Handler(uint64(idx), result)
}
}
return nil
}
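A sketch of driving the loop above with an externally configured client and logger, and stopping it from another goroutine. The service name, logger prefix and 30-second stop timer are illustrative:

package main

import (
    "log"
    "os"
    "time"

    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/api/watch"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }
    plan, err := watch.Parse(map[string]interface{}{
        "type":    "service",
        "service": "web",
    })
    if err != nil {
        log.Fatal(err)
    }
    plan.Handler = func(idx uint64, raw interface{}) {
        entries, _ := raw.([]*api.ServiceEntry)
        log.Printf("index %d: %d instances", idx, len(entries))
    }

    logger := log.New(os.Stderr, "watch: ", log.LstdFlags)

    // Stop the plan after a while; RunWithClientAndLogger returns once Stop
    // closes the plan's stop channel.
    go func() {
        time.Sleep(30 * time.Second)
        plan.Stop()
    }()

    if err := plan.RunWithClientAndLogger(client, logger); err != nil {
        log.Fatal(err)
    }
}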
// Stop is used to stop running the watch plan
func (p *Plan) Stop() {
p.stopLock.Lock()
defer p.stopLock.Unlock()
if p.stop {
return
}
p.stop = true
if p.cancelFunc != nil {
p.cancelFunc()
}
close(p.stopCh)
}
func (p *Plan) shouldStop() bool {
select {
case <-p.stopCh:
return true
default:
return false
}
}
func (p *Plan) setCancelFunc(cancel context.CancelFunc) {
p.stopLock.Lock()
defer p.stopLock.Unlock()
if p.shouldStop() {
// The watch is already stopped; run the new cancel func right away so the
// blocking query started by the watcher is canceled.
cancel()
return
}
p.cancelFunc = cancel
}
func (p *Plan) IsStopped() bool {
p.stopLock.Lock()
defer p.stopLock.Unlock()
return p.stop
}

vendor/github.com/hashicorp/consul/api/watch/watch.go (generated, vendored; new file)
@ -0,0 +1,289 @@
package watch
import (
"context"
"fmt"
"io"
"sync"
"time"
consulapi "github.com/hashicorp/consul/api"
"github.com/mitchellh/mapstructure"
)
const DefaultTimeout = 10 * time.Second
// Plan is the parsed version of a watch specification. A watch provides
// the details of a query, which generates a view into the Consul data store.
// This view is watched for changes and a handler is invoked to take any
// appropriate actions.
type Plan struct {
Datacenter string
Token string
Type string
HandlerType string
Exempt map[string]interface{}
Watcher WatcherFunc
// Handler is kept for backward compatibility but only supports watches based
// on index param. To support hash based watches, set HybridHandler instead.
Handler HandlerFunc
HybridHandler HybridHandlerFunc
LogOutput io.Writer
address string
client *consulapi.Client
lastParamVal BlockingParamVal
lastResult interface{}
stop bool
stopCh chan struct{}
stopLock sync.Mutex
cancelFunc context.CancelFunc
}
// HttpHandlerConfig holds the configuration for the "http" watch handler type,
// as decoded from the "http_handler_config" watch parameter.
type HttpHandlerConfig struct {
Path string `mapstructure:"path"`
Method string `mapstructure:"method"`
Timeout time.Duration `mapstructure:"-"`
TimeoutRaw string `mapstructure:"timeout"`
Header map[string][]string `mapstructure:"header"`
TLSSkipVerify bool `mapstructure:"tls_skip_verify"`
}
// BlockingParamVal is an interface representing the common operations needed for
// different styles of blocking. It's used to abstract the core watch plan from
// whether we are performing index-based or hash-based blocking.
type BlockingParamVal interface {
// Equal returns whether the other param value should be considered equal
// (i.e. representing no change in the watched resource). Equal must not panic
// if other is nil.
Equal(other BlockingParamVal) bool
// Next is called when deciding which value to use on the next blocking call.
// It assumes the BlockingParamVal value it is called on is the most recent one
// returned and passes the previous one which may be nil as context. This
// allows types to customize logic around ordering without assuming there is
// an order. For example WaitIndexVal can check that the index didn't go
// backwards and if it did then reset to 0. Most other cases should just
// return themselves (the most recent value) to be used in the next request.
Next(previous BlockingParamVal) BlockingParamVal
}
// WaitIndexVal is a type representing a Consul index that implements
// BlockingParamVal.
type WaitIndexVal uint64
// Equal implements BlockingParamVal
func (idx WaitIndexVal) Equal(other BlockingParamVal) bool {
if otherIdx, ok := other.(WaitIndexVal); ok {
return idx == otherIdx
}
return false
}
// Next implements BlockingParamVal
func (idx WaitIndexVal) Next(previous BlockingParamVal) BlockingParamVal {
if previous == nil {
return idx
}
prevIdx, ok := previous.(WaitIndexVal)
if ok && prevIdx > idx {
// This value is smaller than the previous index, reset.
return WaitIndexVal(0)
}
return idx
}
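A small sketch of the reset semantics: if the most recent index is lower than the previous one (for example after a server restore), Next falls back to 0 so the following blocking query starts fresh. The concrete index values are illustrative:

package main

import (
    "fmt"

    "github.com/hashicorp/consul/api/watch"
)

func main() {
    prev := watch.WaitIndexVal(100)

    // Normal forward progress: keep the newer index.
    fmt.Println(watch.WaitIndexVal(150).Next(prev)) // 150

    // Index went backwards relative to prev: reset to 0 for the next call.
    fmt.Println(watch.WaitIndexVal(50).Next(prev)) // 0
}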
// WaitHashVal is a type representing a Consul content hash that implements
// BlockingParamVal.
type WaitHashVal string
// Equal implements BlockingParamVal
func (h WaitHashVal) Equal(other BlockingParamVal) bool {
if otherHash, ok := other.(WaitHashVal); ok {
return h == otherHash
}
return false
}
// Next implements BlockingParamVal
func (h WaitHashVal) Next(previous BlockingParamVal) BlockingParamVal {
return h
}
// WatcherFunc is used to watch for a diff.
type WatcherFunc func(*Plan) (BlockingParamVal, interface{}, error)
// HandlerFunc is used to handle new data. It only works for index-based watches
// (which is almost all endpoints currently) and is kept for backwards
// compatibility until more places can make use of hash-based watches too.
type HandlerFunc func(uint64, interface{})
// HybridHandlerFunc is used to handle new data. It can support either
// index-based or hash-based watches via the BlockingParamVal.
type HybridHandlerFunc func(BlockingParamVal, interface{})
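A handler that works with either blocking style can switch on the concrete BlockingParamVal type. A minimal sketch; the watch type and agent address are illustrative:

package main

import (
    "log"

    "github.com/hashicorp/consul/api/watch"
)

// newLoggingHandler returns a HybridHandlerFunc usable with both index-based
// and hash-based watch types.
func newLoggingHandler() watch.HybridHandlerFunc {
    return func(val watch.BlockingParamVal, raw interface{}) {
        switch v := val.(type) {
        case watch.WaitIndexVal:
            log.Printf("update at raft index %d: %T", uint64(v), raw)
        case watch.WaitHashVal:
            log.Printf("update at content hash %s: %T", string(v), raw)
        }
    }
}

func main() {
    plan, err := watch.Parse(map[string]interface{}{
        "type": "services",
    })
    if err != nil {
        log.Fatal(err)
    }
    plan.HybridHandler = newLoggingHandler()
    if err := plan.Run("127.0.0.1:8500"); err != nil {
        log.Fatal(err)
    }
}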
// Parse takes a watch query and compiles it into a WatchPlan or an error
func Parse(params map[string]interface{}) (*Plan, error) {
return ParseExempt(params, nil)
}
// ParseExempt takes a watch query and compiles it into a WatchPlan or an error
// Any exempt parameters are stored in the Exempt map
func ParseExempt(params map[string]interface{}, exempt []string) (*Plan, error) {
plan := &Plan{
stopCh: make(chan struct{}),
Exempt: make(map[string]interface{}),
}
// Parse the generic parameters
if err := assignValue(params, "datacenter", &plan.Datacenter); err != nil {
return nil, err
}
if err := assignValue(params, "token", &plan.Token); err != nil {
return nil, err
}
if err := assignValue(params, "type", &plan.Type); err != nil {
return nil, err
}
// Ensure there is a watch type
if plan.Type == "" {
return nil, fmt.Errorf("Watch type must be specified")
}
// Get the specific handler
if err := assignValue(params, "handler_type", &plan.HandlerType); err != nil {
return nil, err
}
switch plan.HandlerType {
case "http":
if _, ok := params["http_handler_config"]; !ok {
return nil, fmt.Errorf("Handler type 'http' requires 'http_handler_config' to be set")
}
config, err := parseHttpHandlerConfig(params["http_handler_config"])
if err != nil {
return nil, fmt.Errorf(fmt.Sprintf("Failed to parse 'http_handler_config': %v", err))
}
plan.Exempt["http_handler_config"] = config
delete(params, "http_handler_config")
case "script":
// Let the caller check for configuration in exempt parameters
}
// Look for a factory function
factory := watchFuncFactory[plan.Type]
if factory == nil {
return nil, fmt.Errorf("Unsupported watch type: %s", plan.Type)
}
// Get the watch func
fn, err := factory(params)
if err != nil {
return nil, err
}
plan.Watcher = fn
// Remove the exempt parameters
if len(exempt) > 0 {
for _, ex := range exempt {
val, ok := params[ex]
if ok {
plan.Exempt[ex] = val
delete(params, ex)
}
}
}
// Ensure all parameters are consumed
if len(params) != 0 {
var bad []string
for key := range params {
bad = append(bad, key)
}
return nil, fmt.Errorf("Invalid parameters: %v", bad)
}
return plan, nil
}
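A sketch of how a caller keeps handler-specific settings out of strict parameter validation by declaring them exempt; the "args" key and script path are illustrative, not part of this package:

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/consul/api/watch"
)

func main() {
    params := map[string]interface{}{
        "type":         "nodes",
        "handler_type": "script",
        "args":         []string{"/usr/local/bin/on-nodes-change", "-v"},
    }

    // "args" is not consumed by the nodes factory; without the exempt list,
    // ParseExempt would reject it as an invalid parameter.
    plan, err := watch.ParseExempt(params, []string{"args"})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(plan.Exempt["args"])
}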
// assignValue is used to extract a value ensuring it is a string
func assignValue(params map[string]interface{}, name string, out *string) error {
if raw, ok := params[name]; ok {
val, ok := raw.(string)
if !ok {
return fmt.Errorf("Expecting %s to be a string", name)
}
*out = val
delete(params, name)
}
return nil
}
// assignValueBool is used to extract a value ensuring it is a bool
func assignValueBool(params map[string]interface{}, name string, out *bool) error {
if raw, ok := params[name]; ok {
val, ok := raw.(bool)
if !ok {
return fmt.Errorf("Expecting %s to be a boolean", name)
}
*out = val
delete(params, name)
}
return nil
}
// assignValueStringSlice is used to extract a value ensuring it is either a string or a slice of strings
func assignValueStringSlice(params map[string]interface{}, name string, out *[]string) error {
if raw, ok := params[name]; ok {
var tmp []string
switch val := raw.(type) {
case string:
tmp = []string{val}
case []string:
tmp = make([]string, len(val))
copy(tmp, val)
case []interface{}:
tmp = make([]string, len(val))
for i, item := range val {
s, ok := item.(string)
if !ok {
return fmt.Errorf("Index %d of %s expected to be string", i, name)
}
tmp[i] = s
}
default:
return fmt.Errorf("Expecting %s to be a string or []string", name)
}
*out = tmp
delete(params, name)
}
return nil
}
// parseHttpHandlerConfig parses the 'http_handler_config' watch parameter into an HttpHandlerConfig
func parseHttpHandlerConfig(configParams interface{}) (*HttpHandlerConfig, error) {
var config HttpHandlerConfig
if err := mapstructure.Decode(configParams, &config); err != nil {
return nil, err
}
if config.Path == "" {
return nil, fmt.Errorf("Requires 'path' to be set")
}
if config.Method == "" {
config.Method = "POST"
}
if config.TimeoutRaw == "" {
config.Timeout = DefaultTimeout
} else if timeout, err := time.ParseDuration(config.TimeoutRaw); err != nil {
return nil, fmt.Errorf(fmt.Sprintf("Failed to parse timeout: %v", err))
} else {
config.Timeout = timeout
}
return &config, nil
}
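A sketch of the parameter shape this parser expects, as it would arrive from an agent watch definition. The endpoint URL, header and timeout values are illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/consul/api/watch"
)

func main() {
    plan, err := watch.Parse(map[string]interface{}{
        "type":         "services",
        "handler_type": "http",
        "http_handler_config": map[string]interface{}{
            "path":    "https://hooks.example.com/consul", // illustrative endpoint
            "method":  "PUT",                              // defaults to POST when omitted
            "timeout": "20s",                              // parsed with time.ParseDuration
            "header":  map[string][]string{"x-token": {"secret"}},
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    // Parse stores the decoded config in the plan's Exempt map.
    cfg := plan.Exempt["http_handler_config"].(*watch.HttpHandlerConfig)
    fmt.Println(cfg.Method, cfg.Timeout)
}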

@ -0,0 +1,139 @@
// Package freeport provides a helper for allocating free ports across multiple
// processes on the same machine.
package freeport
import (
"fmt"
"math/rand"
"net"
"sync"
"time"
"github.com/mitchellh/go-testing-interface"
)
const (
// blockSize is the size of the allocated port block. ports are given out
// consecutively from that block with roll-over for the lifetime of the
// application/test run.
blockSize = 1500
// maxBlocks is the number of available port blocks.
// lowPort + maxBlocks * blockSize must be less than 65535.
maxBlocks = 30
// lowPort is the lowest port number that should be used.
lowPort = 10000
// attempts is how often we try to allocate a port block
// before giving up.
attempts = 10
)
var (
// firstPort is the first port of the allocated block.
firstPort int
// lockLn is the system-wide mutex for the port block.
lockLn net.Listener
// mu guards access to port
mu sync.Mutex
// once is used to do the initialization on the first call to retrieve free
// ports
once sync.Once
// port is the last allocated port.
port int
)
// initialize is used to initialize freeport.
func initialize() {
if lowPort+maxBlocks*blockSize > 65535 {
panic("freeport: block size too big or too many blocks requested")
}
rand.Seed(time.Now().UnixNano())
firstPort, lockLn = alloc()
}
// alloc reserves a port block for exclusive use for the lifetime of the
// application. lockLn serves as a system-wide mutex for the port block and is
// implemented as a TCP listener which is bound to the firstPort and which will
// be automatically released when the application terminates.
func alloc() (int, net.Listener) {
for i := 0; i < attempts; i++ {
block := int(rand.Int31n(int32(maxBlocks)))
firstPort := lowPort + block*blockSize
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", firstPort))
if err != nil {
continue
}
// log.Printf("[DEBUG] freeport: allocated port block %d (%d-%d)", block, firstPort, firstPort+blockSize-1)
return firstPort, ln
}
panic("freeport: cannot allocate port block")
}
func tcpAddr(ip string, port int) *net.TCPAddr {
return &net.TCPAddr{IP: net.ParseIP(ip), Port: port}
}
// Get wraps the Free function and panics on any failure retrieving ports.
func Get(n int) (ports []int) {
ports, err := Free(n)
if err != nil {
panic(err)
}
return ports
}
// GetT is suitable for use when retrieving unused ports in tests. If there is
// an error retrieving free ports, the test will be failed.
func GetT(t testing.T, n int) (ports []int) {
ports, err := Free(n)
if err != nil {
t.Fatalf("Failed retrieving free port: %v", err)
}
return ports
}
// Free returns a list of free ports from the allocated port block. It is safe
// to call this method concurrently. Ports have been tested to be available on
// 127.0.0.1 TCP but there is no guarantee that they will remain free in the
// future.
func Free(n int) (ports []int, err error) {
mu.Lock()
defer mu.Unlock()
if n > blockSize-1 {
return nil, fmt.Errorf("freeport: block size too small")
}
// Reserve a port block
once.Do(initialize)
for len(ports) < n {
port++
// roll-over the port
if port < firstPort+1 || port >= firstPort+blockSize {
port = firstPort + 1
}
// if the port is in use then skip it
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", port))
if err != nil {
// log.Println("[DEBUG] freeport: port already in use: ", port)
continue
}
ln.Close()
ports = append(ports, port)
}
// log.Println("[DEBUG] freeport: free ports:", ports)
return ports, nil
}
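A sketch of typical test usage, assuming the package is imported from the consul sdk module path (the vendored path for this file is not shown above); the test and listener are illustrative:

package myservice_test

import (
    "fmt"
    "net"
    "testing"

    "github.com/hashicorp/consul/sdk/freeport"
)

func TestListenOnFreePorts(t *testing.T) {
    // Take two ports from the process-wide block; *testing.T satisfies the
    // go-testing-interface type that GetT expects.
    ports := freeport.GetT(t, 2)

    for _, p := range ports {
        ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", p))
        if err != nil {
            t.Fatalf("port %d was not free: %v", p, err)
        }
        ln.Close()
    }
}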

Some files were not shown because too many files have changed in this diff.