diff --git a/agent/agent.go b/agent/agent.go index b6c0286e7..1c421587b 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -1131,6 +1131,7 @@ func (a *Agent) consulConfig() (*consul.Config, error) { } // Setup the loggers + base.LogLevel = a.config.LogLevel base.LogOutput = a.LogOutput // This will set up the LAN keyring, as well as the WAN and any segments diff --git a/agent/consul/config.go b/agent/consul/config.go index c55368590..491bdeb4f 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -147,6 +147,9 @@ type Config struct { // leader election. ReconcileInterval time.Duration + // LogLevel is the level of the logs to write. Defaults to "INFO". + LogLevel string + // LogOutput is the location to write logs to. If this is not set, // logs will go to stderr. LogOutput io.Writer diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 8636e3038..ce8ae41ab 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -126,6 +126,21 @@ func (s *Server) monitorLeadership() { } } +func (s *Server) leadershipTransfer() error { + retryCount := 3 + for i := 0; i < retryCount; i++ { + future := s.raft.LeadershipTransfer() + if err := future.Error(); err != nil { + s.logger.Printf("[ERR] consul: failed to transfer leadership attempt %d/%d: %v", i+1, retryCount, err) + } else { + s.logger.Printf("[INFO] consul: successfully transferred leadership attempt %d/%d", i+1, retryCount) + return nil + } + + } + return fmt.Errorf("failed to transfer leadership in %d attempts", retryCount) +} + // leaderLoop runs as long as we are the leader to run various // maintenance activities func (s *Server) leaderLoop(stopCh chan struct{}) { @@ -142,19 +157,6 @@ func (s *Server) leaderLoop(stopCh chan struct{}) { var reconcileCh chan serf.Member establishedLeader := false - reassert := func() error { - if !establishedLeader { - return fmt.Errorf("leadership has not been established") - } - if err := s.revokeLeadership(); err != nil { - return err - } - if err := s.establishLeadership(); err != nil { - return err - } - return nil - } - RECONCILE: // Setup a reconciliation timer reconcileCh = nil @@ -175,17 +177,22 @@ RECONCILE: s.logger.Printf("[ERR] consul: failed to establish leadership: %v", err) // Immediately revoke leadership since we didn't successfully // establish leadership. - if err := s.revokeLeadership(); err != nil { - s.logger.Printf("[ERR] consul: failed to revoke leadership: %v", err) + s.revokeLeadership() + + // attempt to transfer leadership. If successful it is + // time to leave the leaderLoop since this node is no + // longer the leader. If leadershipTransfer() fails, we + // will try to acquire it again after + // 5 seconds. + if err := s.leadershipTransfer(); err != nil { + s.logger.Printf("[ERR] consul: %v", err) + interval = time.After(5 * time.Second) + goto WAIT } - goto WAIT + return } establishedLeader = true - defer func() { - if err := s.revokeLeadership(); err != nil { - s.logger.Printf("[ERR] consul: failed to revoke leadership: %v", err) - } - }() + defer s.revokeLeadership() } // Reconcile any missing data @@ -223,7 +230,47 @@ WAIT: case index := <-s.tombstoneGC.ExpireCh(): go s.reapTombstones(index) case errCh := <-s.reassertLeaderCh: - errCh <- reassert() + // we can get into this state when the initial + // establishLeadership has failed as well as the follow + // up leadershipTransfer. Afterwards we will be waiting + // for the interval to trigger a reconciliation and can + // potentially end up here.
There is no point to + // reassert because this agent was never leader in the + // first place. + if !establishedLeader { + errCh <- fmt.Errorf("leadership has not been established") + continue + } + + // continue to reassert only if we previously were the + // leader, which means revokeLeadership followed by an + // establishLeadership(). + s.revokeLeadership() + err := s.establishLeadership() + errCh <- err + + // in case establishLeadership failed, we will try to + // transfer leadership. At this time raft thinks we are + // the leader, but consul disagrees. + if err != nil { + if err := s.leadershipTransfer(); err != nil { + // establishedLeader was true before, + // but it no longer is since it revoked + // leadership and Leadership transfer + // also failed. Which is why it stays + // in the leaderLoop, but now + // establishedLeader needs to be set to + // false. + establishedLeader = false + interval = time.After(5 * time.Second) + goto WAIT + } + + // leadershipTransfer was successful and it is + // time to leave the leaderLoop. + return + } + } } } @@ -290,15 +337,13 @@ func (s *Server) establishLeadership() error { // revokeLeadership is invoked once we step down as leader. // This is used to cleanup any state that may be specific to a leader. -func (s *Server) revokeLeadership() error { +func (s *Server) revokeLeadership() { // Disable the tombstone GC, since it is only useful as a leader s.tombstoneGC.SetEnabled(false) // Clear the session timers on either shutdown or step down, since we // are no longer responsible for session expirations. - if err := s.clearAllSessionTimers(); err != nil { - return err - } + s.clearAllSessionTimers() s.stopConfigReplication() @@ -313,8 +358,8 @@ func (s *Server) revokeLeadership() error { s.stopACLUpgrade() s.resetConsistentReadReady() + s.autopilot.Stop() - return nil } // DEPRECATED (ACL-Legacy-Compat) - Remove once old ACL compatibility is removed diff --git a/agent/consul/server.go b/agent/consul/server.go index 8f2070d2a..2e1d7e075 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -32,6 +32,7 @@ import ( "github.com/hashicorp/consul/sentinel" "github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/consul/types" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb" "github.com/hashicorp/serf/serf" @@ -548,7 +549,13 @@ func (s *Server) setupRaft() error { // Make sure we set the LogOutput. s.config.RaftConfig.LogOutput = s.config.LogOutput - s.config.RaftConfig.Logger = s.logger + raftLogger := hclog.New(&hclog.LoggerOptions{ + Name: "raft", + Level: hclog.LevelFromString(s.config.LogLevel), + Output: s.config.LogOutput, + TimeFormat: `2006/01/02 15:04:05`, + }) + s.config.RaftConfig.Logger = raftLogger // Versions of the Raft protocol below 3 require the LocalID to match the network // address of the transport. 
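Editor's note on the `setupRaft` hunk above: raft v1.1.0 (pulled in by this change, see go.mod below) takes an `hclog.Logger` rather than a `*log.Logger`, which is why the new `LogLevel` field is threaded from the agent config into `consul.Config`. A minimal sketch of the same wiring — `hclog.New` and `hclog.LevelFromString` are real go-hclog APIs; the literal level and output are stand-ins for `s.config.LogLevel` and `s.config.LogOutput`:

```go
package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Build a leveled logger the way setupRaft now does. LevelFromString
	// returns hclog.NoLevel for unrecognized strings, in which case hclog
	// falls back to its default level.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:       "raft",
		Level:      hclog.LevelFromString("INFO"), // stand-in for s.config.LogLevel
		Output:     os.Stderr,                     // stand-in for s.config.LogOutput
		TimeFormat: `2006/01/02 15:04:05`,
	})
	logger.Info("raft logger configured")
}
```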
diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index 507e5a69d..f79a27aee 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -963,14 +963,8 @@ func TestServer_RevokeLeadershipIdempotent(t *testing.T) { testrpc.WaitForLeader(t, s1.RPC, "dc1") - err := s1.revokeLeadership() - if err != nil { - t.Fatal(err) - } - err = s1.revokeLeadership() - if err != nil { - t.Fatal(err) - } + s1.revokeLeadership() + s1.revokeLeadership() } func TestServer_Reload(t *testing.T) { diff --git a/agent/consul/session_ttl.go b/agent/consul/session_ttl.go index 71265f0b1..fd12701fb 100644 --- a/agent/consul/session_ttl.go +++ b/agent/consul/session_ttl.go @@ -122,9 +122,8 @@ func (s *Server) clearSessionTimer(id string) error { // clearAllSessionTimers is used when a leader is stepping // down and we no longer need to track any session timers. -func (s *Server) clearAllSessionTimers() error { +func (s *Server) clearAllSessionTimers() { s.sessionTimers.StopAll() - return nil } // sessionStats is a long running routine used to capture diff --git a/agent/consul/session_ttl_test.go b/agent/consul/session_ttl_test.go index ada7a1a69..dfa1b32e5 100644 --- a/agent/consul/session_ttl_test.go +++ b/agent/consul/session_ttl_test.go @@ -281,10 +281,7 @@ func TestClearAllSessionTimers(t *testing.T) { s1.createSessionTimer("bar", 10*time.Millisecond) s1.createSessionTimer("baz", 10*time.Millisecond) - err := s1.clearAllSessionTimers() - if err != nil { - t.Fatalf("err: %v", err) - } + s1.clearAllSessionTimers() // sessionTimers is guarded by the lock if s1.sessionTimers.Len() != 0 { diff --git a/go.mod b/go.mod index bf28ab90c..44dd71263 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,6 @@ replace github.com/hashicorp/consul/sdk => ./sdk require ( github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Azure/go-autorest v10.15.3+incompatible // indirect - github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6 // indirect github.com/Jeffail/gabs v1.1.0 // indirect github.com/Microsoft/go-winio v0.4.3 // indirect github.com/NYTimes/gziphandler v1.0.1 @@ -18,16 +17,13 @@ require ( github.com/SermoDigital/jose v0.0.0-20180104203859-803625baeddc // indirect github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e - github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da + github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 github.com/asaskevich/govalidator v0.0.0-20180319081651-7d2e70ef918f // indirect - github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect github.com/boltdb/bolt v1.3.1 // indirect github.com/cenkalti/backoff v2.1.1+incompatible // indirect - github.com/circonus-labs/circonus-gometrics v0.0.0-20161109192337-d17a8420c36e // indirect - github.com/circonus-labs/circonusllhist v0.0.0-20161110002650-365d370cc145 // indirect github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect github.com/coredns/coredns v1.1.2 github.com/denisenkom/go-mssqldb v0.0.0-20180620032804-94c9c97e8c9f // indirect @@ -58,12 +54,11 @@ require ( github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-discover 
v0.0.0-20190403160810-22221edb15cd - github.com/hashicorp/go-hclog v0.0.0-20180402200405-69ff559dc25f // indirect + github.com/hashicorp/go-hclog v0.9.1 github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 - github.com/hashicorp/go-msgpack v0.5.4 + github.com/hashicorp/go-msgpack v0.5.5 github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116 - github.com/hashicorp/go-retryablehttp v0.0.0-20180531211321-3b087ef2d313 // indirect github.com/hashicorp/go-rootcerts v1.0.0 github.com/hashicorp/go-sockaddr v1.0.0 github.com/hashicorp/go-syslog v1.0.0 @@ -76,7 +71,7 @@ require ( github.com/hashicorp/mdns v1.0.1 // indirect github.com/hashicorp/memberlist v0.1.4 github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 - github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472 + github.com/hashicorp/raft v1.1.0 github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1 github.com/hashicorp/serf v0.8.2 github.com/hashicorp/vault v0.10.3 @@ -89,7 +84,6 @@ require ( github.com/kr/text v0.1.0 github.com/lib/pq v0.0.0-20180523175426-90697d60dd84 // indirect github.com/lyft/protoc-gen-validate v0.0.0-20180911180927-64fcb82c878e // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/miekg/dns v1.0.14 github.com/mitchellh/cli v1.0.0 github.com/mitchellh/copystructure v0.0.0-20160804032330-cdac8253d00f @@ -103,13 +97,10 @@ require ( github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/runc v0.1.1 // indirect github.com/ory/dockertest v3.3.4+incompatible // indirect - github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c + github.com/pascaldekloe/goe v0.1.0 github.com/patrickmn/go-cache v0.0.0-20180527043350-9f6ff22cfff8 // indirect github.com/pkg/errors v0.8.1 - github.com/prometheus/client_golang v0.0.0-20180328130430-f504d69affe1 - github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 // indirect - github.com/prometheus/common v0.0.0-20180326160409-38c53a9f4bfc // indirect - github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d // indirect + github.com/prometheus/client_golang v0.9.2 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735 // indirect github.com/shirou/gopsutil v0.0.0-20181107111621-48177ef5f880 diff --git a/go.sum b/go.sum index 2f0c956a7..c09867f65 100644 --- a/go.sum +++ b/go.sum @@ -10,6 +10,8 @@ github.com/Azure/go-autorest v10.15.3+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxS github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6 h1:veThyuVPIg0cAHly135Y+IW2ymFgZ1pftOyAVkqCoi8= github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Jeffail/gabs v1.1.0 h1:kw5zCcl9tlJNHTDme7qbi21fDHZmXrnjMoXos3Jw/NI= github.com/Jeffail/gabs v1.1.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/Microsoft/go-winio v0.4.3 h1:M3NHMuPgMSUPdE5epwNUHlRPSVzHs8HpRTrVXhR0myo= @@ -30,6 +32,8 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod 
h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180319081651-7d2e70ef918f h1:/8NcnxL60YFll4ehCwibKotx0BR9v2ND40fomga8qDs= @@ -50,8 +54,12 @@ github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1q github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/circonus-labs/circonus-gometrics v0.0.0-20161109192337-d17a8420c36e h1:VhMcRhkS/wJM+XfZxNn+tk5EVmF2k19g6yS6uDXHn0o= github.com/circonus-labs/circonus-gometrics v0.0.0-20161109192337-d17a8420c36e/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.0.0-20161110002650-365d370cc145 h1:cwLvX6r5EOiVmkUYjY+Ev4ZJrkOhex3r+mfeqT8o+8c= github.com/circonus-labs/circonusllhist v0.0.0-20161110002650-365d370cc145/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -148,12 +156,15 @@ github.com/hashicorp/go-bexpr v0.1.0 h1:hA/9CWGPsQ6YZXvPvizD+VEEjBG4V6Un0Qcyav5g github.com/hashicorp/go-bexpr v0.1.0/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU= github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de h1:XDCSythtg8aWSRSO29uwhgh7b127fWr+m5SemqjSUL8= github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1:xIwEieBHERyEvaeKF/TcHh1Hu+lxPM+n2vT1+g9I4m4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-discover v0.0.0-20190403160810-22221edb15cd h1:SynRxs8h2h7lLSA5py5a3WWkYpImhREtju0CuRd97wc= github.com/hashicorp/go-discover v0.0.0-20190403160810-22221edb15cd/go.mod h1:ueUgD9BeIocT7QNuvxSyJyPAM9dfifBcaWmeybb67OY= github.com/hashicorp/go-hclog v0.0.0-20180402200405-69ff559dc25f h1:t34t/ySFIGsPOLQ/dCcKeCoErlqhXlNLYvPn7mVogzo= github.com/hashicorp/go-hclog v0.0.0-20180402200405-69ff559dc25f/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog 
v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 h1:yxxFgVz31vFoKKTtRUNbXLNe4GFnbLKqg+0N7yG42L8= @@ -161,12 +172,16 @@ github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71/go.mod h1:kbfIt github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.4 h1:SFT72YqIkOcLdWJUYcriVX7hbrZpwc/f7h8aW2NUqrA= github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116 h1:Y4V/yReWjQo/Ngyc0w6C3EKXKincp4YgvXeo8lI4LrI= github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116/go.mod h1:JSqWYsict+jzcj0+xElxyrBQRPNoiWQuddnxArJ7XHQ= github.com/hashicorp/go-retryablehttp v0.0.0-20180531211321-3b087ef2d313 h1:8YjGfJRRXO9DA6RG0wNt3kEkvvnxIDao5us1PG+S0wc= github.com/hashicorp/go-retryablehttp v0.0.0-20180531211321-3b087ef2d313/go.mod h1:fXcdFsQoipQa7mwORhKad5jmDCeSy/RCGzWA08PO0lM= +github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= @@ -203,6 +218,8 @@ github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q= github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472 h1:9EPzHJ1bJFaFbGOz3UV3DDFmGYANr+SF+eapmiK5zV4= github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= +github.com/hashicorp/raft v1.1.0 h1:qPMePEczgbkiQsqCsRfuHRqvDUO+zmAInDaD5ptXlq0= +github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1 h1:LHTrLUnNkk+2YkO5EMG49q0lHdR9AZhDbCpu0+M3e0E= github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= @@ -296,6 +313,8 @@ github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8D github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v0.0.0-20180527043350-9f6ff22cfff8 h1:BR6MM54q4W9pn0SySwg6yctZtBKlTdUq6a+b0kArBnE= github.com/patrickmn/go-cache v0.0.0-20180527043350-9f6ff22cfff8/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -310,12 +329,20 @@ github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.0.0-20180328130430-f504d69affe1 h1:jtnwVoXwppTtQ4ApMgCb+G5CcW8OUvLlprWpB+x3e+8= github.com/prometheus/client_golang v0.0.0-20180328130430-f504d69affe1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 h1:cLL6NowurKLMfCeQy4tIeph12XNQWgANCNvdyrOYKV4= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/common v0.0.0-20180326160409-38c53a9f4bfc h1:tyg3EcZAmwCUe90Jzl4Qw6Af+ajuW8S9b1VFitMNOQs= github.com/prometheus/common v0.0.0-20180326160409-38c53a9f4bfc/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d h1:RCcsxyRr6+/pLg6wr0cUjPovhEhSNOtPh0SOz6u3hGU= github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f h1:UFr9zpz4xgTnIE5yIMtWAMngCdZ9p/+q6lTbgelo80M= @@ -349,6 +376,8 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9 h1:/Bsw4C+DEdqPjt8vAqaC9LAqpAQnaCQQqmolqq3S1T4= github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9/go.mod h1:RHkNRtSLfOK7qBTHaeSX1D6BNpI3qw7NTxsmNr4RvN8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod 
h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo= github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs= @@ -372,6 +401,7 @@ golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAG golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/DataDog/datadog-go/statsd/README.md b/vendor/github.com/DataDog/datadog-go/statsd/README.md index c3b462f85..a2bca43b9 100644 --- a/vendor/github.com/DataDog/datadog-go/statsd/README.md +++ b/vendor/github.com/DataDog/datadog-go/statsd/README.md @@ -3,43 +3,3 @@ Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags and histograms. -## Get the code - - $ go get github.com/DataDog/datadog-go/statsd - -## Usage - -```go -// Create the client -c, err := statsd.New("127.0.0.1:8125") -if err != nil { - log.Fatal(err) -} -// Prefix every metric with the app name -c.Namespace = "flubber." -// Send the EC2 availability zone as a tag with every metric -c.Tags = append(c.Tags, "us-east-1a") -err = c.Gauge("request.duration", 1.2, nil, 1) -``` - -## Buffering Client - -Dogstatsd accepts packets with multiple statsd payloads in them. Using the BufferingClient via `NewBufferingClient` will buffer up commands and send them when the buffer is reached or after 100msec. - -## Development - -Run the tests with: - - $ go test - -## Documentation - -Please see: http://godoc.org/github.com/DataDog/datadog-go/statsd - -## License - -go-dogstatsd is released under the [MIT license](http://www.opensource.org/licenses/mit-license.php). - -## Credits - -Original code by [ooyala](https://github.com/ooyala/go-dogstatsd). 
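Editor's note: the usage section removed from the vendored README above is superseded by the functional options API added in `options.go` below. A hedged sketch of how a caller would configure the v2.2.0 client — all identifiers come from the vendored package itself; the address, namespace, and tag values are illustrative:

```go
package main

import (
	"log"

	"github.com/DataDog/datadog-go/statsd"
)

func main() {
	// Buffered client: commands are packed into payloads of up to 16
	// messages (or flushed after 100ms), mirroring the old NewBuffered.
	c, err := statsd.New("127.0.0.1:8125",
		statsd.Buffered(),
		statsd.WithMaxMessagesPerPayload(16),
		statsd.WithNamespace("consul."), // prefix for every metric name
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Gauge(name, value, tags, rate), same shape as the v1 API.
	if err := c.Gauge("request.duration", 1.2, []string{"dc:dc1"}, 1); err != nil {
		log.Println("gauge:", err)
	}
}
```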
diff --git a/vendor/github.com/DataDog/datadog-go/statsd/options.go b/vendor/github.com/DataDog/datadog-go/statsd/options.go new file mode 100644 index 000000000..2c5a59cd5 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/options.go @@ -0,0 +1,109 @@ +package statsd + +import "time" + +var ( + // DefaultNamespace is the default value for the Namespace option + DefaultNamespace = "" + // DefaultTags is the default value for the Tags option + DefaultTags = []string{} + // DefaultBuffered is the default value for the Buffered option + DefaultBuffered = false + // DefaultMaxMessagesPerPayload is the default value for the MaxMessagesPerPayload option + DefaultMaxMessagesPerPayload = 16 + // DefaultAsyncUDS is the default value for the AsyncUDS option + DefaultAsyncUDS = false + // DefaultWriteTimeoutUDS is the default value for the WriteTimeoutUDS option + DefaultWriteTimeoutUDS = 1 * time.Millisecond +) + +// Options contains the configuration options for a client. +type Options struct { + // Namespace to prepend to all metrics, events and service checks name. + Namespace string + // Tags are global tags to be applied to every metrics, events and service checks. + Tags []string + // Buffered allows to pack multiple DogStatsD messages in one payload. Messages will be buffered + // until the total size of the payload exceeds MaxMessagesPerPayload metrics, events and/or service + // checks or after 100ms since the payload started to be built. + Buffered bool + // MaxMessagesPerPayload is the maximum number of metrics, events and/or service checks a single payload will contain. + // Note that this option only takes effect when the client is buffered. + MaxMessagesPerPayload int + // AsyncUDS allows to switch between async and blocking mode for UDS. + // Blocking mode allows for error checking but does not guarantee that calls won't block the execution. + AsyncUDS bool + // WriteTimeoutUDS is the timeout after which a UDS packet is dropped. + WriteTimeoutUDS time.Duration +} + +func resolveOptions(options []Option) (*Options, error) { + o := &Options{ + Namespace: DefaultNamespace, + Tags: DefaultTags, + Buffered: DefaultBuffered, + MaxMessagesPerPayload: DefaultMaxMessagesPerPayload, + AsyncUDS: DefaultAsyncUDS, + WriteTimeoutUDS: DefaultWriteTimeoutUDS, + } + + for _, option := range options { + err := option(o) + if err != nil { + return nil, err + } + } + + return o, nil +} + +// Option is a client option. Can return an error if validation fails. +type Option func(*Options) error + +// WithNamespace sets the Namespace option. +func WithNamespace(namespace string) Option { + return func(o *Options) error { + o.Namespace = namespace + return nil + } +} + +// WithTags sets the Tags option. +func WithTags(tags []string) Option { + return func(o *Options) error { + o.Tags = tags + return nil + } +} + +// Buffered sets the Buffered option. +func Buffered() Option { + return func(o *Options) error { + o.Buffered = true + return nil + } +} + +// WithMaxMessagesPerPayload sets the MaxMessagesPerPayload option. +func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option { + return func(o *Options) error { + o.MaxMessagesPerPayload = maxMessagesPerPayload + return nil + } +} + +// WithAsyncUDS sets the AsyncUDS option. +func WithAsyncUDS() Option { + return func(o *Options) error { + o.AsyncUDS = true + return nil + } +} + +// WithWriteTimeoutUDS sets the WriteTimeoutUDS option.
+func WithWriteTimeoutUDS(writeTimeoutUDS time.Duration) Option { + return func(o *Options) error { + o.WriteTimeoutUDS = writeTimeoutUDS + return nil + } +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/statsd.go b/vendor/github.com/DataDog/datadog-go/statsd/statsd.go index 2f46b3cf0..71a113cfc 100644 --- a/vendor/github.com/DataDog/datadog-go/statsd/statsd.go +++ b/vendor/github.com/DataDog/datadog-go/statsd/statsd.go @@ -27,8 +27,9 @@ import ( "bytes" "errors" "fmt" + "io" "math/rand" - "net" + "os" "strconv" "strings" "sync" @@ -54,104 +55,209 @@ any number greater than that will see frames being cut out. */ const MaxUDPPayloadSize = 65467 -// A Client is a handle for sending udp messages to dogstatsd. It is safe to +/* +UnixAddressPrefix holds the prefix to use to enable Unix Domain Socket +traffic instead of UDP. +*/ +const UnixAddressPrefix = "unix://" + +// Client-side entity ID injection for container tagging +const ( + entityIDEnvName = "DD_ENTITY_ID" + entityIDTagName = "dd.internal.entity_id" +) + +/* +Stat suffixes +*/ +var ( + gaugeSuffix = []byte("|g") + countSuffix = []byte("|c") + histogramSuffix = []byte("|h") + distributionSuffix = []byte("|d") + decrSuffix = []byte("-1|c") + incrSuffix = []byte("1|c") + setSuffix = []byte("|s") + timingSuffix = []byte("|ms") +) + +// A statsdWriter offers a standard interface regardless of the underlying +// protocol. For now UDS and UDP writers are available. +type statsdWriter interface { + Write(data []byte) (n int, err error) + SetWriteTimeout(time.Duration) error + Close() error +} + +// A Client is a handle for sending messages to dogstatsd. It is safe to // use one Client from multiple goroutines simultaneously. type Client struct { - conn net.Conn + // Writer handles the underlying networking protocol + writer statsdWriter // Namespace to prepend to all statsd calls Namespace string // Tags are global tags to be added to every statsd call Tags []string + // skipErrors turns off error passing and allows UDS to emulate UDP behaviour + SkipErrors bool // BufferLength is the length of the buffer in commands. bufferLength int flushTime time.Duration - commands []string + commands [][]byte buffer bytes.Buffer - stop bool + stop chan struct{} sync.Mutex } -// New returns a pointer to a new Client given an addr in the format "hostname:port". -func New(addr string) (*Client, error) { - udpAddr, err := net.ResolveUDPAddr("udp", addr) +// New returns a pointer to a new Client given an addr in the format "hostname:port" or +// "unix:///path/to/socket".
+func New(addr string, options ...Option) (*Client, error) { + o, err := resolveOptions(options) if err != nil { return nil, err } - conn, err := net.DialUDP("udp", nil, udpAddr) + + var w statsdWriter + + if !strings.HasPrefix(addr, UnixAddressPrefix) { + w, err = newUDPWriter(addr) + } else if o.AsyncUDS { + w, err = newAsyncUdsWriter(addr[len(UnixAddressPrefix)-1:]) + } else { + w, err = newBlockingUdsWriter(addr[len(UnixAddressPrefix)-1:]) + } if err != nil { return nil, err } - client := &Client{conn: conn} + w.SetWriteTimeout(o.WriteTimeoutUDS) + + c := Client{ + Namespace: o.Namespace, + Tags: o.Tags, + writer: w, + } + + // Inject DD_ENTITY_ID as a constant tag if found + entityID := os.Getenv(entityIDEnvName) + if entityID != "" { + entityTag := fmt.Sprintf("%s:%s", entityIDTagName, entityID) + c.Tags = append(c.Tags, entityTag) + } + + if o.Buffered { + c.bufferLength = o.MaxMessagesPerPayload + c.commands = make([][]byte, 0, o.MaxMessagesPerPayload) + c.flushTime = time.Millisecond * 100 + c.stop = make(chan struct{}, 1) + go c.watch() + } + + return &c, nil +} + +// NewWithWriter creates a new Client with given writer. Writer is a +// io.WriteCloser + SetWriteTimeout(time.Duration) error +func NewWithWriter(w statsdWriter) (*Client, error) { + client := &Client{writer: w, SkipErrors: false} + + // Inject DD_ENTITY_ID as a constant tag if found + entityID := os.Getenv(entityIDEnvName) + if entityID != "" { + entityTag := fmt.Sprintf("%s:%s", entityIDTagName, entityID) + client.Tags = append(client.Tags, entityTag) + } + return client, nil } // NewBuffered returns a Client that buffers its output and sends it in chunks. // Buflen is the length of the buffer in number of commands. +// +// When addr is empty, the client will default to a UDP client and use the DD_AGENT_HOST +// and (optionally) the DD_DOGSTATSD_PORT environment variables to build the target address. func NewBuffered(addr string, buflen int) (*Client, error) { - client, err := New(addr) - if err != nil { - return nil, err - } - client.bufferLength = buflen - client.commands = make([]string, 0, buflen) - client.flushTime = time.Millisecond * 100 - go client.watch() - return client, nil + return New(addr, Buffered(), WithMaxMessagesPerPayload(buflen)) } // format a message from its name, value, tags and rate. Also adds global // namespace and tags. -func (c *Client) format(name, value string, tags []string, rate float64) string { - var buf bytes.Buffer +func (c *Client) format(name string, value interface{}, suffix []byte, tags []string, rate float64) []byte { + // preallocated buffer, stack allocated as long as it doesn't escape + buf := make([]byte, 0, 200) + if c.Namespace != "" { - buf.WriteString(c.Namespace) + buf = append(buf, c.Namespace...) } - buf.WriteString(name) - buf.WriteString(":") - buf.WriteString(value) + buf = append(buf, name...) + buf = append(buf, ':') + + switch val := value.(type) { + case float64: + buf = strconv.AppendFloat(buf, val, 'f', 6, 64) + + case int64: + buf = strconv.AppendInt(buf, val, 10) + + case string: + buf = append(buf, val...) + + default: + // do nothing + } + buf = append(buf, suffix...) + if rate < 1 { - buf.WriteString(`|@`) - buf.WriteString(strconv.FormatFloat(rate, 'f', -1, 64)) + buf = append(buf, "|@"...) + buf = strconv.AppendFloat(buf, rate, 'f', -1, 64) } - tags = append(c.Tags, tags...) 
- if len(tags) > 0 { - buf.WriteString("|#") - buf.WriteString(tags[0]) - for _, tag := range tags[1:] { - buf.WriteString(",") - buf.WriteString(tag) - } + buf = appendTagString(buf, c.Tags, tags) + + // non-zeroing copy to avoid referencing a larger than necessary underlying array + return append([]byte(nil), buf...) +} + +// SetWriteTimeout allows the user to set a custom UDS write timeout. Not supported for UDP. +func (c *Client) SetWriteTimeout(d time.Duration) error { + if c == nil { + return fmt.Errorf("Client is nil") } - return buf.String() + return c.writer.SetWriteTimeout(d) } func (c *Client) watch() { - for _ = range time.Tick(c.flushTime) { - if c.stop { + ticker := time.NewTicker(c.flushTime) + + for { + select { + case <-ticker.C: + c.Lock() + if len(c.commands) > 0 { + // FIXME: eating error here + c.flushLocked() + } + c.Unlock() + case <-c.stop: + ticker.Stop() return } - c.Lock() - if len(c.commands) > 0 { - // FIXME: eating error here - c.flush() - } - c.Unlock() } } -func (c *Client) append(cmd string) error { +func (c *Client) append(cmd []byte) error { + c.Lock() + defer c.Unlock() c.commands = append(c.commands, cmd) // if we should flush, lets do it if len(c.commands) == c.bufferLength { - if err := c.flush(); err != nil { + if err := c.flushLocked(); err != nil { return err } } return nil } -func (c *Client) joinMaxSize(cmds []string, sep string, maxSize int) ([][]byte, []int) { +func (c *Client) joinMaxSize(cmds [][]byte, sep string, maxSize int) ([][]byte, []int) { c.buffer.Reset() //clear buffer var frames [][]byte @@ -171,13 +277,13 @@ func (c *Client) joinMaxSize(cmds []string, sep string, maxSize int) ([][]byte, if elem != 0 { c.buffer.Write(sepBytes) } - c.buffer.WriteString(cmd) + c.buffer.Write(cmd) elem++ } else { frames = append(frames, copyAndResetBuffer(&c.buffer)) ncmds = append(ncmds, elem) // if cmd is bigger than maxSize it will get flushed on next loop - c.buffer.WriteString(cmd) + c.buffer.Write(cmd) elem = 1 } } @@ -198,13 +304,23 @@ func copyAndResetBuffer(buf *bytes.Buffer) []byte { return tmpBuf } +// Flush forces a flush of the pending commands in the buffer +func (c *Client) Flush() error { + if c == nil { + return fmt.Errorf("Client is nil") + } + c.Lock() + defer c.Unlock() + return c.flushLocked() +} + // flush the commands in the buffer. Lock must be held by caller. -func (c *Client) flush() error { +func (c *Client) flushLocked() error { frames, flushable := c.joinMaxSize(c.commands, "\n", OptimalPayloadSize) var err error cmdsFlushed := 0 for i, data := range frames { - _, e := c.conn.Write(data) + _, e := c.writer.Write(data) if e != nil { err = e break @@ -223,71 +339,93 @@ func (c *Client) flush() error { return err } -func (c *Client) sendMsg(msg string) error { +func (c *Client) sendMsg(msg []byte) error { + // return an error if message is bigger than MaxUDPPayloadSize + if len(msg) > MaxUDPPayloadSize { + return errors.New("message size exceeds MaxUDPPayloadSize") + } + // if this client is buffered, then we'll just append this - c.Lock() - defer c.Unlock() if c.bufferLength > 0 { - // return an error if message is bigger than OptimalPayloadSize - if len(msg) > MaxUDPPayloadSize { - return errors.New("message size exceeds MaxUDPPayloadSize") - } return c.append(msg) } - _, err := c.conn.Write([]byte(msg)) + + _, err := c.writer.Write(msg) + + if c.SkipErrors { + return nil + } return err } // send handles sampling and sends the message over UDP. It also adds global namespace prefixes and tags. 
-func (c *Client) send(name, value string, tags []string, rate float64) error { +func (c *Client) send(name string, value interface{}, suffix []byte, tags []string, rate float64) error { if c == nil { - return nil + return fmt.Errorf("Client is nil") } if rate < 1 && rand.Float64() > rate { return nil } - data := c.format(name, value, tags, rate) + data := c.format(name, value, suffix, tags, rate) return c.sendMsg(data) } // Gauge measures the value of a metric at a particular time. func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error { - stat := fmt.Sprintf("%f|g", value) - return c.send(name, stat, tags, rate) + return c.send(name, value, gaugeSuffix, tags, rate) } // Count tracks how many times something happened per second. func (c *Client) Count(name string, value int64, tags []string, rate float64) error { - stat := fmt.Sprintf("%d|c", value) - return c.send(name, stat, tags, rate) + return c.send(name, value, countSuffix, tags, rate) } -// Histogram tracks the statistical distribution of a set of values. +// Histogram tracks the statistical distribution of a set of values on each host. func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error { - stat := fmt.Sprintf("%f|h", value) - return c.send(name, stat, tags, rate) + return c.send(name, value, histogramSuffix, tags, rate) +} + +// Distribution tracks the statistical distribution of a set of values across your infrastructure. +func (c *Client) Distribution(name string, value float64, tags []string, rate float64) error { + return c.send(name, value, distributionSuffix, tags, rate) +} + +// Decr is just Count of -1 +func (c *Client) Decr(name string, tags []string, rate float64) error { + return c.send(name, nil, decrSuffix, tags, rate) +} + +// Incr is just Count of 1 +func (c *Client) Incr(name string, tags []string, rate float64) error { + return c.send(name, nil, incrSuffix, tags, rate) } // Set counts the number of unique elements in a group. func (c *Client) Set(name string, value string, tags []string, rate float64) error { - stat := fmt.Sprintf("%s|s", value) - return c.send(name, stat, tags, rate) + return c.send(name, value, setSuffix, tags, rate) +} + +// Timing sends timing information, it is an alias for TimeInMilliseconds +func (c *Client) Timing(name string, value time.Duration, tags []string, rate float64) error { + return c.TimeInMilliseconds(name, value.Seconds()*1000, tags, rate) } // TimeInMilliseconds sends timing information in milliseconds. // It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing) func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error { - stat := fmt.Sprintf("%f|ms", value) - return c.send(name, stat, tags, rate) + return c.send(name, value, timingSuffix, tags, rate) } // Event sends the provided Event. func (c *Client) Event(e *Event) error { + if c == nil { + return fmt.Errorf("Client is nil") + } stat, err := e.Encode(c.Tags...) if err != nil { return err } - return c.sendMsg(stat) + return c.sendMsg([]byte(stat)) } // SimpleEvent sends an event with the provided title and text. @@ -296,37 +434,70 @@ func (c *Client) SimpleEvent(title, text string) error { return c.Event(e) } +// ServiceCheck sends the provided ServiceCheck. +func (c *Client) ServiceCheck(sc *ServiceCheck) error { + if c == nil { + return fmt.Errorf("Client is nil") + } + stat, err := sc.Encode(c.Tags...) 
+ if err != nil { + return err + } + return c.sendMsg([]byte(stat)) +} + +// SimpleServiceCheck sends a serviceCheck with the provided name and status. +func (c *Client) SimpleServiceCheck(name string, status ServiceCheckStatus) error { + sc := NewServiceCheck(name, status) + return c.ServiceCheck(sc) +} + // Close the client connection. func (c *Client) Close() error { if c == nil { - return nil + return fmt.Errorf("Client is nil") } - c.stop = true - return c.conn.Close() + select { + case c.stop <- struct{}{}: + default: + } + + // if this client is buffered, flush before closing the writer + if c.bufferLength > 0 { + if err := c.Flush(); err != nil { + return err + } + } + + return c.writer.Close() } // Events support +// EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41 +// The reason why they got exported is so that client code can directly use the types. -type eventAlertType string +// EventAlertType is the alert type for events +type EventAlertType string const ( // Info is the "info" AlertType for events - Info eventAlertType = "info" + Info EventAlertType = "info" // Error is the "error" AlertType for events - Error eventAlertType = "error" + Error EventAlertType = "error" // Warning is the "warning" AlertType for events - Warning eventAlertType = "warning" + Warning EventAlertType = "warning" // Success is the "success" AlertType for events - Success eventAlertType = "success" + Success EventAlertType = "success" ) -type eventPriority string +// EventPriority is the event priority for events +type EventPriority string const ( // Normal is the "normal" Priority for events - Normal eventPriority = "normal" + Normal EventPriority = "normal" // Low is the "low" Priority for events - Low eventPriority = "low" + Low EventPriority = "low" ) // An Event is an object that can be posted to your DataDog event stream. @@ -343,12 +514,12 @@ type Event struct { // AggregationKey groups this event with others of the same key. AggregationKey string // Priority of the event. Can be statsd.Low or statsd.Normal. - Priority eventPriority + Priority EventPriority // SourceTypeName is a source type for the event. SourceTypeName string // AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success. // If absent, the default value applied by the dogstatsd server is Info. - AlertType eventAlertType + AlertType EventAlertType // Tags for the event. Tags []string } @@ -424,16 +595,93 @@ func (e Event) Encode(tags ...string) (string, error) { buffer.WriteString(string(e.AlertType)) } - if len(tags)+len(e.Tags) > 0 { - all := make([]string, 0, len(tags)+len(e.Tags)) - all = append(all, tags...) - all = append(all, e.Tags...) - buffer.WriteString("|#") - buffer.WriteString(all[0]) - for _, tag := range all[1:] { - buffer.WriteString(",") - buffer.WriteString(tag) - } + writeTagString(&buffer, tags, e.Tags) + + return buffer.String(), nil +} + +// ServiceCheckStatus support +type ServiceCheckStatus byte + +const ( + // Ok is the "ok" ServiceCheck status + Ok ServiceCheckStatus = 0 + // Warn is the "warning" ServiceCheck status + Warn ServiceCheckStatus = 1 + // Critical is the "critical" ServiceCheck status + Critical ServiceCheckStatus = 2 + // Unknown is the "unknown" ServiceCheck status + Unknown ServiceCheckStatus = 3 +) + +// A ServiceCheck is an object that contains the status of a DataDog service check. +type ServiceCheck struct { + // Name of the service check. Required.
+ Name string + // Status of service check. Required. + Status ServiceCheckStatus + // Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd + // server will set this to the current time. + Timestamp time.Time + // Hostname for the serviceCheck. + Hostname string + // A message describing the current state of the serviceCheck. + Message string + // Tags for the serviceCheck. + Tags []string +} + +// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking +// against these values is done at send-time, or upon running sc.Check. +func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck { + return &ServiceCheck{ + Name: name, + Status: status, + } +} + +// Check verifies that a ServiceCheck is valid. +func (sc ServiceCheck) Check() error { + if len(sc.Name) == 0 { + return fmt.Errorf("statsd.ServiceCheck name is required") + } + if byte(sc.Status) < 0 || byte(sc.Status) > 3 { + return fmt.Errorf("statsd.ServiceCheck status has invalid value") + } + return nil +} + +// Encode returns the dogstatsd wire protocol representation for a serviceCheck. +// Tags may be passed which will be added to the encoded output but not to +// the ServiceCheck's list of tags, e.g. for default tags. +func (sc ServiceCheck) Encode(tags ...string) (string, error) { + err := sc.Check() + if err != nil { + return "", err + } + message := sc.escapedMessage() + + var buffer bytes.Buffer + buffer.WriteString("_sc|") + buffer.WriteString(sc.Name) + buffer.WriteRune('|') + buffer.WriteString(strconv.FormatInt(int64(sc.Status), 10)) + + if !sc.Timestamp.IsZero() { + buffer.WriteString("|d:") + buffer.WriteString(strconv.FormatInt(int64(sc.Timestamp.Unix()), 10)) + } + + if len(sc.Hostname) != 0 { + buffer.WriteString("|h:") + buffer.WriteString(sc.Hostname) + } + + writeTagString(&buffer, tags, sc.Tags) + + if len(message) != 0 { + buffer.WriteString("|m:") + buffer.WriteString(message) } return buffer.String(), nil @@ -442,3 +690,68 @@ func (e Event) Encode(tags ...string) (string, error) { func (e Event) escapedText() string { return strings.Replace(e.Text, "\n", "\\n", -1) } + +func (sc ServiceCheck) escapedMessage() string { + msg := strings.Replace(sc.Message, "\n", "\\n", -1) + return strings.Replace(msg, "m:", `m\:`, -1) +} + +func removeNewlines(str string) string { + return strings.Replace(str, "\n", "", -1) +} + +func writeTagString(w io.Writer, tagList1, tagList2 []string) { + // the tag lists may be shared with other callers, so we cannot modify + // them in any way (which means we cannot append to them either) + // therefore we must make an entirely separate copy just for this call + totalLen := len(tagList1) + len(tagList2) + if totalLen == 0 { + return + } + tags := make([]string, 0, totalLen) + tags = append(tags, tagList1...) + tags = append(tags, tagList2...) + + io.WriteString(w, "|#") + io.WriteString(w, removeNewlines(tags[0])) + for _, tag := range tags[1:] { + io.WriteString(w, ",") + io.WriteString(w, removeNewlines(tag)) + } +} + +func appendTagString(buf []byte, tagList1, tagList2 []string) []byte { + if len(tagList1) == 0 { + if len(tagList2) == 0 { + return buf + } + tagList1 = tagList2 + tagList2 = nil + } + + buf = append(buf, "|#"...)
+ buf = appendWithoutNewlines(buf, tagList1[0]) + for _, tag := range tagList1[1:] { + buf = append(buf, ',') + buf = appendWithoutNewlines(buf, tag) + } + for _, tag := range tagList2 { + buf = append(buf, ',') + buf = appendWithoutNewlines(buf, tag) + } + return buf +} + +func appendWithoutNewlines(buf []byte, s string) []byte { + // fastpath for strings without newlines + if strings.IndexByte(s, '\n') == -1 { + return append(buf, s...) + } + + for _, b := range []byte(s) { + if b != '\n' { + buf = append(buf, b) + } + } + return buf +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/udp.go b/vendor/github.com/DataDog/datadog-go/statsd/udp.go new file mode 100644 index 000000000..9ddff421c --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/udp.go @@ -0,0 +1,73 @@ +package statsd + +import ( + "errors" + "fmt" + "net" + "os" + "time" +) + +const ( + autoHostEnvName = "DD_AGENT_HOST" + autoPortEnvName = "DD_DOGSTATSD_PORT" + defaultUDPPort = "8125" +) + +// udpWriter is an internal class wrapping around management of UDP connection +type udpWriter struct { + conn net.Conn +} + +// New returns a pointer to a new udpWriter given an addr in the format "hostname:port". +func newUDPWriter(addr string) (*udpWriter, error) { + if addr == "" { + addr = addressFromEnvironment() + } + if addr == "" { + return nil, errors.New("No address passed and autodetection from environment failed") + } + + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return nil, err + } + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + writer := &udpWriter{conn: conn} + return writer, nil +} + +// SetWriteTimeout is not needed for UDP, returns error +func (w *udpWriter) SetWriteTimeout(d time.Duration) error { + return errors.New("SetWriteTimeout: not supported for UDP connections") +} + +// Write data to the UDP connection with no error handling +func (w *udpWriter) Write(data []byte) (int, error) { + return w.conn.Write(data) +} + +func (w *udpWriter) Close() error { + return w.conn.Close() +} + +func (w *udpWriter) remoteAddr() net.Addr { + return w.conn.RemoteAddr() +} + +func addressFromEnvironment() string { + autoHost := os.Getenv(autoHostEnvName) + if autoHost == "" { + return "" + } + + autoPort := os.Getenv(autoPortEnvName) + if autoPort == "" { + autoPort = defaultUDPPort + } + + return fmt.Sprintf("%s:%s", autoHost, autoPort) +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/uds.go b/vendor/github.com/DataDog/datadog-go/statsd/uds.go new file mode 100644 index 000000000..cc2537e00 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/uds.go @@ -0,0 +1,11 @@ +package statsd + +import ( + "time" +) + +/* +UDSTimeout holds the default timeout for UDS socket writes, as they can get +blocking when the receiving buffer is full. 
+*/ +const defaultUDSTimeout = 1 * time.Millisecond diff --git a/vendor/github.com/DataDog/datadog-go/statsd/uds_async.go b/vendor/github.com/DataDog/datadog-go/statsd/uds_async.go new file mode 100644 index 000000000..39d4ccb23 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/uds_async.go @@ -0,0 +1,113 @@ +package statsd + +import ( + "fmt" + "net" + "time" +) + +// asyncUdsWriter is an internal class wrapping around management of UDS connection +type asyncUdsWriter struct { + // Address to send metrics to, needed to allow reconnection on error + addr net.Addr + // Established connection object, or nil if not connected yet + conn net.Conn + // write timeout + writeTimeout time.Duration + // datagramQueue is the queue of datagrams ready to be sent + datagramQueue chan []byte + stopChan chan struct{} +} + +// New returns a pointer to a new asyncUdsWriter given a socket file path as addr. +func newAsyncUdsWriter(addr string) (*asyncUdsWriter, error) { + udsAddr, err := net.ResolveUnixAddr("unixgram", addr) + if err != nil { + return nil, err + } + + writer := &asyncUdsWriter{ + addr: udsAddr, + conn: nil, + writeTimeout: defaultUDSTimeout, + // 8192 * 8KB = 65.5MB + datagramQueue: make(chan []byte, 8192), + stopChan: make(chan struct{}, 1), + } + + go writer.sendLoop() + return writer, nil +} + +func (w *asyncUdsWriter) sendLoop() { + for { + select { + case datagram := <-w.datagramQueue: + w.write(datagram) + case <-w.stopChan: + return + } + } +} + +// SetWriteTimeout allows the user to set a custom write timeout +func (w *asyncUdsWriter) SetWriteTimeout(d time.Duration) error { + w.writeTimeout = d + return nil +} + +// Write data to the UDS connection with write timeout and minimal error handling: +// create the connection if nil, and destroy it if the statsd server has disconnected +func (w *asyncUdsWriter) Write(data []byte) (int, error) { + select { + case w.datagramQueue <- data: + return len(data), nil + default: + return 0, fmt.Errorf("uds datagram queue is full (the agent might not be able to keep up)") + } +} + +// write writes the given data to the UDS. +// This function is **not** thread safe. 
+func (w *asyncUdsWriter) write(data []byte) (int, error) { + conn, err := w.ensureConnection() + if err != nil { + return 0, err + } + + conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) + n, err := conn.Write(data) + + if e, isNetworkErr := err.(net.Error); !isNetworkErr || !e.Temporary() { + // err is not temporary, Statsd server disconnected, retry connecting at next packet + w.unsetConnection() + return 0, e + } + + return n, err +} + +func (w *asyncUdsWriter) Close() error { + close(w.stopChan) + if w.conn != nil { + return w.conn.Close() + } + return nil +} + +func (w *asyncUdsWriter) ensureConnection() (net.Conn, error) { + if w.conn != nil { + return w.conn, nil + } + + newConn, err := net.Dial(w.addr.Network(), w.addr.String()) + if err != nil { + return nil, err + } + w.conn = newConn + return newConn, nil +} + +func (w *asyncUdsWriter) unsetConnection() { + w.conn = nil +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/uds_blocking.go b/vendor/github.com/DataDog/datadog-go/statsd/uds_blocking.go new file mode 100644 index 000000000..70ee99ab3 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/uds_blocking.go @@ -0,0 +1,92 @@ +package statsd + +import ( + "net" + "sync" + "time" +) + +// blockingUdsWriter is an internal class wrapping around management of UDS connection +type blockingUdsWriter struct { + // Address to send metrics to, needed to allow reconnection on error + addr net.Addr + // Established connection object, or nil if not connected yet + conn net.Conn + // write timeout + writeTimeout time.Duration + sync.RWMutex // used to lock conn / writer can replace it +} + +// New returns a pointer to a new blockingUdsWriter given a socket file path as addr. +func newBlockingUdsWriter(addr string) (*blockingUdsWriter, error) { + udsAddr, err := net.ResolveUnixAddr("unixgram", addr) + if err != nil { + return nil, err + } + // Defer connection to first Write + writer := &blockingUdsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout} + return writer, nil +} + +// SetWriteTimeout allows the user to set a custom write timeout +func (w *blockingUdsWriter) SetWriteTimeout(d time.Duration) error { + w.writeTimeout = d + return nil +} + +// Write data to the UDS connection with write timeout and minimal error handling: +// create the connection if nil, and destroy it if the statsd server has disconnected +func (w *blockingUdsWriter) Write(data []byte) (int, error) { + conn, err := w.ensureConnection() + if err != nil { + return 0, err + } + + conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) + n, e := conn.Write(data) + + if err, isNetworkErr := e.(net.Error); !isNetworkErr || !err.Temporary() { + // Statsd server disconnected, retry connecting at next packet + w.unsetConnection() + return 0, e + } + return n, e +} + +func (w *blockingUdsWriter) Close() error { + if w.conn != nil { + return w.conn.Close() + } + return nil +} + +func (w *blockingUdsWriter) ensureConnection() (net.Conn, error) { + // Check if we've already got a socket we can use + w.RLock() + currentConn := w.conn + w.RUnlock() + + if currentConn != nil { + return currentConn, nil + } + + // Looks like we might need to connect - try again with write locking. 
+ w.Lock() + defer w.Unlock() + if w.conn != nil { + return w.conn, nil + } + + newConn, err := net.Dial(w.addr.Network(), w.addr.String()) + if err != nil { + return nil, err + } + w.conn = newConn + return newConn, nil +} + +func (w *blockingUdsWriter) unsetConnection() { + w.Lock() + defer w.Unlock() + w.conn = nil +} diff --git a/vendor/github.com/armon/go-metrics/.travis.yml b/vendor/github.com/armon/go-metrics/.travis.yml new file mode 100644 index 000000000..87d230c8d --- /dev/null +++ b/vendor/github.com/armon/go-metrics/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - "1.x" + +env: + - GO111MODULE=on + +install: + - go get ./... + +script: + - go test ./... diff --git a/vendor/github.com/armon/go-metrics/go.mod b/vendor/github.com/armon/go-metrics/go.mod new file mode 100644 index 000000000..88e1e98fb --- /dev/null +++ b/vendor/github.com/armon/go-metrics/go.mod @@ -0,0 +1,16 @@ +module github.com/armon/go-metrics + +go 1.12 + +require ( + github.com/DataDog/datadog-go v2.2.0+incompatible + github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible + github.com/circonus-labs/circonusllhist v0.1.3 // indirect + github.com/hashicorp/go-immutable-radix v1.0.0 + github.com/hashicorp/go-retryablehttp v0.5.3 // indirect + github.com/pascaldekloe/goe v0.1.0 + github.com/pkg/errors v0.8.1 // indirect + github.com/prometheus/client_golang v0.9.2 + github.com/stretchr/testify v1.3.0 // indirect + github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect +) diff --git a/vendor/github.com/armon/go-metrics/go.sum b/vendor/github.com/armon/go-metrics/go.sum new file mode 100644 index 000000000..5ffd8329a --- /dev/null +++ b/vendor/github.com/armon/go-metrics/go.sum @@ -0,0 +1,46 @@ +github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 
h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go index 4e2d6a709..93b0e0ad8 100644 --- a/vendor/github.com/armon/go-metrics/inmem.go +++ b/vendor/github.com/armon/go-metrics/inmem.go @@ -255,11 +255,11 @@ func (i *InmemSink) Data() []*IntervalMetrics { } copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters)) for k, v := range current.Counters { - copyCurrent.Counters[k] = v + copyCurrent.Counters[k] = v.deepCopy() } copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples)) for k, v := range current.Samples { - copyCurrent.Samples[k] = v + copyCurrent.Samples[k] = v.deepCopy() } current.RUnlock() diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go 
b/vendor/github.com/armon/go-metrics/inmem_endpoint.go index 504f1b374..5fac958d9 100644 --- a/vendor/github.com/armon/go-metrics/inmem_endpoint.go +++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go @@ -41,6 +41,16 @@ type SampledValue struct { DisplayLabels map[string]string `json:"Labels"` } +// deepCopy allocates a new instance of AggregateSample +func (source *SampledValue) deepCopy() SampledValue { + dest := *source + if source.AggregateSample != nil { + dest.AggregateSample = &AggregateSample{} + *dest.AggregateSample = *source.AggregateSample + } + return dest +} + // DisplayMetrics returns a summary of the metrics from the most recent finished interval. func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) { data := i.Data() @@ -52,12 +62,15 @@ func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) return nil, fmt.Errorf("no metric intervals have been initialized yet") case n == 1: // Show the current interval if it's all we have - interval = i.intervals[0] + interval = data[0] default: // Show the most recent finished interval if we have one - interval = i.intervals[n-2] + interval = data[n-2] } + interval.RLock() + defer interval.RUnlock() + summary := MetricsSummary{ Timestamp: interval.Interval.Round(time.Second).UTC().String(), Gauges: make([]GaugeValue, 0, len(interval.Gauges)), diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go index cf9def748..4920d6832 100644 --- a/vendor/github.com/armon/go-metrics/metrics.go +++ b/vendor/github.com/armon/go-metrics/metrics.go @@ -197,7 +197,7 @@ func (m *Metrics) filterLabels(labels []Label) []Label { if labels == nil { return nil } - toReturn := labels[:0] + toReturn := []Label{} for _, label := range labels { if m.labelIsAllowed(&label) { toReturn = append(toReturn, label) diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/.gitignore b/vendor/github.com/circonus-labs/circonus-gometrics/.gitignore index 713e0e0aa..af1728d94 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/.gitignore +++ b/vendor/github.com/circonus-labs/circonus-gometrics/.gitignore @@ -1,3 +1,11 @@ .DS_Store env.sh NOTES.md + +# codecov.io +.codecov +coverage.txt +coverage.xml +coverage.html + +vendor/ diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/CHANGELOG.md b/vendor/github.com/circonus-labs/circonus-gometrics/CHANGELOG.md new file mode 100644 index 000000000..dbaaa1240 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/CHANGELOG.md @@ -0,0 +1,72 @@ +# v2.3.1 + +* fix: incorrect attribute types in graph overlays (docs vs what api actually returns) + +# v2.3.0 + +* fix: graph structures incorrectly represented nesting of overlay sets + +# v2.2.7 + +* add: `search` (`*string`) attribute to graph datapoint +* add: `cluster_ip` (`*string`) attribute to broker details + +# v2.2.6 + +* fix: func signature to match go-retryablehttp update +* upd: dependency go-retryablehttp, lock to v0.5.2 to prevent future breaking patch features + +# v2.2.5 + +* upd: switch from tracking master to versions for retryablehttp and circonusllhist now that both repositories are doing releases + +# v2.2.4 + +* fix: worksheet.graphs is a required attribute. worksheet.smart_queries is an optional attribute. + +# v2.2.3 + +* upd: remove go.{mod,dep} as cgm being v2 causes more issues than it solves at this point. 
will re-add after `go mod` becomes more common and adding `v2` to all internal import statements won't cause additional issues. + +# v2.2.2 + +* upd: add go.mod and go.sum + +# v2.2.1 + +* fix: if submission url host is 'api.circonus.com' do not use private CA in TLSConfig + +# v2.2.0 + +* fix: do not reset counter|gauge|text funcs after each snapshot (only on explicit call to Reset) +* upd: dashboards - optional widget attributes - which are structs - should be pointers for correct omission in json sent to api +* fix: dashboards - remove `omitempty` from required attributes +* fix: graphs - remove `omitempty` from required attributes +* fix: worksheets - correct attribute name, remove `omitempty` from required attributes +* fix: handle case where a broker has no external host or ip set + +# v2.1.2 + +* upd: breaking change in upstream repo +* upd: upstream deps + +# v2.1.1 + +* dep dependencies +* fix two instances of shadowed variables +* fix several documentation typos +* simplify (gofmt -s) +* remove an inefficient use of regexp.MatchString + +# v2.1.0 + +* Add unix socket capability for SubmissionURL `http+unix://...` +* Add `RecordCountForValue` function to histograms + +# v2.0.0 + +* gauges as `interface{}` + * change: `GeTestGauge(string) (string,error)` -> `GeTestGauge(string) (interface{},error)` + * add: `AddGauge(string, interface{})` to add a delta value to an existing gauge +* prom output candidate +* Add `CHANGELOG.md` to repository diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.lock b/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.lock new file mode 100644 index 000000000..d306f4011 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.lock @@ -0,0 +1,39 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+
+
+[[projects]]
+  name = "github.com/circonus-labs/circonusllhist"
+  packages = ["."]
+  revision = "87d4d00b35adeefe4911ece727838749e0fab113"
+  version = "v0.1.3"
+
+[[projects]]
+  name = "github.com/hashicorp/go-cleanhttp"
+  packages = ["."]
+  revision = "e8ab9daed8d1ddd2d3c4efba338fe2eeae2e4f18"
+  version = "v0.5.0"
+
+[[projects]]
+  name = "github.com/hashicorp/go-retryablehttp"
+  packages = ["."]
+  revision = "73489d0a1476f0c9e6fb03f9c39241523a496dfd"
+  version = "v0.5.2"
+
+[[projects]]
+  name = "github.com/pkg/errors"
+  packages = ["."]
+  revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
+  version = "v0.8.1"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/tv42/httpunix"
+  packages = ["."]
+  revision = "b75d8614f926c077e48d85f1f8f7885b758c6225"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "ff81639f2f1513555846304ee903af4d13a0f0f181e140e1ebb1d71aa18fb5fb"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.toml b/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.toml
new file mode 100644
index 000000000..bb40a91e2
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.toml
@@ -0,0 +1,15 @@
+[[constraint]]
+  name = "github.com/circonus-labs/circonusllhist"
+  version = "0.1.3"
+
+[[constraint]]
+  name = "github.com/hashicorp/go-retryablehttp"
+  version = "=0.5.2"
+
+[[constraint]]
+  name = "github.com/pkg/errors"
+  version = "0.8.1"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/tv42/httpunix"
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md b/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md
new file mode 100644
index 000000000..f54c9984e
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md
@@ -0,0 +1,113 @@
+## Circonus gometrics options
+
+### Example defaults
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"path"
+
+	cgm "github.com/circonus-labs/circonus-gometrics"
+)
+
+func main() {
+	cfg := &cgm.Config{}
+
+	// Defaults
+
+	// General
+	cfg.Debug = false
+	cfg.Log = log.New(ioutil.Discard, "", log.LstdFlags)
+	cfg.Interval = "10s"
+	cfg.ResetCounters = "true"
+	cfg.ResetGauges = "true"
+	cfg.ResetHistograms = "true"
+	cfg.ResetText = "true"
+
+	// API
+	cfg.CheckManager.API.TokenKey = ""
+	cfg.CheckManager.API.TokenApp = "circonus-gometrics"
+	cfg.CheckManager.API.URL = "https://api.circonus.com/v2"
+	cfg.CheckManager.API.CACert = nil
+	cfg.CheckManager.API.TLSConfig = nil
+
+	// Check
+	_, an := path.Split(os.Args[0])
+	hn, _ := os.Hostname()
+	cfg.CheckManager.Check.ID = ""
+	cfg.CheckManager.Check.SubmissionURL = ""
+	cfg.CheckManager.Check.InstanceID = fmt.Sprintf("%s:%s", hn, an)
+	cfg.CheckManager.Check.TargetHost = cfg.CheckManager.Check.InstanceID
+	cfg.CheckManager.Check.DisplayName = cfg.CheckManager.Check.InstanceID
+	cfg.CheckManager.Check.SearchTag = fmt.Sprintf("service:%s", an)
+	cfg.CheckManager.Check.Tags = ""
+	cfg.CheckManager.Check.Secret = "" // randomly generated sha256 hash
+	cfg.CheckManager.Check.MaxURLAge = "5m"
+	cfg.CheckManager.Check.ForceMetricActivation = "false"
+
+	// Broker
+	cfg.CheckManager.Broker.ID = ""
+	cfg.CheckManager.Broker.SelectTag = ""
+	cfg.CheckManager.Broker.MaxResponseTime = "500ms"
+	cfg.CheckManager.Broker.TLSConfig = nil
+
+	// create a new cgm instance and start sending metrics...
+	// see the complete example in the main README.
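+	// Illustration only (mirrors the full example in the main README): with
+	// the config above, a client is created and used roughly like this:
+	//
+	//   metrics, err := cgm.NewCirconusMetrics(cfg)
+	//   if err != nil {
+	//       log.Fatal(err)
+	//   }
+	//   metrics.Increment("example")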
+} +``` + +## Options +| Option | Default | Description | +| ------ | ------- | ----------- | +| General || +| `cfg.Log` | none | log.Logger instance to send logging messages. Default is to discard messages. If Debug is turned on and no instance is specified, messages will go to stderr. | +| `cfg.Debug` | false | Turn on debugging messages. | +| `cfg.Interval` | "10s" | Interval at which metrics are flushed and sent to Circonus. Set to "0s" to disable automatic flush (note, if disabled, `cgm.Flush()` must be called manually to send metrics to Circonus).| +| `cfg.ResetCounters` | "true" | Reset counter metrics after each submission. Change to "false" to retain (and continue submitting) the last value.| +| `cfg.ResetGauges` | "true" | Reset gauge metrics after each submission. Change to "false" to retain (and continue submitting) the last value.| +| `cfg.ResetHistograms` | "true" | Reset histogram metrics after each submission. Change to "false" to retain (and continue submitting) the last value.| +| `cfg.ResetText` | "true" | Reset text metrics after each submission. Change to "false" to retain (and continue submitting) the last value.| +|API|| +| `cfg.CheckManager.API.TokenKey` | "" | [Circonus API Token key](https://login.circonus.com/user/tokens) | +| `cfg.CheckManager.API.TokenApp` | "circonus-gometrics" | App associated with API token | +| `cfg.CheckManager.API.URL` | "https://api.circonus.com/v2" | Circonus API URL | +| `cfg.CheckManager.API.TLSConfig` | nil | Custom tls.Config to use when communicating with Circonus API | +| `cfg.CheckManager.API.CACert` | nil | DEPRECATED - use TLSConfig ~~[*x509.CertPool](https://golang.org/pkg/crypto/x509/#CertPool) with CA Cert to validate API endpoint using internal CA or self-signed certificates~~ | +|Check|| +| `cfg.CheckManager.Check.ID` | "" | Check ID of previously created check. (*Note: **check id** not **check bundle id**.*) | +| `cfg.CheckManager.Check.SubmissionURL` | "" | Submission URL of previously created check. Metrics can also be sent to a local [circonus-agent](https://github.com/circonus-labs/circonus-agent) by using the agent's URL (e.g. `http://127.0.0.1:2609/write/appid` where `appid` is a unique identifier for the application which will prefix all metrics. Additionally, the circonus-agent can optionally listen for requests to `/write` on a unix socket - to leverage this feature, use a URL such as `http+unix:///path/to/socket_file/write/appid`). | +| `cfg.CheckManager.Check.InstanceID` | hostname:program name | An identifier for the 'group of metrics emitted by this process or service'. | +| `cfg.CheckManager.Check.TargetHost` | InstanceID | Explicit setting of `check.target`. | +| `cfg.CheckManager.Check.DisplayName` | InstanceID | Custom `check.display_name`. Shows in UI check list. | +| `cfg.CheckManager.Check.SearchTag` | service:program name | Specific tag used to search for an existing check when neither SubmissionURL nor ID are provided. | +| `cfg.CheckManager.Check.Tags` | "" | List (comma separated) of tags to add to check when it is being created. The SearchTag will be added to the list. | +| `cfg.CheckManager.Check.Secret` | random generated | A secret to use for when creating an httptrap check. | +| `cfg.CheckManager.Check.MaxURLAge` | "5m" | Maximum amount of time to retry a [failing] submission URL before refreshing it. 
|
+| `cfg.CheckManager.Check.ForceMetricActivation` | "false" | If a metric has been disabled via the UI, the default behavior is to *not* re-activate the metric; this setting overrides that behavior and will re-activate the metric when it is encountered. |
+|Broker||
+| `cfg.CheckManager.Broker.ID` | "" | ID of a specific broker to use when creating a check. Default is to use a random enterprise broker or the public Circonus default broker. |
+| `cfg.CheckManager.Broker.SelectTag` | "" | Used to select a broker with the same tag(s). If more than one broker has the tag(s), one will be selected randomly from the resulting list. (e.g. could be used to select one from a list of brokers serving a specific colo/region. "dc:sfo", "loc:nyc,dc:nyc01", "zone:us-west") |
+| `cfg.CheckManager.Broker.MaxResponseTime` | "500ms" | Maximum amount of time to wait for a broker connection test to be considered valid. (If latency exceeds this value, the broker will be considered invalid and not available for selection.) |
+| `cfg.CheckManager.Broker.TLSConfig` | nil | Custom tls.Config to use when communicating with Circonus Broker |
+
+## Notes:
+
+* All options are *strings* with the following exceptions:
+  * `cfg.Log` - an instance of [`log.Logger`](https://golang.org/pkg/log/#Logger) or something else (e.g. [logrus](https://github.com/Sirupsen/logrus)) which can be used to satisfy the interface requirements.
+  * `cfg.Debug` - a boolean true|false.
+* At a minimum, one of either `API.TokenKey` or `Check.SubmissionURL` is **required** for cgm to function.
+* Check management can be disabled by providing a `Check.SubmissionURL` without an `API.TokenKey`. Note: the supplied URL needs to be http or the broker needs to be running with a cert which can be verified. Otherwise, the `API.TokenKey` will be required to retrieve the correct CA certificate to validate the broker's cert for the SSL connection.
+* A note on `Check.InstanceID`: the instance id is used to consistently identify a check. The display name can be changed in the UI. The hostname may be ephemeral. For metric continuity, the instance id is used to locate existing checks. Since check.target is never actually used by an httptrap check, it is more decorative than functional; a valid FQDN is not required for an httptrap check.target. However, using the instance id as the target can pollute the Host list in the UI with host:application specific entries.
+* Check identification precedence
+  1. Check SubmissionURL
+  2. Check ID
+  3. Search
+     1. Search for an active httptrap check for TargetHost which has the SearchTag
+     2. Search for an active httptrap check which has the SearchTag and the InstanceID in the notes field
+     3. Create a new check
+* Broker selection
+  1. If Broker.ID or Broker.SelectTag are not specified, a broker will be selected randomly from the list of brokers available to the API token. Enterprise brokers take precedence. A viable broker is "active", has the "httptrap" module enabled, and responds within Broker.MaxResponseTime.
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/README.md b/vendor/github.com/circonus-labs/circonus-gometrics/README.md
index 77daae05b..361920309 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/README.md
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/README.md
@@ -1,189 +1,186 @@
 # Circonus metrics tracking for Go applications
-This library supports named counters, gauges and histograms.
-It also provides convenience wrappers for registering latency -instrumented functions with Go's builtin http server. +This library supports named counters, gauges and histograms. It also provides convenience wrappers for registering latency instrumented functions with Go's builtin http server. -Initializing only requires setting an ApiToken. +Initializing requires, at a minimum, setting an [API Token](https://login.circonus.com/user/tokens). + +## Options + +See [OPTIONS.md](OPTIONS.md) for information on all of the available cgm options. ## Example -**rough and simple** +### Bare bones minimum + +A working cut-n-paste example. Simply set the required environment variable `CIRCONUS_API_TOKEN` and run. ```go package main import ( - "log" - "math/rand" - "os" - "time" + "log" + "math/rand" + "os" + "os/signal" + "syscall" + "time" - cgm "github.com/circonus-labs/circonus-gometrics" + cgm "github.com/circonus-labs/circonus-gometrics" ) func main() { logger := log.New(os.Stdout, "", log.LstdFlags) - logger.Println("Configuring cgm") + logger.Println("Configuring cgm") - cmc := &cgm.Config{} + cmc := &cgm.Config{} + cmc.Debug = false // set to true for debug messages + cmc.Log = logger - // Interval at which metrics are submitted to Circonus, default: 10 seconds - // cmc.Interval = "10s" // 10 seconds + // Circonus API Token key (https://login.circonus.com/user/tokens) + cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN") - // Enable debug messages, default: false - cmc.Debug = true + logger.Println("Creating new cgm instance") - // Send debug messages to specific log.Logger instance - // default: if debug stderr, else, discard - cmc.Log = logger + metrics, err := cgm.NewCirconusMetrics(cmc) + if err != nil { + logger.Println(err) + os.Exit(1) + } - // Reset counter metrics after each submission, default: "true" - // Change to "false" to retain (and continue submitting) the last value. - // cmc.ResetCounters = "true" - - // Reset gauge metrics after each submission, default: "true" - // Change to "false" to retain (and continue submitting) the last value. - // cmc.ResetGauges = "true" - - // Reset histogram metrics after each submission, default: "true" - // Change to "false" to retain (and continue submitting) the last value. - // cmc.ResetHistograms = "true" - - // Reset text metrics after each submission, default: "true" - // Change to "false" to retain (and continue submitting) the last value.
- // cmc.ResetText = "true" - - // Circonus API configuration options - // - // Token, no default (blank disables check manager) - cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN") - // App name, default: circonus-gometrics - cmc.CheckManager.API.TokenApp = os.Getenv("CIRCONUS_API_APP") - // URL, default: https://api.circonus.com/v2 - cmc.CheckManager.API.URL = os.Getenv("CIRCONUS_API_URL") - - // Check configuration options - // - // precedence 1 - explicit submission_url - // precedence 2 - specific check id (note: not a check bundle id) - // precedence 3 - search using instanceId and searchTag - // otherwise: if an applicable check is NOT specified or found, an - // attempt will be made to automatically create one - // - // Submission URL for an existing [httptrap] check - cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISION_URL") - - // ID of an existing [httptrap] check (note: check id not check bundle id) - cmc.CheckManager.Check.ID = os.Getenv("CIRCONUS_CHECK_ID") - - // if neither a submission url nor check id are provided, an attempt will be made to find an existing - // httptrap check by using the circonus api to search for a check matching the following criteria: - // an active check, - // of type httptrap, - // where the target/host is equal to InstanceId - see below - // and the check has a tag equal to SearchTag - see below - // Instance ID - an identifier for the 'group of metrics emitted by this process or service' - // this is used as the value for check.target (aka host) - // default: 'hostname':'program name' - // note: for a persistent instance that is ephemeral or transient where metric continuity is - // desired set this explicitly so that the current hostname will not be used. - // cmc.CheckManager.Check.InstanceID = "" - - // Search tag - specific tag(s) used in conjunction with isntanceId to search for an - // existing check. comma separated string of tags (spaces will be removed, no commas - // in tag elements). - // default: service:application name (e.g. service:consul service:nomad etc.) - // cmc.CheckManager.Check.SearchTag = "" - - // Check secret, default: generated when a check needs to be created - // cmc.CheckManager.Check.Secret = "" - - // Additional tag(s) to add when *creating* a check. comma separated string - // of tags (spaces will be removed, no commas in tag elements). - // (e.g. group:abc or service_role:agent,group:xyz). - // default: none - // cmc.CheckManager.Check.Tags = "" - - // max amount of time to to hold on to a submission url - // when a given submission fails (due to retries) if the - // time the url was last updated is > than this, the trap - // url will be refreshed (e.g. if the broker is changed - // in the UI) default 5 minutes - // cmc.CheckManager.Check.MaxURLAge = "5m" - - // custom display name for check, default: "InstanceId /cgm" - // cmc.CheckManager.Check.DisplayName = "" - - // force metric activation - if a metric has been disabled via the UI - // the default behavior is to *not* re-activate the metric; this setting - // overrides the behavior and will re-activate the metric when it is - // encountered. "(true|false)", default "false" - // cmc.CheckManager.Check.ForceMetricActivation = "false" - - // Broker configuration options - // - // Broker ID of specific broker to use, default: random enterprise broker or - // Circonus default if no enterprise brokers are available. 
- // default: only used if set - // cmc.CheckManager.Broker.ID = "" - - // used to select a broker with the same tag(s) (e.g. can be used to dictate that a broker - // serving a specific location should be used. "dc:sfo", "loc:nyc,dc:nyc01", "zone:us-west") - // if more than one broker has the tag(s), one will be selected randomly from the resulting - // list. comma separated string of tags (spaces will be removed, no commas in tag elements). - // default: none - // cmc.CheckManager.Broker.SelectTag = "" - - // longest time to wait for a broker connection (if latency is > the broker will - // be considered invalid and not available for selection.), default: 500 milliseconds - // cmc.CheckManager.Broker.MaxResponseTime = "500ms" - - // note: if broker Id or SelectTag are not specified, a broker will be selected randomly - // from the list of brokers available to the api token. enterprise brokers take precedence - // viable brokers are "active", have the "httptrap" module enabled, are reachable and respond - // within MaxResponseTime. - - logger.Println("Creating new cgm instance") - - metrics, err := cgm.NewCirconusMetrics(cmc) - if err != nil { - panic(err) - } - - src := rand.NewSource(time.Now().UnixNano()) - rnd := rand.New(src) - - logger.Println("Starting cgm internal auto-flush timer") - metrics.Start() + src := rand.NewSource(time.Now().UnixNano()) + rnd := rand.New(src) logger.Println("Adding ctrl-c trap") - c := make(chan os.Signal, 2) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - go func() { - <-c - logger.Println("Received CTRL-C, flushing outstanding metrics before exit") - metrics.Flush() - os.Exit(0) - }() + c := make(chan os.Signal, 2) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + logger.Println("Received CTRL-C, flushing outstanding metrics before exit") + metrics.Flush() + os.Exit(0) + }() + + logger.Println("Starting to send metrics") + + // number of "sets" of metrics to send + max := 60 + + for i := 1; i < max; i++ { + logger.Printf("\tmetric set %d of %d", i, 60) + metrics.Timing("foo", rnd.Float64()*10) + metrics.Increment("bar") + metrics.Gauge("baz", 10) + time.Sleep(time.Second) + } + + metrics.SetText("fini", "complete") + + logger.Println("Flushing any outstanding metrics manually") + metrics.Flush() +} +``` + +### A more complete example + +A working, cut-n-paste example with all options available for modification. Also, demonstrates metric tagging. 
+ +```go +package main + +import ( + "log" + "math/rand" + "os" + "os/signal" + "syscall" + "time" + + cgm "github.com/circonus-labs/circonus-gometrics" +) + +func main() { + + logger := log.New(os.Stdout, "", log.LstdFlags) + + logger.Println("Configuring cgm") + + cmc := &cgm.Config{} + + // General + + cmc.Interval = "10s" + cmc.Log = logger + cmc.Debug = false + cmc.ResetCounters = "true" + cmc.ResetGauges = "true" + cmc.ResetHistograms = "true" + cmc.ResetText = "true" + + // Circonus API configuration options + cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN") + cmc.CheckManager.API.TokenApp = os.Getenv("CIRCONUS_API_APP") + cmc.CheckManager.API.URL = os.Getenv("CIRCONUS_API_URL") + cmc.CheckManager.API.TLSConfig = nil + + // Check configuration options + cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISSION_URL") + cmc.CheckManager.Check.ID = os.Getenv("CIRCONUS_CHECK_ID") + cmc.CheckManager.Check.InstanceID = "" + cmc.CheckManager.Check.DisplayName = "" + cmc.CheckManager.Check.TargetHost = "" + // if hn, err := os.Hostname(); err == nil { + // cmc.CheckManager.Check.TargetHost = hn + // } + cmc.CheckManager.Check.SearchTag = "" + cmc.CheckManager.Check.Secret = "" + cmc.CheckManager.Check.Tags = "" + cmc.CheckManager.Check.MaxURLAge = "5m" + cmc.CheckManager.Check.ForceMetricActivation = "false" + + // Broker configuration options + cmc.CheckManager.Broker.ID = "" + cmc.CheckManager.Broker.SelectTag = "" + cmc.CheckManager.Broker.MaxResponseTime = "500ms" + cmc.CheckManager.Broker.TLSConfig = nil + + logger.Println("Creating new cgm instance") + + metrics, err := cgm.NewCirconusMetrics(cmc) + if err != nil { + logger.Println(err) + os.Exit(1) + } + + src := rand.NewSource(time.Now().UnixNano()) + rnd := rand.New(src) + + logger.Println("Adding ctrl-c trap") + c := make(chan os.Signal, 2) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + logger.Println("Received CTRL-C, flushing outstanding metrics before exit") + metrics.Flush() + os.Exit(0) + }() // Add metric tags (append to any existing tags on specified metric) metrics.AddMetricTags("foo", []string{"cgm:test"}) metrics.AddMetricTags("baz", []string{"cgm:test"}) - logger.Println("Starting to send metrics") + logger.Println("Starting to send metrics") - // number of "sets" of metrics to send - max := 60 + // number of "sets" of metrics to send + max := 60 - for i := 1; i < max; i++ { - logger.Printf("\tmetric set %d of %d", i, 60) + for i := 1; i < max; i++ { + logger.Printf("\tmetric set %d of %d", i, 60) - metrics.Timing("foo", rnd.Float64()*10) - metrics.Increment("bar") - metrics.Gauge("baz", 10) + metrics.Timing("foo", rnd.Float64()*10) + metrics.Increment("bar") + metrics.Gauge("baz", 10) if i == 35 { // Set metric tags (overwrite current tags on specified metric) @@ -191,23 +188,23 @@ func main() { } time.Sleep(time.Second) - } + } - logger.Println("Flushing any outstanding metrics manually") - metrics.Flush() + logger.Println("Flushing any outstanding metrics manually") + metrics.Flush() } ``` ### HTTP Handler wrapping -``` +```go http.HandleFunc("/", metrics.TrackHTTPLatency("/", handler_func)) ``` ### HTTP latency example -``` +```go package main import ( @@ -225,7 +222,6 @@ func main() { if err != nil { panic(err) } - metrics.Start() http.HandleFunc("/", metrics.TrackHTTPLatency("/", func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "Hello, %s!", r.URL.Path[1:]) @@ -235,4 +231,4 @@ func main() { ``` -Unless otherwise noted, the source files are 
distributed under the BSD-style license found in the LICENSE file. +Unless otherwise noted, the source files are distributed under the BSD-style license found in the [LICENSE](LICENSE) file. diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/README.md b/vendor/github.com/circonus-labs/circonus-gometrics/api/README.md new file mode 100644 index 000000000..8f286b79f --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/README.md @@ -0,0 +1,163 @@ +## Circonus API package + +Full api documentation (for using *this* package) is available at [godoc.org](https://godoc.org/github.com/circonus-labs/circonus-gometrics/api). Links in the lists below go directly to the generic Circonus API documentation for the endpoint. + +### Straight [raw] API access + +* Get +* Post (for creates) +* Put (for updates) +* Delete + +### Helpers for currently supported API endpoints + +> Note, these interfaces are still being actively developed. For example, many of the `New*` methods only return an empty struct; sensible defaults will be added going forward. Other, common helper methods for the various endpoints may be added as use cases emerge. The organization +of the API may change if common use contexts would benefit significantly. + +* [Account](https://login.circonus.com/resources/api/calls/account) + * FetchAccount + * FetchAccounts + * UpdateAccount + * SearchAccounts +* [Acknowledgement](https://login.circonus.com/resources/api/calls/acknowledgement) + * NewAcknowledgement + * FetchAcknowledgement + * FetchAcknowledgements + * UpdateAcknowledgement + * CreateAcknowledgement + * DeleteAcknowledgement + * DeleteAcknowledgementByCID + * SearchAcknowledgements +* [Alert](https://login.circonus.com/resources/api/calls/alert) + * FetchAlert + * FetchAlerts + * SearchAlerts +* [Annotation](https://login.circonus.com/resources/api/calls/annotation) + * NewAnnotation + * FetchAnnotation + * FetchAnnotations + * UpdateAnnotation + * CreateAnnotation + * DeleteAnnotation + * DeleteAnnotationByCID + * SearchAnnotations +* [Broker](https://login.circonus.com/resources/api/calls/broker) + * FetchBroker + * FetchBrokers + * SearchBrokers +* [Check Bundle](https://login.circonus.com/resources/api/calls/check_bundle) + * NewCheckBundle + * FetchCheckBundle + * FetchCheckBundles + * UpdateCheckBundle + * CreateCheckBundle + * DeleteCheckBundle + * DeleteCheckBundleByCID + * SearchCheckBundles +* [Check Bundle Metrics](https://login.circonus.com/resources/api/calls/check_bundle_metrics) + * FetchCheckBundleMetrics + * UpdateCheckBundleMetrics +* [Check](https://login.circonus.com/resources/api/calls/check) + * FetchCheck + * FetchChecks + * SearchChecks +* [Contact Group](https://login.circonus.com/resources/api/calls/contact_group) + * NewContactGroup + * FetchContactGroup + * FetchContactGroups + * UpdateContactGroup + * CreateContactGroup + * DeleteContactGroup + * DeleteContactGroupByCID + * SearchContactGroups +* [Dashboard](https://login.circonus.com/resources/api/calls/dashboard) -- note, this is a work in progress, the methods/types may still change + * NewDashboard + * FetchDashboard + * FetchDashboards + * UpdateDashboard + * CreateDashboard + * DeleteDashboard + * DeleteDashboardByCID + * SearchDashboards +* [Graph](https://login.circonus.com/resources/api/calls/graph) + * NewGraph + * FetchGraph + * FetchGraphs + * UpdateGraph + * CreateGraph + * DeleteGraph + * DeleteGraphByCID + * SearchGraphs +* [Metric Cluster](https://login.circonus.com/resources/api/calls/metric_cluster) 
+ * NewMetricCluster + * FetchMetricCluster + * FetchMetricClusters + * UpdateMetricCluster + * CreateMetricCluster + * DeleteMetricCluster + * DeleteMetricClusterByCID + * SearchMetricClusters +* [Metric](https://login.circonus.com/resources/api/calls/metric) + * FetchMetric + * FetchMetrics + * UpdateMetric + * SearchMetrics +* [Maintenance window](https://login.circonus.com/resources/api/calls/maintenance) + * NewMaintenanceWindow + * FetchMaintenanceWindow + * FetchMaintenanceWindows + * UpdateMaintenanceWindow + * CreateMaintenanceWindow + * DeleteMaintenanceWindow + * DeleteMaintenanceWindowByCID + * SearchMaintenanceWindows +* [Outlier Report](https://login.circonus.com/resources/api/calls/outlier_report) + * NewOutlierReport + * FetchOutlierReport + * FetchOutlierReports + * UpdateOutlierReport + * CreateOutlierReport + * DeleteOutlierReport + * DeleteOutlierReportByCID + * SearchOutlierReports +* [Provision Broker](https://login.circonus.com/resources/api/calls/provision_broker) + * NewProvisionBroker + * FetchProvisionBroker + * UpdateProvisionBroker + * CreateProvisionBroker +* [Rule Set](https://login.circonus.com/resources/api/calls/rule_set) + * NewRuleset + * FetchRuleset + * FetchRulesets + * UpdateRuleset + * CreateRuleset + * DeleteRuleset + * DeleteRulesetByCID + * SearchRulesets +* [Rule Set Group](https://login.circonus.com/resources/api/calls/rule_set_group) + * NewRulesetGroup + * FetchRulesetGroup + * FetchRulesetGroups + * UpdateRulesetGroup + * CreateRulesetGroup + * DeleteRulesetGroup + * DeleteRulesetGroupByCID + * SearchRulesetGroups +* [User](https://login.circonus.com/resources/api/calls/user) + * FetchUser + * FetchUsers + * UpdateUser + * SearchUsers +* [Worksheet](https://login.circonus.com/resources/api/calls/worksheet) + * NewWorksheet + * FetchWorksheet + * FetchWorksheets + * UpdateWorksheet + * CreateWorksheet + * DeleteWorksheet + * DeleteWorksheetByCID + * SearchWorksheets + +--- + +Unless otherwise noted, the source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/account.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/account.go new file mode 100644 index 000000000..dd8ff577d --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/account.go @@ -0,0 +1,181 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Account API support - Fetch and Update +// See: https://login.circonus.com/resources/api/calls/account +// Note: Create and Delete are not supported for Accounts via the API + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// AccountLimit defines a usage limit imposed on account +type AccountLimit struct { + Limit uint `json:"_limit,omitempty"` // uint >=0 + Type string `json:"_type,omitempty"` // string + Used uint `json:"_used,omitempty"` // uint >=0 +} + +// AccountInvite defines outstanding invites +type AccountInvite struct { + Email string `json:"email"` // string + Role string `json:"role"` // string +} + +// AccountUser defines current users +type AccountUser struct { + Role string `json:"role"` // string + UserCID string `json:"user"` // string +} + +// Account defines an account. See https://login.circonus.com/resources/api/calls/account for more information. 
+type Account struct {
+	Address1      *string         `json:"address1,omitempty"`        // string or null
+	Address2      *string         `json:"address2,omitempty"`        // string or null
+	CCEmail       *string         `json:"cc_email,omitempty"`        // string or null
+	CID           string          `json:"_cid,omitempty"`            // string
+	City          *string         `json:"city,omitempty"`            // string or null
+	ContactGroups []string        `json:"_contact_groups,omitempty"` // [] len >= 0
+	Country       string          `json:"country_code,omitempty"`    // string
+	Description   *string         `json:"description,omitempty"`     // string or null
+	Invites       []AccountInvite `json:"invites,omitempty"`         // [] len >= 0
+	Name          string          `json:"name,omitempty"`            // string
+	OwnerCID      string          `json:"_owner,omitempty"`          // string
+	StateProv     *string         `json:"state_prov,omitempty"`      // string or null
+	Timezone      string          `json:"timezone,omitempty"`        // string
+	UIBaseURL     string          `json:"_ui_base_url,omitempty"`    // string
+	Usage         []AccountLimit  `json:"_usage,omitempty"`          // [] len >= 0
+	Users         []AccountUser   `json:"users,omitempty"`           // [] len >= 0
+}
+
+// FetchAccount retrieves account with passed cid. Pass nil for '/account/current'.
+func (a *API) FetchAccount(cid CIDType) (*Account, error) {
+	var accountCID string
+
+	if cid == nil || *cid == "" {
+		accountCID = config.AccountPrefix + "/current"
+	} else {
+		accountCID = string(*cid)
+	}
+
+	matched, err := regexp.MatchString(config.AccountCIDRegex, accountCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid account CID [%s]", accountCID)
+	}
+
+	result, err := a.Get(accountCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] account fetch, received JSON: %s", string(result))
+	}
+
+	account := new(Account)
+	if err := json.Unmarshal(result, account); err != nil {
+		return nil, err
+	}
+
+	return account, nil
+}
+
+// FetchAccounts retrieves all accounts available to the API Token.
+func (a *API) FetchAccounts() (*[]Account, error) {
+	result, err := a.Get(config.AccountPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	var accounts []Account
+	if err := json.Unmarshal(result, &accounts); err != nil {
+		return nil, err
+	}
+
+	return &accounts, nil
+}
+
+// UpdateAccount updates passed account.
+func (a *API) UpdateAccount(cfg *Account) (*Account, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid account config [nil]")
+	}
+
+	accountCID := string(cfg.CID)
+
+	matched, err := regexp.MatchString(config.AccountCIDRegex, accountCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid account CID [%s]", accountCID)
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] account update, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Put(accountCID, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	account := &Account{}
+	if err := json.Unmarshal(result, account); err != nil {
+		return nil, err
+	}
+
+	return account, nil
+}
+
+// SearchAccounts returns accounts matching a filter (search queries are not
+// supported by the account endpoint). Pass nil as filter for all accounts the
+// API Token can access.
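+//
+// Illustrative sketch only (assumes an initialized *API named client; the
+// filter key follows the generic f_<field>_<op> form and is just an example):
+//
+//	filter := SearchFilterType{"f_name_wildcard": []string{"*ops*"}}
+//	accounts, err := client.SearchAccounts(&filter)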
+func (a *API) SearchAccounts(filterCriteria *SearchFilterType) (*[]Account, error) { + q := url.Values{} + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchAccounts() + } + + reqURL := url.URL{ + Path: config.AccountPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var accounts []Account + if err := json.Unmarshal(result, &accounts); err != nil { + return nil, err + } + + return &accounts, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/acknowledgement.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/acknowledgement.go new file mode 100644 index 000000000..f6da51d4d --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/acknowledgement.go @@ -0,0 +1,190 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Acknowledgement API support - Fetch, Create, Update, Delete*, and Search +// See: https://login.circonus.com/resources/api/calls/acknowledgement +// * : delete (cancel) by updating with AcknowledgedUntil set to 0 + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// Acknowledgement defines a acknowledgement. See https://login.circonus.com/resources/api/calls/acknowledgement for more information. +type Acknowledgement struct { + AcknowledgedBy string `json:"_acknowledged_by,omitempty"` // string + AcknowledgedOn uint `json:"_acknowledged_on,omitempty"` // uint + AcknowledgedUntil interface{} `json:"acknowledged_until,omitempty"` // NOTE received as uint; can be set using string or uint + Active bool `json:"_active,omitempty"` // bool + AlertCID string `json:"alert,omitempty"` // string + CID string `json:"_cid,omitempty"` // string + LastModified uint `json:"_last_modified,omitempty"` // uint + LastModifiedBy string `json:"_last_modified_by,omitempty"` // string + Notes string `json:"notes,omitempty"` // string +} + +// NewAcknowledgement returns new Acknowledgement (with defaults, if applicable). +func NewAcknowledgement() *Acknowledgement { + return &Acknowledgement{} +} + +// FetchAcknowledgement retrieves acknowledgement with passed cid. +func (a *API) FetchAcknowledgement(cid CIDType) (*Acknowledgement, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid acknowledgement CID [none]") + } + + acknowledgementCID := string(*cid) + + matched, err := regexp.MatchString(config.AcknowledgementCIDRegex, acknowledgementCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid acknowledgement CID [%s]", acknowledgementCID) + } + + result, err := a.Get(acknowledgementCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] acknowledgement fetch, received JSON: %s", string(result)) + } + + acknowledgement := &Acknowledgement{} + if err := json.Unmarshal(result, acknowledgement); err != nil { + return nil, err + } + + return acknowledgement, nil +} + +// FetchAcknowledgements retrieves all acknowledgements available to the API Token. 
+func (a *API) FetchAcknowledgements() (*[]Acknowledgement, error) { + result, err := a.Get(config.AcknowledgementPrefix) + if err != nil { + return nil, err + } + + var acknowledgements []Acknowledgement + if err := json.Unmarshal(result, &acknowledgements); err != nil { + return nil, err + } + + return &acknowledgements, nil +} + +// UpdateAcknowledgement updates passed acknowledgement. +func (a *API) UpdateAcknowledgement(cfg *Acknowledgement) (*Acknowledgement, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid acknowledgement config [nil]") + } + + acknowledgementCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.AcknowledgementCIDRegex, acknowledgementCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid acknowledgement CID [%s]", acknowledgementCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] acknowledgement update, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(acknowledgementCID, jsonCfg) + if err != nil { + return nil, err + } + + acknowledgement := &Acknowledgement{} + if err := json.Unmarshal(result, acknowledgement); err != nil { + return nil, err + } + + return acknowledgement, nil +} + +// CreateAcknowledgement creates a new acknowledgement. +func (a *API) CreateAcknowledgement(cfg *Acknowledgement) (*Acknowledgement, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid acknowledgement config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + result, err := a.Post(config.AcknowledgementPrefix, jsonCfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] acknowledgement create, sending JSON: %s", string(jsonCfg)) + } + + acknowledgement := &Acknowledgement{} + if err := json.Unmarshal(result, acknowledgement); err != nil { + return nil, err + } + + return acknowledgement, nil +} + +// SearchAcknowledgements returns acknowledgements matching +// the specified search query and/or filter. If nil is passed for +// both parameters all acknowledgements will be returned. +func (a *API) SearchAcknowledgements(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Acknowledgement, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchAcknowledgements() + } + + reqURL := url.URL{ + Path: config.AcknowledgementPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var acknowledgements []Acknowledgement + if err := json.Unmarshal(result, &acknowledgements); err != nil { + return nil, err + } + + return &acknowledgements, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/alert.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/alert.go new file mode 100644 index 000000000..a242d3d85 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/alert.go @@ -0,0 +1,131 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Alert API support - Fetch and Search +// See: https://login.circonus.com/resources/api/calls/alert + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// Alert defines a alert. See https://login.circonus.com/resources/api/calls/alert for more information. +type Alert struct { + AcknowledgementCID *string `json:"_acknowledgement,omitempty"` // string or null + AlertURL string `json:"_alert_url,omitempty"` // string + BrokerCID string `json:"_broker,omitempty"` // string + CheckCID string `json:"_check,omitempty"` // string + CheckName string `json:"_check_name,omitempty"` // string + CID string `json:"_cid,omitempty"` // string + ClearedOn *uint `json:"_cleared_on,omitempty"` // uint or null + ClearedValue *string `json:"_cleared_value,omitempty"` // string or null + Maintenance []string `json:"_maintenance,omitempty"` // [] len >= 0 + MetricLinkURL *string `json:"_metric_link,omitempty"` // string or null + MetricName string `json:"_metric_name,omitempty"` // string + MetricNotes *string `json:"_metric_notes,omitempty"` // string or null + OccurredOn uint `json:"_occurred_on,omitempty"` // uint + RuleSetCID string `json:"_rule_set,omitempty"` // string + Severity uint `json:"_severity,omitempty"` // uint + Tags []string `json:"_tags,omitempty"` // [] len >= 0 + Value string `json:"_value,omitempty"` // string +} + +// NewAlert returns a new alert (with defaults, if applicable) +func NewAlert() *Alert { + return &Alert{} +} + +// FetchAlert retrieves alert with passed cid. +func (a *API) FetchAlert(cid CIDType) (*Alert, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid alert CID [none]") + } + + alertCID := string(*cid) + + matched, err := regexp.MatchString(config.AlertCIDRegex, alertCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid alert CID [%s]", alertCID) + } + + result, err := a.Get(alertCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch alert, received JSON: %s", string(result)) + } + + alert := &Alert{} + if err := json.Unmarshal(result, alert); err != nil { + return nil, err + } + + return alert, nil +} + +// FetchAlerts retrieves all alerts available to the API Token. +func (a *API) FetchAlerts() (*[]Alert, error) { + result, err := a.Get(config.AlertPrefix) + if err != nil { + return nil, err + } + + var alerts []Alert + if err := json.Unmarshal(result, &alerts); err != nil { + return nil, err + } + + return &alerts, nil +} + +// SearchAlerts returns alerts matching the specified search query +// and/or filter. If nil is passed for both parameters all alerts +// will be returned. 
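+//
+// Illustrative sketch only (assumes an initialized *API named client; the
+// query string is just an example of a Circonus search expression):
+//
+//	query := SearchQueryType("(active:1)")
+//	alerts, err := client.SearchAlerts(&query, nil)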
+func (a *API) SearchAlerts(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Alert, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchAlerts() + } + + reqURL := url.URL{ + Path: config.AlertPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var alerts []Alert + if err := json.Unmarshal(result, &alerts); err != nil { + return nil, err + } + + return &alerts, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/annotation.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/annotation.go new file mode 100644 index 000000000..589ec6da9 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/annotation.go @@ -0,0 +1,223 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Annotation API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/annotation + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// Annotation defines a annotation. See https://login.circonus.com/resources/api/calls/annotation for more information. +type Annotation struct { + Category string `json:"category"` // string + CID string `json:"_cid,omitempty"` // string + Created uint `json:"_created,omitempty"` // uint + Description string `json:"description"` // string + LastModified uint `json:"_last_modified,omitempty"` // uint + LastModifiedBy string `json:"_last_modified_by,omitempty"` // string + RelatedMetrics []string `json:"rel_metrics"` // [] len >= 0 + Start uint `json:"start"` // uint + Stop uint `json:"stop"` // uint + Title string `json:"title"` // string +} + +// NewAnnotation returns a new Annotation (with defaults, if applicable) +func NewAnnotation() *Annotation { + return &Annotation{} +} + +// FetchAnnotation retrieves annotation with passed cid. +func (a *API) FetchAnnotation(cid CIDType) (*Annotation, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid annotation CID [none]") + } + + annotationCID := string(*cid) + + matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid annotation CID [%s]", annotationCID) + } + + result, err := a.Get(annotationCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch annotation, received JSON: %s", string(result)) + } + + annotation := &Annotation{} + if err := json.Unmarshal(result, annotation); err != nil { + return nil, err + } + + return annotation, nil +} + +// FetchAnnotations retrieves all annotations available to the API Token. 
+func (a *API) FetchAnnotations() (*[]Annotation, error) { + result, err := a.Get(config.AnnotationPrefix) + if err != nil { + return nil, err + } + + var annotations []Annotation + if err := json.Unmarshal(result, &annotations); err != nil { + return nil, err + } + + return &annotations, nil +} + +// UpdateAnnotation updates passed annotation. +func (a *API) UpdateAnnotation(cfg *Annotation) (*Annotation, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid annotation config [nil]") + } + + annotationCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid annotation CID [%s]", annotationCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update annotation, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(annotationCID, jsonCfg) + if err != nil { + return nil, err + } + + annotation := &Annotation{} + if err := json.Unmarshal(result, annotation); err != nil { + return nil, err + } + + return annotation, nil +} + +// CreateAnnotation creates a new annotation. +func (a *API) CreateAnnotation(cfg *Annotation) (*Annotation, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid annotation config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create annotation, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.AnnotationPrefix, jsonCfg) + if err != nil { + return nil, err + } + + annotation := &Annotation{} + if err := json.Unmarshal(result, annotation); err != nil { + return nil, err + } + + return annotation, nil +} + +// DeleteAnnotation deletes passed annotation. +func (a *API) DeleteAnnotation(cfg *Annotation) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid annotation config [nil]") + } + + return a.DeleteAnnotationByCID(CIDType(&cfg.CID)) +} + +// DeleteAnnotationByCID deletes annotation with passed cid. +func (a *API) DeleteAnnotationByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid annotation CID [none]") + } + + annotationCID := string(*cid) + + matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid annotation CID [%s]", annotationCID) + } + + _, err = a.Delete(annotationCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchAnnotations returns annotations matching the specified +// search query and/or filter. If nil is passed for both parameters +// all annotations will be returned. 
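+//
+// Editorial usage sketch (not part of the upstream source; the search
+// expression is a hypothetical example of the API's search syntax):
+//
+//	search := SearchQueryType(`(category:"deploys")`)
+//	annotations, err := client.SearchAnnotations(&search, nil)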
+func (a *API) SearchAnnotations(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Annotation, error) {
+	q := url.Values{}
+
+	if searchCriteria != nil && *searchCriteria != "" {
+		q.Set("search", string(*searchCriteria))
+	}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchAnnotations()
+	}
+
+	reqURL := url.URL{
+		Path:     config.AnnotationPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var annotations []Annotation
+	if err := json.Unmarshal(result, &annotations); err != nil {
+		return nil, err
+	}
+
+	return &annotations, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go
index f640c54d0..ee6a411c9 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go
@@ -2,24 +2,41 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package api provides methods for interacting with the Circonus API
 package api
 
 import (
 	"bytes"
+	"context"
+	crand "crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
 	"errors"
 	"fmt"
 	"io/ioutil"
 	"log"
+	"math"
+	"math/big"
+	"math/rand"
+	"net"
 	"net/http"
 	"net/url"
 	"os"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/hashicorp/go-retryablehttp"
 )
 
+func init() {
+	n, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+	if err != nil {
+		rand.Seed(time.Now().UTC().UnixNano())
+		return
+	}
+	rand.Seed(n.Int64())
+}
+
 const (
 	// a few sensible defaults
 	defaultAPIURL = "https://api.circonus.com/v2"
@@ -35,44 +52,76 @@ type TokenKeyType string
 // TokenAppType - Circonus API Token app name
 type TokenAppType string
 
-// IDType Circonus object id (numeric portion of cid)
-type IDType int
+// TokenAccountIDType - Circonus API Token account id
+type TokenAccountIDType string
 
 // CIDType Circonus object cid
-type CIDType string
+type CIDType *string
+
+// IDType Circonus object id
+type IDType int
 
 // URLType submission url type
 type URLType string
 
-// SearchQueryType search query
+// SearchQueryType search query (see: https://login.circonus.com/resources/api#searching)
 type SearchQueryType string
 
-// SearchFilterType search filter
-type SearchFilterType string
+// SearchFilterType search filter (see: https://login.circonus.com/resources/api#filtering)
+type SearchFilterType map[string][]string
 
 // TagType search/select/custom tag(s) type
 type TagType []string
 
 // Config options for Circonus API
 type Config struct {
-	URL      string
+	// URL defines the API URL - default https://api.circonus.com/v2/
+	URL string
+
+	// TokenKey defines the key to use when communicating with the API
 	TokenKey string
+
+	// TokenApp defines the app to use when communicating with the API
 	TokenApp string
-	Log      *log.Logger
-	Debug    bool
+
+	// TokenAccountID defines the account id to use when communicating with the API (optional)
+	TokenAccountID string
+
+	// CACert is deprecated; use TLSConfig instead
+	CACert *x509.CertPool
+
+	// TLSConfig defines a custom tls configuration to use when communicating with the API
+	TLSConfig *tls.Config
+
+	Log   *log.Logger
+	Debug bool
 }
 
 // API Circonus API
 type API struct {
-	apiURL *url.URL
-	key    TokenKeyType
-	app    TokenAppType
-	Debug  bool
-	Log    *log.Logger
+	apiURL                  *url.URL
+	key                     TokenKeyType
+	app                     TokenAppType
+	accountID               TokenAccountIDType
+	caCert                  *x509.CertPool
+	tlsConfig               *tls.Config
+	Debug                   bool
+	Log                     *log.Logger
+	useExponentialBackoff   bool
+	useExponentialBackoffmu sync.Mutex
 }
 
-// NewAPI returns a new Circonus API
+// NewClient returns a new Circonus API (alias for New)
+func NewClient(ac *Config) (*API, error) {
+	return New(ac)
+}
+
+// NewAPI returns a new Circonus API (alias for New)
 func NewAPI(ac *Config) (*API, error) {
+	return New(ac)
+}
+
+// New returns a new Circonus API
+func New(ac *Config) (*API, error) {
 
 	if ac == nil {
 		return nil, errors.New("Invalid API configuration (nil)")
@@ -88,6 +137,8 @@ func NewAPI(ac *Config) (*API, error) {
 		app = defaultAPIApp
 	}
 
+	acctID := TokenAccountIDType(ac.TokenAccountID)
+
 	au := string(ac.URL)
 	if au == "" {
 		au = defaultAPIURL
@@ -97,6 +148,7 @@ func NewAPI(ac *Config) (*API, error) {
 		au = fmt.Sprintf("https://%s/v2", ac.URL)
 	}
 	if last := len(au) - 1; last >= 0 && au[last] == '/' {
+		// strip off trailing '/'
 		au = au[:last]
 	}
 	apiURL, err := url.Parse(au)
@@ -104,7 +156,17 @@ func NewAPI(ac *Config) (*API, error) {
 		return nil, err
 	}
 
-	a := &API{apiURL, key, app, ac.Debug, ac.Log}
+	a := &API{
+		apiURL:                apiURL,
+		key:                   key,
+		app:                   app,
+		accountID:             acctID,
+		caCert:                ac.CACert,
+		tlsConfig:             ac.TLSConfig,
+		Debug:                 ac.Debug,
+		Log:                   ac.Log,
+		useExponentialBackoff: false,
+	}
 
 	a.Debug = ac.Debug
 	a.Log = ac.Log
@@ -118,51 +180,111 @@ func NewAPI(ac *Config) (*API, error) {
 	return a, nil
 }
 
+// EnableExponentialBackoff enables exponential backoff for subsequent API
+// calls; backoff remains in effect for all API calls until it is disabled.
+func (a *API) EnableExponentialBackoff() {
+	a.useExponentialBackoffmu.Lock()
+	a.useExponentialBackoff = true
+	a.useExponentialBackoffmu.Unlock()
+}
+
+// DisableExponentialBackoff disables use of exponential backoff. If a request using
+// exponential backoff is currently running, it will stop using exponential backoff
+// on its next iteration (if needed).
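+//
+// Editorial usage sketch (not part of the upstream source): wrap a call
+// that is likely to be rate limited, then restore the default retry
+// policy. `client` is a hypothetical *API from New().
+//
+//	client.EnableExponentialBackoff()
+//	result, err := client.Get("/check_bundle")
+//	client.DisableExponentialBackoff()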
+func (a *API) DisableExponentialBackoff() { + a.useExponentialBackoffmu.Lock() + a.useExponentialBackoff = false + a.useExponentialBackoffmu.Unlock() +} + // Get API request func (a *API) Get(reqPath string) ([]byte, error) { - return a.apiCall("GET", reqPath, nil) + return a.apiRequest("GET", reqPath, nil) } // Delete API request func (a *API) Delete(reqPath string) ([]byte, error) { - return a.apiCall("DELETE", reqPath, nil) + return a.apiRequest("DELETE", reqPath, nil) } // Post API request func (a *API) Post(reqPath string, data []byte) ([]byte, error) { - return a.apiCall("POST", reqPath, data) + return a.apiRequest("POST", reqPath, data) } // Put API request func (a *API) Put(reqPath string, data []byte) ([]byte, error) { - return a.apiCall("PUT", reqPath, data) + return a.apiRequest("PUT", reqPath, data) +} + +func backoff(interval uint) float64 { + return math.Floor(((float64(interval) * (1 + rand.Float64())) / 2) + .5) +} + +// apiRequest manages retry strategy for exponential backoffs +func (a *API) apiRequest(reqMethod string, reqPath string, data []byte) ([]byte, error) { + backoffs := []uint{2, 4, 8, 16, 32} + attempts := 0 + success := false + + var result []byte + var err error + + for !success { + result, err = a.apiCall(reqMethod, reqPath, data) + if err == nil { + success = true + } + + // break and return error if not using exponential backoff + if err != nil { + if !a.useExponentialBackoff { + break + } + if strings.Contains(err.Error(), "code 403") { + break + } + } + + if !success { + var wait float64 + if attempts >= len(backoffs) { + wait = backoff(backoffs[len(backoffs)-1]) + } else { + wait = backoff(backoffs[attempts]) + } + attempts++ + a.Log.Printf("[WARN] API call failed %s, retrying in %d seconds.\n", err.Error(), uint(wait)) + time.Sleep(time.Duration(wait) * time.Second) + } + } + + return result, err } // apiCall call Circonus API func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, error) { - dataReader := bytes.NewReader(data) reqURL := a.apiURL.String() + if reqPath == "" { + return nil, errors.New("Invalid URL path") + } if reqPath[:1] != "/" { reqURL += "/" } - if reqPath[:3] == "/v2" { - reqURL += reqPath[3:len(reqPath)] + if len(reqPath) >= 3 && reqPath[:3] == "/v2" { + reqURL += reqPath[3:] } else { reqURL += reqPath } - req, err := retryablehttp.NewRequest(reqMethod, reqURL, dataReader) - if err != nil { - return nil, fmt.Errorf("[ERROR] creating API request: %s %+v", reqURL, err) - } - req.Header.Add("Accept", "application/json") - req.Header.Add("X-Circonus-Auth-Token", string(a.key)) - req.Header.Add("X-Circonus-App-Name", string(a.app)) - // keep last HTTP error in the event of retry failure var lastHTTPError error - retryPolicy := func(resp *http.Response, err error) (bool, error) { + retryPolicy := func(ctx context.Context, resp *http.Response, err error) (bool, error) { + if ctxErr := ctx.Err(); ctxErr != nil { + return false, ctxErr + } + if err != nil { lastHTTPError = err return true, err @@ -172,24 +294,83 @@ func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, er // errors and may relate to outages on the server side. This will catch // invalid response codes as well, like 0 and 999. // Retry on 429 (rate limit) as well. - if resp.StatusCode == 0 || resp.StatusCode >= 500 || resp.StatusCode == 429 { + if resp.StatusCode == 0 || // wtf?! 
+ resp.StatusCode >= 500 || // rutroh + resp.StatusCode == 429 { // rate limit body, readErr := ioutil.ReadAll(resp.Body) if readErr != nil { - lastHTTPError = fmt.Errorf("- last HTTP error: %d %+v", resp.StatusCode, readErr) + lastHTTPError = fmt.Errorf("- response: %d %s", resp.StatusCode, readErr.Error()) } else { - lastHTTPError = fmt.Errorf("- last HTTP error: %d %s", resp.StatusCode, string(body)) + lastHTTPError = fmt.Errorf("- response: %d %s", resp.StatusCode, strings.TrimSpace(string(body))) } return true, nil } return false, nil } + dataReader := bytes.NewReader(data) + + req, err := retryablehttp.NewRequest(reqMethod, reqURL, dataReader) + if err != nil { + return nil, fmt.Errorf("[ERROR] creating API request: %s %+v", reqURL, err) + } + req.Header.Add("Accept", "application/json") + req.Header.Add("X-Circonus-Auth-Token", string(a.key)) + req.Header.Add("X-Circonus-App-Name", string(a.app)) + if string(a.accountID) != "" { + req.Header.Add("X-Circonus-Account-ID", string(a.accountID)) + } + client := retryablehttp.NewClient() - client.RetryWaitMin = minRetryWait - client.RetryWaitMax = maxRetryWait - client.RetryMax = maxRetries + if a.apiURL.Scheme == "https" { + var tlscfg *tls.Config + if a.tlsConfig != nil { // preference full custom tls config + tlscfg = a.tlsConfig + } else if a.caCert != nil { + tlscfg = &tls.Config{RootCAs: a.caCert} + } + client.HTTPClient.Transport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlscfg, + DisableKeepAlives: true, + MaxIdleConnsPerHost: -1, + DisableCompression: true, + } + } else { + client.HTTPClient.Transport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + DisableKeepAlives: true, + MaxIdleConnsPerHost: -1, + DisableCompression: true, + } + } + + a.useExponentialBackoffmu.Lock() + eb := a.useExponentialBackoff + a.useExponentialBackoffmu.Unlock() + + if eb { + // limit to one request if using exponential backoff + client.RetryWaitMin = 1 + client.RetryWaitMax = 2 + client.RetryMax = 0 + } else { + client.RetryWaitMin = minRetryWait + client.RetryWaitMax = maxRetryWait + client.RetryMax = maxRetries + } + // retryablehttp only groks log or no log - // but, outputs everything as [DEBUG] messages if a.Debug { client.Logger = a.Log } else { diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go index 76dfebeca..bc444e317 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go @@ -2,51 +2,70 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+// Broker API support - Fetch and Search +// See: https://login.circonus.com/resources/api/calls/broker + package api import ( "encoding/json" "fmt" - "strings" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" ) -// BrokerDetail instance attributes +// BrokerDetail defines instance attributes type BrokerDetail struct { - CN string `json:"cn"` - ExternalHost string `json:"external_host"` - ExternalPort int `json:"external_port"` - IP string `json:"ipaddress"` - MinVer int `json:"minimum_version_required"` - Modules []string `json:"modules"` - Port int `json:"port"` - Skew string `json:"skew"` - Status string `json:"status"` - Version int `json:"version"` + ClusterIP *string `json:"cluster_ip"` // string or null + CN string `json:"cn"` // string + ExternalHost *string `json:"external_host"` // string or null + ExternalPort uint16 `json:"external_port"` // uint16 + IP *string `json:"ipaddress"` // string or null + MinVer uint `json:"minimum_version_required"` // uint + Modules []string `json:"modules"` // [] len >= 0 + Port *uint16 `json:"port"` // uint16 or null + Skew *string `json:"skew"` // BUG doc: floating point number, api object: string or null + Status string `json:"status"` // string + Version *uint `json:"version"` // uint or null } -// Broker definition +// Broker defines a broker. See https://login.circonus.com/resources/api/calls/broker for more information. type Broker struct { - Cid string `json:"_cid"` - Details []BrokerDetail `json:"_details"` - Latitude string `json:"_latitude"` - Longitude string `json:"_longitude"` - Name string `json:"_name"` - Tags []string `json:"_tags"` - Type string `json:"_type"` + CID string `json:"_cid"` // string + Details []BrokerDetail `json:"_details"` // [] len >= 1 + Latitude *string `json:"_latitude"` // string or null + Longitude *string `json:"_longitude"` // string or null + Name string `json:"_name"` // string + Tags []string `json:"_tags"` // [] len >= 0 + Type string `json:"_type"` // string } -// FetchBrokerByID fetch a broker configuration by [group]id -func (a *API) FetchBrokerByID(id IDType) (*Broker, error) { - cid := CIDType(fmt.Sprintf("/broker/%d", id)) - return a.FetchBrokerByCID(cid) -} +// FetchBroker retrieves broker with passed cid. 
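+//
+// Editorial usage sketch (not part of the upstream source; the CID value
+// is hypothetical). CIDType is *string, so pass the address of a string:
+//
+//	cid := "/broker/1234"
+//	broker, err := client.FetchBroker(CIDType(&cid))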
+func (a *API) FetchBroker(cid CIDType) (*Broker, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid broker CID [none]") + } -// FetchBrokerByCID fetch a broker configuration by cid -func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) { - result, err := a.Get(string(cid)) + brokerCID := string(*cid) + + matched, err := regexp.MatchString(config.BrokerCIDRegex, brokerCID) if err != nil { return nil, err } + if !matched { + return nil, fmt.Errorf("Invalid broker CID [%s]", brokerCID) + } + + result, err := a.Get(brokerCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch broker, received JSON: %s", string(result)) + } response := new(Broker) if err := json.Unmarshal(result, &response); err != nil { @@ -57,32 +76,9 @@ func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) { } -// FetchBrokerListByTag return list of brokers with a specific tag -func (a *API) FetchBrokerListByTag(searchTag TagType) ([]Broker, error) { - query := SearchQueryType(fmt.Sprintf("f__tags_has=%s", strings.Replace(strings.Join(searchTag, ","), ",", "&f__tags_has=", -1))) - return a.BrokerSearch(query) -} - -// BrokerSearch return a list of brokers matching a query/filter -func (a *API) BrokerSearch(query SearchQueryType) ([]Broker, error) { - queryURL := fmt.Sprintf("/broker?%s", string(query)) - - result, err := a.Get(queryURL) - if err != nil { - return nil, err - } - - var brokers []Broker - if err := json.Unmarshal(result, &brokers); err != nil { - return nil, err - } - - return brokers, nil -} - -// FetchBrokerList return list of all brokers available to the api token/app -func (a *API) FetchBrokerList() ([]Broker, error) { - result, err := a.Get("/broker") +// FetchBrokers returns all brokers available to the API Token. +func (a *API) FetchBrokers() (*[]Broker, error) { + result, err := a.Get(config.BrokerPrefix) if err != nil { return nil, err } @@ -92,5 +88,45 @@ func (a *API) FetchBrokerList() ([]Broker, error) { return nil, err } - return response, nil + return &response, nil +} + +// SearchBrokers returns brokers matching the specified search +// query and/or filter. If nil is passed for both parameters +// all brokers will be returned. +func (a *API) SearchBrokers(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Broker, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchBrokers() + } + + reqURL := url.URL{ + Path: config.BrokerPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var brokers []Broker + if err := json.Unmarshal(result, &brokers); err != nil { + return nil, err + } + + return &brokers, nil } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go index 0887caf3d..047d71935 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/check.go @@ -2,42 +2,58 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+// Check API support - Fetch and Search +// See: https://login.circonus.com/resources/api/calls/check +// Notes: checks do not directly support create, update, and delete - see check bundle. + package api import ( "encoding/json" "fmt" "net/url" - "strings" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" ) -// CheckDetails is an arbitrary json structure, we would only care about submission_url -type CheckDetails struct { - SubmissionURL string `json:"submission_url"` -} +// CheckDetails contains [undocumented] check type specific information +type CheckDetails map[config.Key]string -// Check definition +// Check defines a check. See https://login.circonus.com/resources/api/calls/check for more information. type Check struct { - Cid string `json:"_cid"` - Active bool `json:"_active"` - BrokerCid string `json:"_broker"` - CheckBundleCid string `json:"_check_bundle"` - CheckUUID string `json:"_check_uuid"` - Details CheckDetails `json:"_details"` + Active bool `json:"_active"` // bool + BrokerCID string `json:"_broker"` // string + CheckBundleCID string `json:"_check_bundle"` // string + CheckUUID string `json:"_check_uuid"` // string + CID string `json:"_cid"` // string + Details CheckDetails `json:"_details"` // NOTE contents of details are check type specific, map len >= 0 } -// FetchCheckByID fetch a check configuration by id -func (a *API) FetchCheckByID(id IDType) (*Check, error) { - cid := CIDType(fmt.Sprintf("/check/%d", int(id))) - return a.FetchCheckByCID(cid) -} +// FetchCheck retrieves check with passed cid. +func (a *API) FetchCheck(cid CIDType) (*Check, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid check CID [none]") + } -// FetchCheckByCID fetch a check configuration by cid -func (a *API) FetchCheckByCID(cid CIDType) (*Check, error) { - result, err := a.Get(string(cid)) + checkCID := string(*cid) + + matched, err := regexp.MatchString(config.CheckCIDRegex, checkCID) if err != nil { return nil, err } + if !matched { + return nil, fmt.Errorf("Invalid check CID [%s]", checkCID) + } + + result, err := a.Get(checkCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch check, received JSON: %s", string(result)) + } check := new(Check) if err := json.Unmarshal(result, check); err != nil { @@ -47,62 +63,49 @@ func (a *API) FetchCheckByCID(cid CIDType) (*Check, error) { return check, nil } -// FetchCheckBySubmissionURL fetch a check configuration by submission_url -func (a *API) FetchCheckBySubmissionURL(submissionURL URLType) (*Check, error) { - - u, err := url.Parse(string(submissionURL)) +// FetchChecks retrieves all checks available to the API Token. 
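+//
+// Editorial usage sketch (not part of the upstream source; `client` is a
+// hypothetical *API from New()):
+//
+//	checks, err := client.FetchChecks()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Printf("%d checks visible to this token", len(*checks))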
+func (a *API) FetchChecks() (*[]Check, error) { + result, err := a.Get(config.CheckPrefix) if err != nil { return nil, err } - // valid trap url: scheme://host[:port]/module/httptrap/UUID/secret - - // does it smell like a valid trap url path - if !strings.Contains(u.Path, "/module/httptrap/") { - return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', unrecognized path", submissionURL) - } - - // extract uuid - pathParts := strings.Split(strings.Replace(u.Path, "/module/httptrap/", "", 1), "/") - if len(pathParts) != 2 { - return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', UUID not where expected", submissionURL) - } - uuid := pathParts[0] - - filter := SearchFilterType(fmt.Sprintf("f__check_uuid=%s", uuid)) - - checks, err := a.CheckFilterSearch(filter) - if err != nil { + var checks []Check + if err := json.Unmarshal(result, &checks); err != nil { return nil, err } - if len(checks) == 0 { - return nil, fmt.Errorf("[ERROR] No checks found with UUID %s", uuid) + return &checks, nil +} + +// SearchChecks returns checks matching the specified search query +// and/or filter. If nil is passed for both parameters all checks +// will be returned. +func (a *API) SearchChecks(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Check, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) } - numActive := 0 - checkID := -1 - - for idx, check := range checks { - if check.Active { - numActive++ - checkID = idx + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } } } - if numActive > 1 { - return nil, fmt.Errorf("[ERROR] Multiple checks with same UUID %s", uuid) + if q.Encode() == "" { + return a.FetchChecks() } - return &checks[checkID], nil + reqURL := url.URL{ + Path: config.CheckPrefix, + RawQuery: q.Encode(), + } -} - -// CheckSearch returns a list of checks matching a search query -func (a *API) CheckSearch(query SearchQueryType) ([]Check, error) { - queryURL := fmt.Sprintf("/check?search=%s", string(query)) - - result, err := a.Get(queryURL) + result, err := a.Get(reqURL.String()) if err != nil { return nil, err } @@ -112,22 +115,5 @@ func (a *API) CheckSearch(query SearchQueryType) ([]Check, error) { return nil, err } - return checks, nil -} - -// CheckFilterSearch returns a list of checks matching a filter -func (a *API) CheckFilterSearch(filter SearchFilterType) ([]Check, error) { - filterURL := fmt.Sprintf("/check?%s", string(filter)) - - result, err := a.Get(filterURL) - if err != nil { - return nil, err - } - - var checks []Check - if err := json.Unmarshal(result, &checks); err != nil { - return nil, err - } - - return checks, nil + return &checks, nil } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go new file mode 100644 index 000000000..c202853c2 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go @@ -0,0 +1,255 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// Check bundle API support - Fetch, Create, Update, Delete, and Search
+// See: https://login.circonus.com/resources/api/calls/check_bundle
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// CheckBundleMetric individual metric configuration
+type CheckBundleMetric struct {
+	Name   string   `json:"name"`             // string
+	Result *string  `json:"result,omitempty"` // string or null, NOTE not settable - return/information value only
+	Status string   `json:"status,omitempty"` // string
+	Tags   []string `json:"tags"`             // [] len >= 0
+	Type   string   `json:"type"`             // string
+	Units  *string  `json:"units,omitempty"`  // string or null
+}
+
+// CheckBundleConfig contains the check type specific configuration settings
+// as k/v pairs (see https://login.circonus.com/resources/api/calls/check_bundle
+// for the specific settings available for each distinct check type)
+type CheckBundleConfig map[config.Key]string
+
+// CheckBundle defines a check bundle. See https://login.circonus.com/resources/api/calls/check_bundle for more information.
+type CheckBundle struct {
+	Brokers            []string            `json:"brokers"`                            // [] len >= 0
+	Checks             []string            `json:"_checks,omitempty"`                  // [] len >= 0
+	CheckUUIDs         []string            `json:"_check_uuids,omitempty"`             // [] len >= 0
+	CID                string              `json:"_cid,omitempty"`                     // string
+	Config             CheckBundleConfig   `json:"config"`                             // NOTE contents of config are check type specific, map len >= 0
+	Created            uint                `json:"_created,omitempty"`                 // uint
+	DisplayName        string              `json:"display_name"`                       // string
+	LastModifedBy      string              `json:"_last_modifed_by,omitempty"`         // string
+	LastModified       uint                `json:"_last_modified,omitempty"`           // uint
+	MetricLimit        int                 `json:"metric_limit,omitempty"`             // int
+	Metrics            []CheckBundleMetric `json:"metrics"`                            // [] >= 0
+	Notes              *string             `json:"notes,omitempty"`                    // string or null
+	Period             uint                `json:"period,omitempty"`                   // uint
+	ReverseConnectURLs []string            `json:"_reverse_connection_urls,omitempty"` // [] len >= 0
+	Status             string              `json:"status,omitempty"`                   // string
+	Tags               []string            `json:"tags,omitempty"`                     // [] len >= 0
+	Target             string              `json:"target"`                             // string
+	Timeout            float32             `json:"timeout,omitempty"`                  // float32
+	Type               string              `json:"type"`                               // string
+}
+
+// NewCheckBundle returns a new CheckBundle (with defaults, if applicable)
+func NewCheckBundle() *CheckBundle {
+	return &CheckBundle{
+		Config:      make(CheckBundleConfig, config.DefaultConfigOptionsSize),
+		MetricLimit: config.DefaultCheckBundleMetricLimit,
+		Period:      config.DefaultCheckBundlePeriod,
+		Timeout:     config.DefaultCheckBundleTimeout,
+		Status:      config.DefaultCheckBundleStatus,
+	}
+}
+
+// FetchCheckBundle retrieves check bundle with passed cid.
+func (a *API) FetchCheckBundle(cid CIDType) (*CheckBundle, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid check bundle CID [none]")
+	}
+
+	bundleCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid check bundle CID [%s]", bundleCID)
+	}
+
+	result, err := a.Get(bundleCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch check bundle, received JSON: %s", string(result))
+	}
+
+	checkBundle := &CheckBundle{}
+	if err := json.Unmarshal(result, checkBundle); err != nil {
+		return nil, err
+	}
+
+	return checkBundle, nil
+}
+
+// FetchCheckBundles retrieves all check bundles available to the API Token.
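+//
+// Editorial usage sketch (not part of the upstream source; `client` is a
+// hypothetical *API from New()):
+//
+//	bundles, err := client.FetchCheckBundles()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, bundle := range *bundles {
+//		log.Println(bundle.DisplayName)
+//	}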
+func (a *API) FetchCheckBundles() (*[]CheckBundle, error) { + result, err := a.Get(config.CheckBundlePrefix) + if err != nil { + return nil, err + } + + var checkBundles []CheckBundle + if err := json.Unmarshal(result, &checkBundles); err != nil { + return nil, err + } + + return &checkBundles, nil +} + +// UpdateCheckBundle updates passed check bundle. +func (a *API) UpdateCheckBundle(cfg *CheckBundle) (*CheckBundle, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid check bundle config [nil]") + } + + bundleCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid check bundle CID [%s]", bundleCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update check bundle, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(bundleCID, jsonCfg) + if err != nil { + return nil, err + } + + checkBundle := &CheckBundle{} + if err := json.Unmarshal(result, checkBundle); err != nil { + return nil, err + } + + return checkBundle, nil +} + +// CreateCheckBundle creates a new check bundle (check). +func (a *API) CreateCheckBundle(cfg *CheckBundle) (*CheckBundle, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid check bundle config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create check bundle, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.CheckBundlePrefix, jsonCfg) + if err != nil { + return nil, err + } + + checkBundle := &CheckBundle{} + if err := json.Unmarshal(result, checkBundle); err != nil { + return nil, err + } + + return checkBundle, nil +} + +// DeleteCheckBundle deletes passed check bundle. +func (a *API) DeleteCheckBundle(cfg *CheckBundle) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid check bundle config [nil]") + } + return a.DeleteCheckBundleByCID(CIDType(&cfg.CID)) +} + +// DeleteCheckBundleByCID deletes check bundle with passed cid. +func (a *API) DeleteCheckBundleByCID(cid CIDType) (bool, error) { + + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid check bundle CID [none]") + } + + bundleCID := string(*cid) + + matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid check bundle CID [%v]", bundleCID) + } + + _, err = a.Delete(bundleCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchCheckBundles returns check bundles matching the specified +// search query and/or filter. If nil is passed for both parameters +// all check bundles will be returned. 
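+//
+// Editorial usage sketch (not part of the upstream source; the search
+// expression is a hypothetical example of the API's search syntax):
+//
+//	search := SearchQueryType(`(active:1)(type:"httptrap")`)
+//	bundles, err := client.SearchCheckBundles(&search, nil)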
+func (a *API) SearchCheckBundles(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]CheckBundle, error) {
+	q := url.Values{}
+
+	if searchCriteria != nil && *searchCriteria != "" {
+		q.Set("search", string(*searchCriteria))
+	}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchCheckBundles()
+	}
+
+	reqURL := url.URL{
+		Path:     config.CheckBundlePrefix,
+		RawQuery: q.Encode(),
+	}
+
+	resp, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var results []CheckBundle
+	if err := json.Unmarshal(resp, &results); err != nil {
+		return nil, err
+	}
+
+	return &results, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle_metrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle_metrics.go
new file mode 100644
index 000000000..817c7b891
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle_metrics.go
@@ -0,0 +1,95 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// CheckBundleMetrics API support - Fetch, Create*, Update, and Delete**
+// See: https://login.circonus.com/resources/api/calls/check_bundle_metrics
+// * : create metrics by adding to array with a status of 'active'
+// ** : delete (disable collection of) metrics by changing status from 'active' to 'available'
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// CheckBundleMetrics defines metrics for a specific check bundle. See https://login.circonus.com/resources/api/calls/check_bundle_metrics for more information.
+type CheckBundleMetrics struct {
+	CID     string              `json:"_cid,omitempty"` // string
+	Metrics []CheckBundleMetric `json:"metrics"`        // See check_bundle.go for CheckBundleMetric definition
+}
+
+// FetchCheckBundleMetrics retrieves metrics for the check bundle with passed cid.
+func (a *API) FetchCheckBundleMetrics(cid CIDType) (*CheckBundleMetrics, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid check bundle metrics CID [none]")
+	}
+
+	metricsCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.CheckBundleMetricsCIDRegex, metricsCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid check bundle metrics CID [%s]", metricsCID)
+	}
+
+	result, err := a.Get(metricsCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch check bundle metrics, received JSON: %s", string(result))
+	}
+
+	metrics := &CheckBundleMetrics{}
+	if err := json.Unmarshal(result, metrics); err != nil {
+		return nil, err
+	}
+
+	return metrics, nil
+}
+
+// UpdateCheckBundleMetrics updates passed metrics.
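+//
+// Editorial usage sketch (not part of the upstream source; the CID value
+// is hypothetical): disable collection of the first metric by flipping
+// its status from "active" to "available", per the note at the top of
+// this file.
+//
+//	cid := "/check_bundle_metrics/1234"
+//	metrics, err := client.FetchCheckBundleMetrics(CIDType(&cid))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	metrics.Metrics[0].Status = "available"
+//	metrics, err = client.UpdateCheckBundleMetrics(metrics)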
+func (a *API) UpdateCheckBundleMetrics(cfg *CheckBundleMetrics) (*CheckBundleMetrics, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid check bundle metrics config [nil]") + } + + metricsCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.CheckBundleMetricsCIDRegex, metricsCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid check bundle metrics CID [%s]", metricsCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update check bundle metrics, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(metricsCID, jsonCfg) + if err != nil { + return nil, err + } + + metrics := &CheckBundleMetrics{} + if err := json.Unmarshal(result, metrics); err != nil { + return nil, err + } + + return metrics, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go deleted file mode 100644 index e5faae0fb..000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/checkbundle.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2016 Circonus, Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package api - -import ( - "encoding/json" - "fmt" -) - -// CheckBundleConfig configuration specific to check type -type CheckBundleConfig struct { - AsyncMetrics bool `json:"async_metrics"` - Secret string `json:"secret"` - SubmissionURL string `json:"submission_url"` - ReverseSecret string `json:"reverse:secret_key"` - HTTPVersion string `json:"http_version,omitempty"` - Method string `json:"method,omitempty"` - Payload string `json:"payload,omitempty"` - Port string `json:"port,omitempty"` - ReadLimit string `json:"read_limit,omitempty"` - URL string `json:"url,omitempty"` -} - -// CheckBundleMetric individual metric configuration -type CheckBundleMetric struct { - Name string `json:"name"` - Type string `json:"type"` - Units string `json:"units"` - Status string `json:"status"` - Tags []string `json:"tags"` -} - -// CheckBundle definition -type CheckBundle struct { - CheckUUIDs []string `json:"_check_uuids,omitempty"` - Checks []string `json:"_checks,omitempty"` - Cid string `json:"_cid,omitempty"` - Created int `json:"_created,omitempty"` - LastModified int `json:"_last_modified,omitempty"` - LastModifedBy string `json:"_last_modifed_by,omitempty"` - ReverseConnectURLs []string `json:"_reverse_connection_urls"` - Brokers []string `json:"brokers"` - Config CheckBundleConfig `json:"config"` - DisplayName string `json:"display_name"` - Metrics []CheckBundleMetric `json:"metrics"` - MetricLimit int `json:"metric_limit"` - Notes string `json:"notes"` - Period int `json:"period"` - Status string `json:"status"` - Tags []string `json:"tags"` - Target string `json:"target"` - Timeout int `json:"timeout"` - Type string `json:"type"` -} - -// FetchCheckBundleByID fetch a check bundle configuration by id -func (a *API) FetchCheckBundleByID(id IDType) (*CheckBundle, error) { - cid := CIDType(fmt.Sprintf("/check_bundle/%d", id)) - return a.FetchCheckBundleByCID(cid) -} - -// FetchCheckBundleByCID fetch a check bundle configuration by id -func (a *API) FetchCheckBundleByCID(cid CIDType) (*CheckBundle, error) { - result, err := a.Get(string(cid)) - if err != nil { - return nil, err - } - - checkBundle := &CheckBundle{} - if err := json.Unmarshal(result, checkBundle); err != nil { - 
return nil, err - } - - return checkBundle, nil -} - -// CheckBundleSearch returns list of check bundles matching a search query -// - a search query not a filter (see: https://login.circonus.com/resources/api#searching) -func (a *API) CheckBundleSearch(searchCriteria SearchQueryType) ([]CheckBundle, error) { - apiPath := fmt.Sprintf("/check_bundle?search=%s", searchCriteria) - - response, err := a.Get(apiPath) - if err != nil { - return nil, fmt.Errorf("[ERROR] API call error %+v", err) - } - - var results []CheckBundle - if err := json.Unmarshal(response, &results); err != nil { - return nil, err - } - - return results, nil -} - -// CreateCheckBundle create a new check bundle (check) -func (a *API) CreateCheckBundle(config CheckBundle) (*CheckBundle, error) { - cfgJSON, err := json.Marshal(config) - if err != nil { - return nil, err - } - - response, err := a.Post("/check_bundle", cfgJSON) - if err != nil { - return nil, err - } - - checkBundle := &CheckBundle{} - if err := json.Unmarshal(response, checkBundle); err != nil { - return nil, err - } - - return checkBundle, nil -} - -// UpdateCheckBundle updates a check bundle configuration -func (a *API) UpdateCheckBundle(config *CheckBundle) (*CheckBundle, error) { - if a.Debug { - a.Log.Printf("[DEBUG] Updating check bundle.") - } - - cfgJSON, err := json.Marshal(config) - if err != nil { - return nil, err - } - - response, err := a.Put(config.Cid, cfgJSON) - if err != nil { - return nil, err - } - - checkBundle := &CheckBundle{} - if err := json.Unmarshal(response, checkBundle); err != nil { - return nil, err - } - - return checkBundle, nil -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/config/consts.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/config/consts.go new file mode 100644 index 000000000..bbca43d03 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/config/consts.go @@ -0,0 +1,538 @@ +package config + +// Key for CheckBundleConfig options and CheckDetails info +type Key string + +// Constants per type as defined in +// https://login.circonus.com/resources/api/calls/check_bundle +const ( + // + // default settings for api.NewCheckBundle() + // + DefaultCheckBundleMetricLimit = -1 // unlimited + DefaultCheckBundleStatus = "active" + DefaultCheckBundlePeriod = 60 + DefaultCheckBundleTimeout = 10 + DefaultConfigOptionsSize = 20 + + // + // common (apply to more than one check type) + // + AsyncMetrics = Key("asynch_metrics") + AuthMethod = Key("auth_method") + AuthPassword = Key("auth_password") + AuthUser = Key("auth_user") + BaseURL = Key("base_url") + CAChain = Key("ca_chain") + CertFile = Key("certificate_file") + Ciphers = Key("ciphers") + Command = Key("command") + DSN = Key("dsn") + HeaderPrefix = Key("header_") + HTTPVersion = Key("http_version") + KeyFile = Key("key_file") + Method = Key("method") + Password = Key("password") + Payload = Key("payload") + Port = Key("port") + Query = Key("query") + ReadLimit = Key("read_limit") + Secret = Key("secret") + SQL = Key("sql") + URI = Key("uri") + URL = Key("url") + Username = Key("username") + UseSSL = Key("use_ssl") + User = Key("user") + SASLAuthentication = Key("sasl_authentication") + SASLUser = Key("sasl_user") + SecurityLevel = Key("security_level") + Version = Key("version") + AppendColumnName = Key("append_column_name") + Database = Key("database") + JDBCPrefix = Key("jdbc_") + + // + // CAQL check + // + // Common items: + // Query + + // + // Circonus Windows Agent + // + // Common items: + // AuthPassword + 
// AuthUser + // Port + // URL + Calculated = Key("calculated") + Category = Key("category") + + // + // Cloudwatch + // + // Notes: + // DimPrefix is special because the actual key is dynamic and matches: `dim_(.+)` + // Common items: + // URL + // Version + APIKey = Key("api_key") + APISecret = Key("api_secret") + CloudwatchMetrics = Key("cloudwatch_metrics") + DimPrefix = Key("dim_") + Granularity = Key("granularity") + Namespace = Key("namespace") + Statistics = Key("statistics") + + // + // Collectd + // + // Common items: + // AsyncMetrics + // Username + // Secret + // SecurityLevel + + // + // Composite + // + CompositeMetricName = Key("composite_metric_name") + Formula = Key("formula") + + // + // DHCP + // + HardwareAddress = Key("hardware_addr") + HostIP = Key("host_ip") + RequestType = Key("request_type") + SendPort = Key("send_port") + + // + // DNS + // + // Common items: + // Query + CType = Key("ctype") + Nameserver = Key("nameserver") + RType = Key("rtype") + + // + // EC Console + // + // Common items: + // Command + // Port + // SASLAuthentication + // SASLUser + Objects = Key("objects") + XPath = Key("xpath") + + // + // Elastic Search + // + // Common items: + // Port + // URL + + // + // Ganglia + // + // Common items: + // AsyncMetrics + + // + // Google Analytics + // + // Common items: + // Password + // Username + OAuthToken = Key("oauth_token") + OAuthTokenSecret = Key("oauth_token_secret") + OAuthVersion = Key("oauth_version") + TableID = Key("table_id") + UseOAuth = Key("use_oauth") + + // + // HA Proxy + // + // Common items: + // AuthPassword + // AuthUser + // Port + // UseSSL + Host = Key("host") + Select = Key("select") + + // + // HTTP + // + // Notes: + // HeaderPrefix is special because the actual key is dynamic and matches: `header_(\S+)` + // Common items: + // AuthMethod + // AuthPassword + // AuthUser + // CAChain + // CertFile + // Ciphers + // KeyFile + // URL + // HeaderPrefix + // HTTPVersion + // Method + // Payload + // ReadLimit + Body = Key("body") + Code = Key("code") + Extract = Key("extract") + Redirects = Key("redirects") + + // + // HTTPTRAP + // + // Common items: + // AsyncMetrics + // Secret + + // + // IMAP + // + // Common items: + // AuthPassword + // AuthUser + // CAChain + // CertFile + // Ciphers + // KeyFile + // Port + // UseSSL + Fetch = Key("fetch") + Folder = Key("folder") + HeaderHost = Key("header_Host") + Search = Key("search") + + // + // JMX + // + // Common items: + // Password + // Port + // URI + // Username + MbeanDomains = Key("mbean_domains") + + // + // JSON + // + // Common items: + // AuthMethod + // AuthPassword + // AuthUser + // CAChain + // CertFile + // Ciphers + // HeaderPrefix + // HTTPVersion + // KeyFile + // Method + // Payload + // Port + // ReadLimit + // URL + + // + // Keynote + // + // Notes: + // SlotAliasPrefix is special because the actual key is dynamic and matches: `slot_alias_(\d+)` + // Common items: + // APIKey + // BaseURL + PageComponent = Key("pagecomponent") + SlotAliasPrefix = Key("slot_alias_") + SlotIDList = Key("slot_id_list") + TransPageList = Key("transpagelist") + + // + // Keynote Pulse + // + // Common items: + // BaseURL + // Password + // User + AgreementID = Key("agreement_id") + + // + // LDAP + // + // Common items: + // Password + // Port + AuthType = Key("authtype") + DN = Key("dn") + SecurityPrincipal = Key("security_principal") + + // + // Memcached + // + // Common items: + // Port + + // + // MongoDB + // + // Common items: + // Command + // Password + // Port + 
// Username + DBName = Key("dbname") + + // + // Munin + // + // Note: no configuration options + + // + // MySQL + // + // Common items: + // DSN + // SQL + + // + // Newrelic rpm + // + // Common items: + // APIKey + AccountID = Key("acct_id") + ApplicationID = Key("application_id") + LicenseKey = Key("license_key") + + // + // Nginx + // + // Common items: + // CAChain + // CertFile + // Ciphers + // KeyFile + // URL + + // + // NRPE + // + // Common items: + // Command + // Port + // UseSSL + AppendUnits = Key("append_uom") + + // + // NTP + // + // Common items: + // Port + Control = Key("control") + + // + // Oracle + // + // Notes: + // JDBCPrefix is special because the actual key is dynamic and matches: `jdbc_(\S+)` + // Common items: + // AppendColumnName + // Database + // JDBCPrefix + // Password + // Port + // SQL + // User + + // + // Ping ICMP + // + AvailNeeded = Key("avail_needed") + Count = Key("count") + Interval = Key("interval") + + // + // PostgreSQL + // + // Common items: + // DSN + // SQL + + // + // Redis + // + // Common items: + // Command + // Password + // Port + DBIndex = Key("dbindex") + + // + // Resmon + // + // Notes: + // HeaderPrefix is special because the actual key is dynamic and matches: `header_(\S+)` + // Common items: + // AuthMethod + // AuthPassword + // AuthUser + // CAChain + // CertFile + // Ciphers + // HeaderPrefix + // HTTPVersion + // KeyFile + // Method + // Payload + // Port + // ReadLimit + // URL + + // + // SMTP + // + // Common items: + // Payload + // Port + // SASLAuthentication + // SASLUser + EHLO = Key("ehlo") + From = Key("from") + SASLAuthID = Key("sasl_auth_id") + SASLPassword = Key("sasl_password") + StartTLS = Key("starttls") + To = Key("to") + + // + // SNMP + // + // Notes: + // OIDPrefix is special because the actual key is dynamic and matches: `oid_(.+)` + // TypePrefix is special because the actual key is dynamic and matches: `type_(.+)` + // Common items: + // Port + // SecurityLevel + // Version + AuthPassphrase = Key("auth_passphrase") + AuthProtocol = Key("auth_protocol") + Community = Key("community") + ContextEngine = Key("context_engine") + ContextName = Key("context_name") + OIDPrefix = Key("oid_") + PrivacyPassphrase = Key("privacy_passphrase") + PrivacyProtocol = Key("privacy_protocol") + SecurityEngine = Key("security_engine") + SecurityName = Key("security_name") + SeparateQueries = Key("separate_queries") + TypePrefix = Key("type_") + + // + // SQLServer + // + // Notes: + // JDBCPrefix is special because the actual key is dynamic and matches: `jdbc_(\S+)` + // Common items: + // AppendColumnName + // Database + // JDBCPrefix + // Password + // Port + // SQL + // User + + // + // SSH v2 + // + // Common items: + // Port + MethodCompCS = Key("method_comp_cs") + MethodCompSC = Key("method_comp_sc") + MethodCryptCS = Key("method_crypt_cs") + MethodCryptSC = Key("method_crypt_sc") + MethodHostKey = Key("method_hostkey") + MethodKeyExchange = Key("method_kex") + MethodMacCS = Key("method_mac_cs") + MethodMacSC = Key("method_mac_sc") + + // + // StatsD + // + // Note: no configuration options + + // + // TCP + // + // Common items: + // CAChain + // CertFile + // Ciphers + // KeyFile + // Port + // UseSSL + BannerMatch = Key("banner_match") + + // + // Varnish + // + // Note: no configuration options + + // + // reserved - config option(s) can't actually be set - here for r/o access + // + ReverseSecretKey = Key("reverse:secret_key") + SubmissionURL = Key("submission_url") + + // + // Endpoint prefix & cid regex 
+	//
+	DefaultCIDRegex            = "[0-9]+"
+	DefaultUUIDRegex           = "[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}"
+	AccountPrefix              = "/account"
+	AccountCIDRegex            = "^(" + AccountPrefix + "/(" + DefaultCIDRegex + "|current))$"
+	AcknowledgementPrefix      = "/acknowledgement"
+	AcknowledgementCIDRegex    = "^(" + AcknowledgementPrefix + "/(" + DefaultCIDRegex + "))$"
+	AlertPrefix                = "/alert"
+	AlertCIDRegex              = "^(" + AlertPrefix + "/(" + DefaultCIDRegex + "))$"
+	AnnotationPrefix           = "/annotation"
+	AnnotationCIDRegex         = "^(" + AnnotationPrefix + "/(" + DefaultCIDRegex + "))$"
+	BrokerPrefix               = "/broker"
+	BrokerCIDRegex             = "^(" + BrokerPrefix + "/(" + DefaultCIDRegex + "))$"
+	CheckBundleMetricsPrefix   = "/check_bundle_metrics"
+	CheckBundleMetricsCIDRegex = "^(" + CheckBundleMetricsPrefix + "/(" + DefaultCIDRegex + "))$"
+	CheckBundlePrefix          = "/check_bundle"
+	CheckBundleCIDRegex        = "^(" + CheckBundlePrefix + "/(" + DefaultCIDRegex + "))$"
+	CheckPrefix                = "/check"
+	CheckCIDRegex              = "^(" + CheckPrefix + "/(" + DefaultCIDRegex + "))$"
+	ContactGroupPrefix         = "/contact_group"
+	ContactGroupCIDRegex       = "^(" + ContactGroupPrefix + "/(" + DefaultCIDRegex + "))$"
+	DashboardPrefix            = "/dashboard"
+	DashboardCIDRegex          = "^(" + DashboardPrefix + "/(" + DefaultCIDRegex + "))$"
+	GraphPrefix                = "/graph"
+	GraphCIDRegex              = "^(" + GraphPrefix + "/(" + DefaultUUIDRegex + "))$"
+	MaintenancePrefix          = "/maintenance"
+	MaintenanceCIDRegex        = "^(" + MaintenancePrefix + "/(" + DefaultCIDRegex + "))$"
+	MetricClusterPrefix        = "/metric_cluster"
+	MetricClusterCIDRegex      = "^(" + MetricClusterPrefix + "/(" + DefaultCIDRegex + "))$"
+	MetricPrefix               = "/metric"
+	MetricCIDRegex             = "^(" + MetricPrefix + "/((" + DefaultCIDRegex + ")_([^[:space:]]+)))$"
+	OutlierReportPrefix        = "/outlier_report"
+	OutlierReportCIDRegex      = "^(" + OutlierReportPrefix + "/(" + DefaultCIDRegex + "))$"
+	ProvisionBrokerPrefix      = "/provision_broker"
+	ProvisionBrokerCIDRegex    = "^(" + ProvisionBrokerPrefix + "/([a-z0-9]+-[a-z0-9]+))$"
+	RuleSetGroupPrefix         = "/rule_set_group"
+	RuleSetGroupCIDRegex       = "^(" + RuleSetGroupPrefix + "/(" + DefaultCIDRegex + "))$"
+	RuleSetPrefix              = "/rule_set"
+	RuleSetCIDRegex            = "^(" + RuleSetPrefix + "/((" + DefaultCIDRegex + ")_([^[:space:]]+)))$"
+	UserPrefix                 = "/user"
+	UserCIDRegex               = "^(" + UserPrefix + "/(" + DefaultCIDRegex + "|current))$"
+	WorksheetPrefix            = "/worksheet"
+	WorksheetCIDRegex          = "^(" + WorksheetPrefix + "/(" + DefaultUUIDRegex + "))$"
+	// contact group severity levels
+	NumSeverityLevels = 5
+)
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/contact_group.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/contact_group.go
new file mode 100644
index 000000000..578a2e898
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/contact_group.go
@@ -0,0 +1,263 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// Contact Group API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/contact_group + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// ContactGroupAlertFormats define alert formats +type ContactGroupAlertFormats struct { + LongMessage *string `json:"long_message"` // string or null + LongSubject *string `json:"long_subject"` // string or null + LongSummary *string `json:"long_summary"` // string or null + ShortMessage *string `json:"short_message"` // string or null + ShortSummary *string `json:"short_summary"` // string or null +} + +// ContactGroupContactsExternal external contacts +type ContactGroupContactsExternal struct { + Info string `json:"contact_info"` // string + Method string `json:"method"` // string +} + +// ContactGroupContactsUser user contacts +type ContactGroupContactsUser struct { + Info string `json:"_contact_info,omitempty"` // string + Method string `json:"method"` // string + UserCID string `json:"user"` // string +} + +// ContactGroupContacts list of contacts +type ContactGroupContacts struct { + External []ContactGroupContactsExternal `json:"external"` // [] len >= 0 + Users []ContactGroupContactsUser `json:"users"` // [] len >= 0 +} + +// ContactGroupEscalation defines escalations for severity levels +type ContactGroupEscalation struct { + After uint `json:"after"` // uint + ContactGroupCID string `json:"contact_group"` // string +} + +// ContactGroup defines a contact group. See https://login.circonus.com/resources/api/calls/contact_group for more information. +type ContactGroup struct { + AggregationWindow uint `json:"aggregation_window,omitempty"` // uint + AlertFormats ContactGroupAlertFormats `json:"alert_formats,omitempty"` // ContactGroupAlertFormats + CID string `json:"_cid,omitempty"` // string + Contacts ContactGroupContacts `json:"contacts,omitempty"` // ContactGroupContacts + Escalations []*ContactGroupEscalation `json:"escalations,omitempty"` // [] len == 5, elements: ContactGroupEscalation or null + LastModified uint `json:"_last_modified,omitempty"` // uint + LastModifiedBy string `json:"_last_modified_by,omitempty"` // string + Name string `json:"name,omitempty"` // string + Reminders []uint `json:"reminders,omitempty"` // [] len == 5 + Tags []string `json:"tags,omitempty"` // [] len >= 0 +} + +// NewContactGroup returns a ContactGroup (with defaults, if applicable) +func NewContactGroup() *ContactGroup { + return &ContactGroup{ + Escalations: make([]*ContactGroupEscalation, config.NumSeverityLevels), + Reminders: make([]uint, config.NumSeverityLevels), + Contacts: ContactGroupContacts{ + External: []ContactGroupContactsExternal{}, + Users: []ContactGroupContactsUser{}, + }, + } +} + +// FetchContactGroup retrieves contact group with passed cid. 
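+//
+// Editorial usage sketch (not part of the upstream source; the CID value
+// is hypothetical):
+//
+//	cid := "/contact_group/99"
+//	group, err := client.FetchContactGroup(CIDType(&cid))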
+func (a *API) FetchContactGroup(cid CIDType) (*ContactGroup, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid contact group CID [none]") + } + + groupCID := string(*cid) + + matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid contact group CID [%s]", groupCID) + } + + result, err := a.Get(groupCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch contact group, received JSON: %s", string(result)) + } + + group := new(ContactGroup) + if err := json.Unmarshal(result, group); err != nil { + return nil, err + } + + return group, nil +} + +// FetchContactGroups retrieves all contact groups available to the API Token. +func (a *API) FetchContactGroups() (*[]ContactGroup, error) { + result, err := a.Get(config.ContactGroupPrefix) + if err != nil { + return nil, err + } + + var groups []ContactGroup + if err := json.Unmarshal(result, &groups); err != nil { + return nil, err + } + + return &groups, nil +} + +// UpdateContactGroup updates passed contact group. +func (a *API) UpdateContactGroup(cfg *ContactGroup) (*ContactGroup, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid contact group config [nil]") + } + + groupCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid contact group CID [%s]", groupCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update contact group, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(groupCID, jsonCfg) + if err != nil { + return nil, err + } + + group := &ContactGroup{} + if err := json.Unmarshal(result, group); err != nil { + return nil, err + } + + return group, nil +} + +// CreateContactGroup creates a new contact group. +func (a *API) CreateContactGroup(cfg *ContactGroup) (*ContactGroup, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid contact group config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create contact group, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.ContactGroupPrefix, jsonCfg) + if err != nil { + return nil, err + } + + group := &ContactGroup{} + if err := json.Unmarshal(result, group); err != nil { + return nil, err + } + + return group, nil +} + +// DeleteContactGroup deletes passed contact group. +func (a *API) DeleteContactGroup(cfg *ContactGroup) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid contact group config [nil]") + } + return a.DeleteContactGroupByCID(CIDType(&cfg.CID)) +} + +// DeleteContactGroupByCID deletes contact group with passed cid. +func (a *API) DeleteContactGroupByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid contact group CID [none]") + } + + groupCID := string(*cid) + + matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid contact group CID [%s]", groupCID) + } + + _, err = a.Delete(groupCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchContactGroups returns contact groups matching the specified +// search query and/or filter. 
If nil is passed for both parameters +// all contact groups will be returned. +func (a *API) SearchContactGroups(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]ContactGroup, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchContactGroups() + } + + reqURL := url.URL{ + Path: config.ContactGroupPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var groups []ContactGroup + if err := json.Unmarshal(result, &groups); err != nil { + return nil, err + } + + return &groups, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go new file mode 100644 index 000000000..596f33db6 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go @@ -0,0 +1,400 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Dashboard API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/dashboard + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// DashboardGridLayout defines layout +type DashboardGridLayout struct { + Height uint `json:"height"` + Width uint `json:"width"` +} + +// DashboardAccessConfig defines access config +type DashboardAccessConfig struct { + BlackDash bool `json:"black_dash"` + Enabled bool `json:"enabled"` + Fullscreen bool `json:"fullscreen"` + FullscreenHideTitle bool `json:"fullscreen_hide_title"` + Nickname string `json:"nickname"` + ScaleText bool `json:"scale_text"` + SharedID string `json:"shared_id"` + TextSize uint `json:"text_size"` +} + +// DashboardOptions defines options +type DashboardOptions struct { + AccessConfigs []DashboardAccessConfig `json:"access_configs"` + FullscreenHideTitle bool `json:"fullscreen_hide_title"` + HideGrid bool `json:"hide_grid"` + Linkages [][]string `json:"linkages"` + ScaleText bool `json:"scale_text"` + TextSize uint `json:"text_size"` +} + +// ChartTextWidgetDatapoint defines datapoints for charts +type ChartTextWidgetDatapoint struct { + AccountID string `json:"account_id,omitempty"` // metric cluster, metric + CheckID uint `json:"_check_id,omitempty"` // metric + ClusterID uint `json:"cluster_id,omitempty"` // metric cluster + ClusterTitle string `json:"_cluster_title,omitempty"` // metric cluster + Label string `json:"label,omitempty"` // metric + Label2 string `json:"_label,omitempty"` // metric cluster + Metric string `json:"metric,omitempty"` // metric + MetricType string `json:"_metric_type,omitempty"` // metric + NumericOnly bool `json:"numeric_only,omitempty"` // metric cluster +} + +// ChartWidgetDefinitionLegend defines chart widget definition legend +type ChartWidgetDefinitionLegend struct { + Show bool `json:"show,omitempty"` + Type string `json:"type,omitempty"` +} + +// ChartWidgetWedgeLabels defines chart widget wedge labels +type ChartWidgetWedgeLabels struct { + OnChart bool `json:"on_chart,omitempty"` + ToolTips 
bool `json:"tooltips,omitempty"` +} + +// ChartWidgetWedgeValues defines chart widget wedge values +type ChartWidgetWedgeValues struct { + Angle string `json:"angle,omitempty"` + Color string `json:"color,omitempty"` + Show bool `json:"show,omitempty"` +} + +// ChartWidgtDefinition defines chart widget definition +type ChartWidgtDefinition struct { + Datasource string `json:"datasource,omitempty"` + Derive string `json:"derive,omitempty"` + DisableAutoformat bool `json:"disable_autoformat,omitempty"` + Formula string `json:"formula,omitempty"` + Legend ChartWidgetDefinitionLegend `json:"legend,omitempty"` + Period uint `json:"period,omitempty"` + PopOnHover bool `json:"pop_onhover,omitempty"` + WedgeLabels ChartWidgetWedgeLabels `json:"wedge_labels,omitempty"` + WedgeValues ChartWidgetWedgeValues `json:"wedge_values,omitempty"` +} + +// ForecastGaugeWidgetThresholds defines forecast widget thresholds +type ForecastGaugeWidgetThresholds struct { + Colors []string `json:"colors,omitempty"` // forecasts, gauges + Flip bool `json:"flip,omitempty"` // gauges + Values []string `json:"values,omitempty"` // forecasts, gauges +} + +// StatusWidgetAgentStatusSettings defines agent status settings +type StatusWidgetAgentStatusSettings struct { + Search string `json:"search,omitempty"` + ShowAgentTypes string `json:"show_agent_types,omitempty"` + ShowContact bool `json:"show_contact,omitempty"` + ShowFeeds bool `json:"show_feeds,omitempty"` + ShowSetup bool `json:"show_setup,omitempty"` + ShowSkew bool `json:"show_skew,omitempty"` + ShowUpdates bool `json:"show_updates,omitempty"` +} + +// StatusWidgetHostStatusSettings defines host status settings +type StatusWidgetHostStatusSettings struct { + LayoutStyle string `json:"layout_style,omitempty"` + Search string `json:"search,omitempty"` + SortBy string `json:"sort_by,omitempty"` + TagFilterSet []string `json:"tag_filter_set,omitempty"` +} + +// DashboardWidgetSettings defines settings specific to widget +// Note: optional attributes which are structs need to be pointers so they will be omitted +type DashboardWidgetSettings struct { + AccountID string `json:"account_id,omitempty"` // alerts, clusters, gauges, graphs, lists, status + Acknowledged string `json:"acknowledged,omitempty"` // alerts + AgentStatusSettings *StatusWidgetAgentStatusSettings `json:"agent_status_settings,omitempty"` // status + Algorithm string `json:"algorithm,omitempty"` // clusters + Autoformat bool `json:"autoformat,omitempty"` // text + BodyFormat string `json:"body_format,omitempty"` // text + ChartType string `json:"chart_type,omitempty"` // charts + CheckUUID string `json:"check_uuid,omitempty"` // gauges + Cleared string `json:"cleared,omitempty"` // alerts + ClusterID uint `json:"cluster_id,omitempty"` // clusters + ClusterName string `json:"cluster_name,omitempty"` // clusters + ContactGroups []uint `json:"contact_groups,omitempty"` // alerts + ContentType string `json:"content_type,omitempty"` // status + Datapoints []ChartTextWidgetDatapoint `json:"datapoints,omitempty"` // charts, text + DateWindow string `json:"date_window,omitempty"` // graphs + Definition *ChartWidgtDefinition `json:"definition,omitempty"` // charts + Dependents string `json:"dependents,omitempty"` // alerts + DisableAutoformat bool `json:"disable_autoformat,omitempty"` // gauges + Display string `json:"display,omitempty"` // alerts + Format string `json:"format,omitempty"` // forecasts + Formula string `json:"formula,omitempty"` // gauges + GraphUUID string `json:"graph_id,omitempty"` // graphs + 
HideXAxis bool `json:"hide_xaxis,omitempty"` // graphs + HideYAxis bool `json:"hide_yaxis,omitempty"` // graphs + HostStatusSettings *StatusWidgetHostStatusSettings `json:"host_status_settings,omitempty"` // status + KeyInline bool `json:"key_inline,omitempty"` // graphs + KeyLoc string `json:"key_loc,omitempty"` // graphs + KeySize uint `json:"key_size,omitempty"` // graphs + KeyWrap bool `json:"key_wrap,omitempty"` // graphs + Label string `json:"label,omitempty"` // graphs + Layout string `json:"layout,omitempty"` // clusters + Limit uint `json:"limit,omitempty"` // lists + Maintenance string `json:"maintenance,omitempty"` // alerts + Markup string `json:"markup,omitempty"` // html + MetricDisplayName string `json:"metric_display_name,omitempty"` // gauges + MetricName string `json:"metric_name,omitempty"` // gauges + MinAge string `json:"min_age,omitempty"` // alerts + OffHours []uint `json:"off_hours,omitempty"` // alerts + OverlaySetID string `json:"overlay_set_id,omitempty"` // graphs + Period uint `json:"period,omitempty"` // gauges, text, graphs + RangeHigh int `json:"range_high,omitempty"` // gauges + RangeLow int `json:"range_low,omitempty"` // gauges + Realtime bool `json:"realtime,omitempty"` // graphs + ResourceLimit string `json:"resource_limit,omitempty"` // forecasts + ResourceUsage string `json:"resource_usage,omitempty"` // forecasts + Search string `json:"search,omitempty"` // alerts, lists + Severity string `json:"severity,omitempty"` // alerts + ShowFlags bool `json:"show_flags,omitempty"` // graphs + Size string `json:"size,omitempty"` // clusters + TagFilterSet []string `json:"tag_filter_set,omitempty"` // alerts + Threshold float32 `json:"threshold,omitempty"` // clusters + Thresholds *ForecastGaugeWidgetThresholds `json:"thresholds,omitempty"` // forecasts, gauges + TimeWindow string `json:"time_window,omitempty"` // alerts + Title string `json:"title,omitempty"` // alerts, charts, forecasts, gauges, html + TitleFormat string `json:"title_format,omitempty"` // text + Trend string `json:"trend,omitempty"` // forecasts + Type string `json:"type,omitempty"` // gauges, lists + UseDefault bool `json:"use_default,omitempty"` // text + ValueType string `json:"value_type,omitempty"` // gauges, text + WeekDays []string `json:"weekdays,omitempty"` // alerts +} + +// DashboardWidget defines widget +type DashboardWidget struct { + Active bool `json:"active"` + Height uint `json:"height"` + Name string `json:"name"` + Origin string `json:"origin"` + Settings DashboardWidgetSettings `json:"settings"` + Type string `json:"type"` + WidgetID string `json:"widget_id"` + Width uint `json:"width"` +} + +// Dashboard defines a dashboard. See https://login.circonus.com/resources/api/calls/dashboard for more information. +type Dashboard struct { + AccountDefault bool `json:"account_default"` + Active bool `json:"_active,omitempty"` + CID string `json:"_cid,omitempty"` + Created uint `json:"_created,omitempty"` + CreatedBy string `json:"_created_by,omitempty"` + GridLayout DashboardGridLayout `json:"grid_layout"` + LastModified uint `json:"_last_modified,omitempty"` + Options DashboardOptions `json:"options"` + Shared bool `json:"shared"` + Title string `json:"title"` + UUID string `json:"_dashboard_uuid,omitempty"` + Widgets []DashboardWidget `json:"widgets"` +} + +// NewDashboard returns a new Dashboard (with defaults, if applicable) +func NewDashboard() *Dashboard { + return &Dashboard{} +} + +// FetchDashboard retrieves dashboard with passed cid. 
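+//
+// A minimal usage sketch (illustrative; "client" is an assumed, configured
+// *API and the CID value is hypothetical):
+//
+//	cid := "/dashboard/1234"
+//	dash, err := client.FetchDashboard(CIDType(&cid))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(dash.Title)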
+func (a *API) FetchDashboard(cid CIDType) (*Dashboard, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid dashboard CID [none]") + } + + dashboardCID := string(*cid) + + matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID) + } + + result, err := a.Get(string(*cid)) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch dashboard, received JSON: %s", string(result)) + } + + dashboard := new(Dashboard) + if err := json.Unmarshal(result, dashboard); err != nil { + return nil, err + } + + return dashboard, nil +} + +// FetchDashboards retrieves all dashboards available to the API Token. +func (a *API) FetchDashboards() (*[]Dashboard, error) { + result, err := a.Get(config.DashboardPrefix) + if err != nil { + return nil, err + } + + var dashboards []Dashboard + if err := json.Unmarshal(result, &dashboards); err != nil { + return nil, err + } + + return &dashboards, nil +} + +// UpdateDashboard updates passed dashboard. +func (a *API) UpdateDashboard(cfg *Dashboard) (*Dashboard, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid dashboard config [nil]") + } + + dashboardCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update dashboard, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(dashboardCID, jsonCfg) + if err != nil { + return nil, err + } + + dashboard := &Dashboard{} + if err := json.Unmarshal(result, dashboard); err != nil { + return nil, err + } + + return dashboard, nil +} + +// CreateDashboard creates a new dashboard. +func (a *API) CreateDashboard(cfg *Dashboard) (*Dashboard, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid dashboard config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create dashboard, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.DashboardPrefix, jsonCfg) + if err != nil { + return nil, err + } + + dashboard := &Dashboard{} + if err := json.Unmarshal(result, dashboard); err != nil { + return nil, err + } + + return dashboard, nil +} + +// DeleteDashboard deletes passed dashboard. +func (a *API) DeleteDashboard(cfg *Dashboard) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid dashboard config [nil]") + } + return a.DeleteDashboardByCID(CIDType(&cfg.CID)) +} + +// DeleteDashboardByCID deletes dashboard with passed cid. +func (a *API) DeleteDashboardByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid dashboard CID [none]") + } + + dashboardCID := string(*cid) + + matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID) + } + + _, err = a.Delete(dashboardCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchDashboards returns dashboards matching the specified +// search query and/or filter. If nil is passed for both parameters +// all dashboards will be returned. 
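+//
+// Sketch (illustrative; "client" is an assumed, configured *API and the
+// search text is a hypothetical example, not documented query syntax):
+//
+//	search := SearchQueryType("ops")
+//	dashboards, err := client.SearchDashboards(&search, nil)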
+func (a *API) SearchDashboards(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Dashboard, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchDashboards() + } + + reqURL := url.URL{ + Path: config.DashboardPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var dashboards []Dashboard + if err := json.Unmarshal(result, &dashboards); err != nil { + return nil, err + } + + return &dashboards, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go new file mode 100644 index 000000000..bdceae5d0 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go @@ -0,0 +1,63 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package api provides methods for interacting with the Circonus API. See the full Circonus API +Documentation at https://login.circonus.com/resources/api for more information. + +Raw REST methods + + Get - retrieve existing item(s) + Put - update an existing item + Post - create a new item + Delete - remove an existing item + +Endpoints (supported) + + Account https://login.circonus.com/resources/api/calls/account + Acknowledgement https://login.circonus.com/resources/api/calls/acknowledgement + Alert https://login.circonus.com/resources/api/calls/alert + Annotation https://login.circonus.com/resources/api/calls/annotation + Broker https://login.circonus.com/resources/api/calls/broker + Check https://login.circonus.com/resources/api/calls/check + Check Bundle https://login.circonus.com/resources/api/calls/check_bundle + Check Bundle Metrics https://login.circonus.com/resources/api/calls/check_bundle_metrics + Contact Group https://login.circonus.com/resources/api/calls/contact_group + Dashboard https://login.circonus.com/resources/api/calls/dashboard + Graph https://login.circonus.com/resources/api/calls/graph + Maintenance [window] https://login.circonus.com/resources/api/calls/maintenance + Metric https://login.circonus.com/resources/api/calls/metric + Metric Cluster https://login.circonus.com/resources/api/calls/metric_cluster + Outlier Report https://login.circonus.com/resources/api/calls/outlier_report + Provision Broker https://login.circonus.com/resources/api/calls/provision_broker + Rule Set https://login.circonus.com/resources/api/calls/rule_set + Rule Set Group https://login.circonus.com/resources/api/calls/rule_set_group + User https://login.circonus.com/resources/api/calls/user + Worksheet https://login.circonus.com/resources/api/calls/worksheet + +Endpoints (not supported) + + Support may be added for these endpoints in the future. These endpoints may currently be used + directly with the Raw REST methods above. 
+ + CAQL https://login.circonus.com/resources/api/calls/caql + Check Move https://login.circonus.com/resources/api/calls/check_move + Data https://login.circonus.com/resources/api/calls/data + Snapshot https://login.circonus.com/resources/api/calls/snapshot + Tag https://login.circonus.com/resources/api/calls/tag + Template https://login.circonus.com/resources/api/calls/template + +Verbs + + Fetch singular/plural item(s) - e.g. FetchAnnotation, FetchAnnotations + Create create new item - e.g. CreateAnnotation + Update update an item - e.g. UpdateAnnotation + Delete remove an item - e.g. DeleteAnnotation, DeleteAnnotationByCID + Search search for item(s) - e.g. SearchAnnotations + New new item config - e.g. NewAnnotation (returns an empty item, + any applicable defaults defined) + + Not all endpoints support all verbs. +*/ +package api diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go new file mode 100644 index 000000000..8c8353ef5 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go @@ -0,0 +1,356 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Graph API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/graph + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// GraphAccessKey defines an access key for a graph +type GraphAccessKey struct { + Active bool `json:"active,omitempty"` // boolean + Height uint `json:"height,omitempty"` // uint + Key string `json:"key,omitempty"` // string + Legend bool `json:"legend,omitempty"` // boolean + LockDate bool `json:"lock_date,omitempty"` // boolean + LockMode string `json:"lock_mode,omitempty"` // string + LockRangeEnd uint `json:"lock_range_end,omitempty"` // uint + LockRangeStart uint `json:"lock_range_start,omitempty"` // uint + LockShowTimes bool `json:"lock_show_times,omitempty"` // boolean + LockZoom string `json:"lock_zoom,omitempty"` // string + Nickname string `json:"nickname,omitempty"` // string + Title bool `json:"title,omitempty"` // boolean + Width uint `json:"width,omitempty"` // uint + XLabels bool `json:"x_labels,omitempty"` // boolean + YLabels bool `json:"y_labels,omitempty"` // boolean +} + +// GraphComposite defines a composite +type GraphComposite struct { + Axis string `json:"axis"` // string + Color string `json:"color"` // string + DataFormula *string `json:"data_formula"` // string or null + Hidden bool `json:"hidden"` // boolean + LegendFormula *string `json:"legend_formula"` // string or null + Name string `json:"name"` // string + Stack *uint `json:"stack"` // uint or null +} + +// GraphDatapoint defines a datapoint +type GraphDatapoint struct { + Alpha *float64 `json:"alpha,string,omitempty"` // float64 + Axis string `json:"axis,omitempty"` // string + CAQL *string `json:"caql,omitempty"` // string or null + CheckID uint `json:"check_id,omitempty"` // uint + Color *string `json:"color,omitempty"` // string + DataFormula *string `json:"data_formula"` // string or null + Derive interface{} `json:"derive,omitempty"` // BUG doc: string, api: string or boolean(for caql statements) + Hidden bool `json:"hidden"` // boolean + LegendFormula *string `json:"legend_formula"` // string or null + MetricName string `json:"metric_name,omitempty"` // 
string + MetricType string `json:"metric_type,omitempty"` // string + Name string `json:"name"` // string + Search *string `json:"search"` // string or null + Stack *uint `json:"stack"` // uint or null +} + +// GraphGuide defines a guide +type GraphGuide struct { + Color string `json:"color"` // string + DataFormula *string `json:"data_formula"` // string or null + Hidden bool `json:"hidden"` // boolean + LegendFormula *string `json:"legend_formula"` // string or null + Name string `json:"name"` // string +} + +// GraphMetricCluster defines a metric cluster +type GraphMetricCluster struct { + AggregateFunc string `json:"aggregate_function,omitempty"` // string + Axis string `json:"axis,omitempty"` // string + Color *string `json:"color,omitempty"` // string + DataFormula *string `json:"data_formula"` // string or null + Hidden bool `json:"hidden"` // boolean + LegendFormula *string `json:"legend_formula"` // string or null + MetricCluster string `json:"metric_cluster,omitempty"` // string + Name string `json:"name,omitempty"` // string + Stack *uint `json:"stack"` // uint or null +} + +// GraphOverlaySet defines an overlay set for a graph +type GraphOverlaySet struct { + Overlays map[string]GraphOverlay `json:"overlays"` + Title string `json:"title"` +} + +// GraphOverlay defines a single overlay in an overlay set +type GraphOverlay struct { + DataOpts OverlayDataOptions `json:"data_opts,omitempty"` // OverlayDataOptions + ID string `json:"id,omitempty"` // string + Title string `json:"title,omitempty"` // string + UISpecs OverlayUISpecs `json:"ui_specs,omitempty"` // OverlayUISpecs +} + +// OverlayUISpecs defines UI specs for overlay +type OverlayUISpecs struct { + Decouple bool `json:"decouple,omitempty"` // boolean + ID string `json:"id,omitempty"` // string + Label string `json:"label,omitempty"` // string + Type string `json:"type,omitempty"` // string + Z string `json:"z,omitempty"` // int encoded as string BUG doc: numeric, api: string +} + +// OverlayDataOptions defines overlay options for data. Note, each overlay type requires +// a _subset_ of the options. See Graph API documentation (URL above) for details. 
+type OverlayDataOptions struct { + Alerts string `json:"alerts,omitempty"` // int encoded as string BUG doc: numeric, api: string + ArrayOutput string `json:"array_output,omitempty"` // int encoded as string BUG doc: numeric, api: string + BasePeriod string `json:"base_period,omitempty"` // int encoded as string BUG doc: numeric, api: string + Delay string `json:"delay,omitempty"` // int encoded as string BUG doc: numeric, api: string + Extension string `json:"extension,omitempty"` // string + GraphTitle string `json:"graph_title,omitempty"` // string + GraphUUID string `json:"graph_id,omitempty"` // string + InPercent string `json:"in_percent,omitempty"` // boolean encoded as string BUG doc: boolean, api: string + Inverse string `json:"inverse,omitempty"` // int encoded as string BUG doc: numeric, api: string + Method string `json:"method,omitempty"` // string + Model string `json:"model,omitempty"` // string + ModelEnd string `json:"model_end,omitempty"` // string + ModelPeriod string `json:"model_period,omitempty"` // string + ModelRelative string `json:"model_relative,omitempty"` // int encoded as string BUG doc: numeric, api: string + Out string `json:"out,omitempty"` // string + Prequel string `json:"prequel,omitempty"` // int + Presets string `json:"presets,omitempty"` // string + Quantiles string `json:"quantiles,omitempty"` // string + SeasonLength string `json:"season_length,omitempty"` // int encoded as string BUG doc: numeric, api: string + Sensitivity string `json:"sensitivity,omitempty"` // int encoded as string BUG doc: numeric, api: string + SingleValue string `json:"single_value,omitempty"` // int encoded as string BUG doc: numeric, api: string + TargetPeriod string `json:"target_period,omitempty"` // string + TimeOffset string `json:"time_offset,omitempty"` // string + TimeShift string `json:"time_shift,omitempty"` // int encoded as string BUG doc: numeric, api: string + Transform string `json:"transform,omitempty"` // string + Version string `json:"version,omitempty"` // int encoded as string BUG doc: numeric, api: string + Window string `json:"window,omitempty"` // int encoded as string BUG doc: numeric, api: string + XShift string `json:"x_shift,omitempty"` // string +} + +// Graph defines a graph. See https://login.circonus.com/resources/api/calls/graph for more information. 
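+//
+// Construction sketch (illustrative; all field values are hypothetical and
+// "client" is an assumed, configured *API):
+//
+//	g := NewGraph()
+//	g.Title = "cpu usage"
+//	g.Datapoints = []GraphDatapoint{
+//		{CheckID: 1234, MetricName: "cpu_used", MetricType: "numeric", Name: "CPU"},
+//	}
+//	created, err := client.CreateGraph(g)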
+type Graph struct {
+	AccessKeys     []GraphAccessKey            `json:"access_keys,omitempty"`                // [] len >= 0
+	CID            string                      `json:"_cid,omitempty"`                       // string
+	Composites     []GraphComposite            `json:"composites,omitempty"`                 // [] len >= 0
+	Datapoints     []GraphDatapoint            `json:"datapoints,omitempty"`                 // [] len >= 0
+	Description    string                      `json:"description,omitempty"`                // string
+	Guides         []GraphGuide                `json:"guides,omitempty"`                     // [] len >= 0
+	LineStyle      *string                     `json:"line_style"`                           // string or null
+	LogLeftY       *int                        `json:"logarithmic_left_y,string,omitempty"`  // int encoded as string or null BUG doc: number (not string)
+	LogRightY      *int                        `json:"logarithmic_right_y,string,omitempty"` // int encoded as string or null BUG doc: number (not string)
+	MaxLeftY       *float64                    `json:"max_left_y,string,omitempty"`          // float64 encoded as string or null BUG doc: number (not string)
+	MaxRightY      *float64                    `json:"max_right_y,string,omitempty"`         // float64 encoded as string or null BUG doc: number (not string)
+	MetricClusters []GraphMetricCluster        `json:"metric_clusters,omitempty"`            // [] len >= 0
+	MinLeftY       *float64                    `json:"min_left_y,string,omitempty"`          // float64 encoded as string or null BUG doc: number (not string)
+	MinRightY      *float64                    `json:"min_right_y,string,omitempty"`         // float64 encoded as string or null BUG doc: number (not string)
+	Notes          *string                     `json:"notes,omitempty"`                      // string or null
+	OverlaySets    *map[string]GraphOverlaySet `json:"overlay_sets,omitempty"`               // GroupOverLaySets or null
+	Style          *string                     `json:"style"`                                // string or null
+	Tags           []string                    `json:"tags,omitempty"`                       // [] len >= 0
+	Title          string                      `json:"title,omitempty"`                      // string
+}
+
+// NewGraph returns a Graph (with defaults, if applicable)
+func NewGraph() *Graph {
+	return &Graph{}
+}
+
+// FetchGraph retrieves graph with passed cid.
+func (a *API) FetchGraph(cid CIDType) (*Graph, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid graph CID [none]")
+	}
+
+	graphCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid graph CID [%s]", graphCID)
+	}
+
+	result, err := a.Get(graphCID)
+	if err != nil {
+		return nil, err
+	}
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch graph, received JSON: %s", string(result))
+	}
+
+	graph := new(Graph)
+	if err := json.Unmarshal(result, graph); err != nil {
+		return nil, err
+	}
+
+	return graph, nil
+}
+
+// FetchGraphs retrieves all graphs available to the API Token.
+func (a *API) FetchGraphs() (*[]Graph, error) {
+	result, err := a.Get(config.GraphPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	var graphs []Graph
+	if err := json.Unmarshal(result, &graphs); err != nil {
+		return nil, err
+	}
+
+	return &graphs, nil
+}
+
+// UpdateGraph updates passed graph.
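+//
+// Sketch (illustrative; assumes a previously fetched graph "g" and a
+// configured *API named "client"):
+//
+//	g.Title = "cpu usage (updated)"
+//	updated, err := client.UpdateGraph(g)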
+func (a *API) UpdateGraph(cfg *Graph) (*Graph, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid graph config [nil]")
+	}
+
+	graphCID := string(cfg.CID)
+
+	matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid graph CID [%s]", graphCID)
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] update graph, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Put(graphCID, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	graph := &Graph{}
+	if err := json.Unmarshal(result, graph); err != nil {
+		return nil, err
+	}
+
+	return graph, nil
+}
+
+// CreateGraph creates a new graph.
+func (a *API) CreateGraph(cfg *Graph) (*Graph, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid graph config [nil]")
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] create graph, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Post(config.GraphPrefix, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	graph := &Graph{}
+	if err := json.Unmarshal(result, graph); err != nil {
+		return nil, err
+	}
+
+	return graph, nil
+}
+
+// DeleteGraph deletes passed graph.
+func (a *API) DeleteGraph(cfg *Graph) (bool, error) {
+	if cfg == nil {
+		return false, fmt.Errorf("Invalid graph config [nil]")
+	}
+	return a.DeleteGraphByCID(CIDType(&cfg.CID))
+}
+
+// DeleteGraphByCID deletes graph with passed cid.
+func (a *API) DeleteGraphByCID(cid CIDType) (bool, error) {
+	if cid == nil || *cid == "" {
+		return false, fmt.Errorf("Invalid graph CID [none]")
+	}
+
+	graphCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
+	if err != nil {
+		return false, err
+	}
+	if !matched {
+		return false, fmt.Errorf("Invalid graph CID [%s]", graphCID)
+	}
+
+	_, err = a.Delete(graphCID)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// SearchGraphs returns graphs matching the specified search query
+// and/or filter. If nil is passed for both parameters all graphs
+// will be returned.
+func (a *API) SearchGraphs(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Graph, error) {
+	q := url.Values{}
+
+	if searchCriteria != nil && *searchCriteria != "" {
+		q.Set("search", string(*searchCriteria))
+	}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchGraphs()
+	}
+
+	reqURL := url.URL{
+		Path:     config.GraphPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var graphs []Graph
+	if err := json.Unmarshal(result, &graphs); err != nil {
+		return nil, err
+	}
+
+	return &graphs, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/maintenance.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/maintenance.go
new file mode 100644
index 000000000..0e5e04729
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/maintenance.go
@@ -0,0 +1,220 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// Maintenance window API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/maintenance + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// Maintenance defines a maintenance window. See https://login.circonus.com/resources/api/calls/maintenance for more information. +type Maintenance struct { + CID string `json:"_cid,omitempty"` // string + Item string `json:"item,omitempty"` // string + Notes string `json:"notes,omitempty"` // string + Severities interface{} `json:"severities,omitempty"` // []string NOTE can be set with CSV string or []string + Start uint `json:"start,omitempty"` // uint + Stop uint `json:"stop,omitempty"` // uint + Tags []string `json:"tags,omitempty"` // [] len >= 0 + Type string `json:"type,omitempty"` // string +} + +// NewMaintenanceWindow returns a new Maintenance window (with defaults, if applicable) +func NewMaintenanceWindow() *Maintenance { + return &Maintenance{} +} + +// FetchMaintenanceWindow retrieves maintenance [window] with passed cid. +func (a *API) FetchMaintenanceWindow(cid CIDType) (*Maintenance, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid maintenance window CID [none]") + } + + maintenanceCID := string(*cid) + + matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID) + } + + result, err := a.Get(maintenanceCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch maintenance window, received JSON: %s", string(result)) + } + + window := &Maintenance{} + if err := json.Unmarshal(result, window); err != nil { + return nil, err + } + + return window, nil +} + +// FetchMaintenanceWindows retrieves all maintenance [windows] available to API Token. +func (a *API) FetchMaintenanceWindows() (*[]Maintenance, error) { + result, err := a.Get(config.MaintenancePrefix) + if err != nil { + return nil, err + } + + var windows []Maintenance + if err := json.Unmarshal(result, &windows); err != nil { + return nil, err + } + + return &windows, nil +} + +// UpdateMaintenanceWindow updates passed maintenance [window]. +func (a *API) UpdateMaintenanceWindow(cfg *Maintenance) (*Maintenance, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid maintenance window config [nil]") + } + + maintenanceCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update maintenance window, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(maintenanceCID, jsonCfg) + if err != nil { + return nil, err + } + + window := &Maintenance{} + if err := json.Unmarshal(result, window); err != nil { + return nil, err + } + + return window, nil +} + +// CreateMaintenanceWindow creates a new maintenance [window]. 
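+//
+// Sketch (illustrative; the target CID, times, and severities are
+// hypothetical, and "client" is an assumed, configured *API):
+//
+//	w := NewMaintenanceWindow()
+//	w.Type = "check"
+//	w.Item = "/check/1234"
+//	w.Severities = "1,2,3,4,5" // CSV string or []string are both accepted
+//	w.Start = uint(time.Now().Unix())
+//	w.Stop = w.Start + 3600 // one hour
+//	window, err := client.CreateMaintenanceWindow(w)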
+func (a *API) CreateMaintenanceWindow(cfg *Maintenance) (*Maintenance, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid maintenance window config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create maintenance window, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.MaintenancePrefix, jsonCfg) + if err != nil { + return nil, err + } + + window := &Maintenance{} + if err := json.Unmarshal(result, window); err != nil { + return nil, err + } + + return window, nil +} + +// DeleteMaintenanceWindow deletes passed maintenance [window]. +func (a *API) DeleteMaintenanceWindow(cfg *Maintenance) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid maintenance window config [nil]") + } + return a.DeleteMaintenanceWindowByCID(CIDType(&cfg.CID)) +} + +// DeleteMaintenanceWindowByCID deletes maintenance [window] with passed cid. +func (a *API) DeleteMaintenanceWindowByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid maintenance window CID [none]") + } + + maintenanceCID := string(*cid) + + matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID) + } + + _, err = a.Delete(maintenanceCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchMaintenanceWindows returns maintenance [windows] matching +// the specified search query and/or filter. If nil is passed for +// both parameters all maintenance [windows] will be returned. +func (a *API) SearchMaintenanceWindows(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Maintenance, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchMaintenanceWindows() + } + + reqURL := url.URL{ + Path: config.MaintenancePrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var windows []Maintenance + if err := json.Unmarshal(result, &windows); err != nil { + return nil, err + } + + return &windows, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go new file mode 100644 index 000000000..3608b06ff --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go @@ -0,0 +1,162 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Metric API support - Fetch, Create*, Update, Delete*, and Search +// See: https://login.circonus.com/resources/api/calls/metric +// * : create and delete are handled via check_bundle or check_bundle_metrics + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// Metric defines a metric. See https://login.circonus.com/resources/api/calls/metric for more information. 
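+//
+// Fetch sketch (illustrative; the CID is hypothetical and "client" is an
+// assumed, configured *API). Metrics are created/deleted via the
+// check_bundle endpoints, so only Fetch, Update, and Search apply here:
+//
+//	cid := "/metric/1234_cpu_used"
+//	m, err := client.FetchMetric(CIDType(&cid))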
+type Metric struct { + Active bool `json:"_active,omitempty"` // boolean + CheckActive bool `json:"_check_active,omitempty"` // boolean + CheckBundleCID string `json:"_check_bundle,omitempty"` // string + CheckCID string `json:"_check,omitempty"` // string + CheckTags []string `json:"_check_tags,omitempty"` // [] len >= 0 + CheckUUID string `json:"_check_uuid,omitempty"` // string + CID string `json:"_cid,omitempty"` // string + Histogram string `json:"_histogram,omitempty"` // string + Link *string `json:"link,omitempty"` // string or null + MetricName string `json:"_metric_name,omitempty"` // string + MetricType string `json:"_metric_type,omitempty"` // string + Notes *string `json:"notes,omitempty"` // string or null + Tags []string `json:"tags,omitempty"` // [] len >= 0 + Units *string `json:"units,omitempty"` // string or null +} + +// FetchMetric retrieves metric with passed cid. +func (a *API) FetchMetric(cid CIDType) (*Metric, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid metric CID [none]") + } + + metricCID := string(*cid) + + matched, err := regexp.MatchString(config.MetricCIDRegex, metricCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid metric CID [%s]", metricCID) + } + + result, err := a.Get(metricCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch metric, received JSON: %s", string(result)) + } + + metric := &Metric{} + if err := json.Unmarshal(result, metric); err != nil { + return nil, err + } + + return metric, nil +} + +// FetchMetrics retrieves all metrics available to API Token. +func (a *API) FetchMetrics() (*[]Metric, error) { + result, err := a.Get(config.MetricPrefix) + if err != nil { + return nil, err + } + + var metrics []Metric + if err := json.Unmarshal(result, &metrics); err != nil { + return nil, err + } + + return &metrics, nil +} + +// UpdateMetric updates passed metric. +func (a *API) UpdateMetric(cfg *Metric) (*Metric, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid metric config [nil]") + } + + metricCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.MetricCIDRegex, metricCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid metric CID [%s]", metricCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update metric, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(metricCID, jsonCfg) + if err != nil { + return nil, err + } + + metric := &Metric{} + if err := json.Unmarshal(result, metric); err != nil { + return nil, err + } + + return metric, nil +} + +// SearchMetrics returns metrics matching the specified search query +// and/or filter. If nil is passed for both parameters all metrics +// will be returned. 
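+//
+// Sketch (illustrative; "client" is an assumed, configured *API and the
+// query text is hypothetical):
+//
+//	search := SearchQueryType("cpu_used")
+//	metrics, err := client.SearchMetrics(&search, nil)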
+func (a *API) SearchMetrics(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Metric, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchMetrics() + } + + reqURL := url.URL{ + Path: config.MetricPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var metrics []Metric + if err := json.Unmarshal(result, &metrics); err != nil { + return nil, err + } + + return &metrics, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/metric_cluster.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/metric_cluster.go new file mode 100644 index 000000000..d29c5a674 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/metric_cluster.go @@ -0,0 +1,261 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Metric Cluster API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/metric_cluster + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// MetricQuery object +type MetricQuery struct { + Query string `json:"query"` + Type string `json:"type"` +} + +// MetricCluster defines a metric cluster. See https://login.circonus.com/resources/api/calls/metric_cluster for more information. +type MetricCluster struct { + CID string `json:"_cid,omitempty"` // string + Description string `json:"description"` // string + MatchingMetrics []string `json:"_matching_metrics,omitempty"` // [] len >= 1 (result info only, if query has extras - cannot be set) + MatchingUUIDMetrics map[string][]string `json:"_matching_uuid_metrics,omitempty"` // [] len >= 1 (result info only, if query has extras - cannot be set) + Name string `json:"name"` // string + Queries []MetricQuery `json:"queries"` // [] len >= 1 + Tags []string `json:"tags"` // [] len >= 0 +} + +// NewMetricCluster returns a new MetricCluster (with defaults, if applicable) +func NewMetricCluster() *MetricCluster { + return &MetricCluster{} +} + +// FetchMetricCluster retrieves metric cluster with passed cid. 
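+//
+// Sketch (illustrative; the CID is hypothetical and "client" is an assumed,
+// configured *API). The extras argument may be "" (none), "metrics"
+// (_matching_metrics), or "uuids" (_matching_uuid_metrics):
+//
+//	cid := "/metric_cluster/1234"
+//	cluster, err := client.FetchMetricCluster(CIDType(&cid), "metrics")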
+func (a *API) FetchMetricCluster(cid CIDType, extras string) (*MetricCluster, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid metric cluster CID [none]") + } + + clusterCID := string(*cid) + + matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID) + } + + reqURL := url.URL{ + Path: clusterCID, + } + + extra := "" + switch extras { + case "metrics": + extra = "_matching_metrics" + case "uuids": + extra = "_matching_uuid_metrics" + } + + if extra != "" { + q := url.Values{} + q.Set("extra", extra) + reqURL.RawQuery = q.Encode() + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch metric cluster, received JSON: %s", string(result)) + } + + cluster := &MetricCluster{} + if err := json.Unmarshal(result, cluster); err != nil { + return nil, err + } + + return cluster, nil +} + +// FetchMetricClusters retrieves all metric clusters available to API Token. +func (a *API) FetchMetricClusters(extras string) (*[]MetricCluster, error) { + reqURL := url.URL{ + Path: config.MetricClusterPrefix, + } + + extra := "" + switch extras { + case "metrics": + extra = "_matching_metrics" + case "uuids": + extra = "_matching_uuid_metrics" + } + + if extra != "" { + q := url.Values{} + q.Set("extra", extra) + reqURL.RawQuery = q.Encode() + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, err + } + + var clusters []MetricCluster + if err := json.Unmarshal(result, &clusters); err != nil { + return nil, err + } + + return &clusters, nil +} + +// UpdateMetricCluster updates passed metric cluster. +func (a *API) UpdateMetricCluster(cfg *MetricCluster) (*MetricCluster, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid metric cluster config [nil]") + } + + clusterCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update metric cluster, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(clusterCID, jsonCfg) + if err != nil { + return nil, err + } + + cluster := &MetricCluster{} + if err := json.Unmarshal(result, cluster); err != nil { + return nil, err + } + + return cluster, nil +} + +// CreateMetricCluster creates a new metric cluster. +func (a *API) CreateMetricCluster(cfg *MetricCluster) (*MetricCluster, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid metric cluster config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create metric cluster, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.MetricClusterPrefix, jsonCfg) + if err != nil { + return nil, err + } + + cluster := &MetricCluster{} + if err := json.Unmarshal(result, cluster); err != nil { + return nil, err + } + + return cluster, nil +} + +// DeleteMetricCluster deletes passed metric cluster. 
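+//
+// Sketch (illustrative; assumes a previously fetched cluster and a
+// configured *API named "client"):
+//
+//	ok, err := client.DeleteMetricCluster(cluster)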
+func (a *API) DeleteMetricCluster(cfg *MetricCluster) (bool, error) {
+	if cfg == nil {
+		return false, fmt.Errorf("Invalid metric cluster config [nil]")
+	}
+	return a.DeleteMetricClusterByCID(CIDType(&cfg.CID))
+}
+
+// DeleteMetricClusterByCID deletes metric cluster with passed cid.
+func (a *API) DeleteMetricClusterByCID(cid CIDType) (bool, error) {
+	if cid == nil || *cid == "" {
+		return false, fmt.Errorf("Invalid metric cluster CID [none]")
+	}
+
+	clusterCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID)
+	if err != nil {
+		return false, err
+	}
+	if !matched {
+		return false, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID)
+	}
+
+	_, err = a.Delete(clusterCID)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// SearchMetricClusters returns metric clusters matching the specified
+// search query and/or filter. If nil is passed for both parameters
+// all metric clusters will be returned.
+func (a *API) SearchMetricClusters(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]MetricCluster, error) {
+	q := url.Values{}
+
+	if searchCriteria != nil && *searchCriteria != "" {
+		q.Set("search", string(*searchCriteria))
+	}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchMetricClusters("")
+	}
+
+	reqURL := url.URL{
+		Path:     config.MetricClusterPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var clusters []MetricCluster
+	if err := json.Unmarshal(result, &clusters); err != nil {
+		return nil, err
+	}
+
+	return &clusters, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/outlier_report.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/outlier_report.go
new file mode 100644
index 000000000..bc1a4d2b3
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/outlier_report.go
@@ -0,0 +1,221 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// OutlierReport API support - Fetch, Create, Update, Delete, and Search
+// See: https://login.circonus.com/resources/api/calls/report
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// OutlierReport defines an outlier report. See https://login.circonus.com/resources/api/calls/report for more information.
+type OutlierReport struct {
+	CID              string   `json:"_cid,omitempty"`              // string
+	Config           string   `json:"config,omitempty"`            // string
+	Created          uint     `json:"_created,omitempty"`          // uint
+	CreatedBy        string   `json:"_created_by,omitempty"`       // string
+	LastModified     uint     `json:"_last_modified,omitempty"`    // uint
+	LastModifiedBy   string   `json:"_last_modified_by,omitempty"` // string
+	MetricClusterCID string   `json:"metric_cluster,omitempty"`    // string
+	Tags             []string `json:"tags,omitempty"`              // [] len >= 0
+	Title            string   `json:"title,omitempty"`             // string
+}
+
+// NewOutlierReport returns a new OutlierReport (with defaults, if applicable)
+func NewOutlierReport() *OutlierReport {
+	return &OutlierReport{}
+}
+
+// FetchOutlierReport retrieves outlier report with passed cid.
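+//
+// Sketch (illustrative; the CID is hypothetical and "client" is an assumed,
+// configured *API):
+//
+//	cid := "/outlier_report/1234"
+//	report, err := client.FetchOutlierReport(CIDType(&cid))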
+func (a *API) FetchOutlierReport(cid CIDType) (*OutlierReport, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid outlier report CID [none]") + } + + reportCID := string(*cid) + + matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid outlier report CID [%s]", reportCID) + } + + result, err := a.Get(reportCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch outlier report, received JSON: %s", string(result)) + } + + report := &OutlierReport{} + if err := json.Unmarshal(result, report); err != nil { + return nil, err + } + + return report, nil +} + +// FetchOutlierReports retrieves all outlier reports available to API Token. +func (a *API) FetchOutlierReports() (*[]OutlierReport, error) { + result, err := a.Get(config.OutlierReportPrefix) + if err != nil { + return nil, err + } + + var reports []OutlierReport + if err := json.Unmarshal(result, &reports); err != nil { + return nil, err + } + + return &reports, nil +} + +// UpdateOutlierReport updates passed outlier report. +func (a *API) UpdateOutlierReport(cfg *OutlierReport) (*OutlierReport, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid outlier report config [nil]") + } + + reportCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid outlier report CID [%s]", reportCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update outlier report, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(reportCID, jsonCfg) + if err != nil { + return nil, err + } + + report := &OutlierReport{} + if err := json.Unmarshal(result, report); err != nil { + return nil, err + } + + return report, nil +} + +// CreateOutlierReport creates a new outlier report. +func (a *API) CreateOutlierReport(cfg *OutlierReport) (*OutlierReport, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid outlier report config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create outlier report, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.OutlierReportPrefix, jsonCfg) + if err != nil { + return nil, err + } + + report := &OutlierReport{} + if err := json.Unmarshal(result, report); err != nil { + return nil, err + } + + return report, nil +} + +// DeleteOutlierReport deletes passed outlier report. +func (a *API) DeleteOutlierReport(cfg *OutlierReport) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid outlier report config [nil]") + } + return a.DeleteOutlierReportByCID(CIDType(&cfg.CID)) +} + +// DeleteOutlierReportByCID deletes outlier report with passed cid. 
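+//
+// Sketch (illustrative; the CID is hypothetical and "client" is an assumed,
+// configured *API):
+//
+//	cid := "/outlier_report/1234"
+//	ok, err := client.DeleteOutlierReportByCID(CIDType(&cid))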
+func (a *API) DeleteOutlierReportByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid outlier report CID [none]") + } + + reportCID := string(*cid) + + matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid outlier report CID [%s]", reportCID) + } + + _, err = a.Delete(reportCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchOutlierReports returns outlier report matching the +// specified search query and/or filter. If nil is passed for +// both parameters all outlier report will be returned. +func (a *API) SearchOutlierReports(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]OutlierReport, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchOutlierReports() + } + + reqURL := url.URL{ + Path: config.OutlierReportPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var reports []OutlierReport + if err := json.Unmarshal(result, &reports); err != nil { + return nil, err + } + + return &reports, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/provision_broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/provision_broker.go new file mode 100644 index 000000000..5b432a236 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/provision_broker.go @@ -0,0 +1,151 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// ProvisionBroker API support - Fetch, Create, and Update +// See: https://login.circonus.com/resources/api/calls/provision_broker +// Note that the provision_broker endpoint does not return standard cid format +// of '/object/item' (e.g. /provision_broker/abc-123) it just returns 'item' + +package api + +import ( + "encoding/json" + "fmt" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// BrokerStratcon defines stratcons for broker +type BrokerStratcon struct { + CN string `json:"cn,omitempty"` // string + Host string `json:"host,omitempty"` // string + Port string `json:"port,omitempty"` // string +} + +// ProvisionBroker defines a provision broker [request]. See https://login.circonus.com/resources/api/calls/provision_broker for more details. 
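+//
+// Construction sketch (illustrative; all values are hypothetical):
+//
+//	b := NewProvisionBroker()
+//	b.Name = "mybroker"
+//	b.IPAddress = "192.0.2.10"
+//	b.Port = "43191"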
+type ProvisionBroker struct {
+	Cert                    string           `json:"_cert,omitempty"`                     // string
+	CID                     string           `json:"_cid,omitempty"`                      // string
+	CSR                     string           `json:"_csr,omitempty"`                      // string
+	ExternalHost            string           `json:"external_host,omitempty"`             // string
+	ExternalPort            string           `json:"external_port,omitempty"`             // string
+	IPAddress               string           `json:"ipaddress,omitempty"`                 // string
+	Latitude                string           `json:"latitude,omitempty"`                  // string
+	Longitude               string           `json:"longitude,omitempty"`                 // string
+	Name                    string           `json:"noit_name,omitempty"`                 // string
+	Port                    string           `json:"port,omitempty"`                      // string
+	PreferReverseConnection bool             `json:"prefer_reverse_connection,omitempty"` // boolean
+	Rebuild                 bool             `json:"rebuild,omitempty"`                   // boolean
+	Stratcons               []BrokerStratcon `json:"_stratcons,omitempty"`                // [] len >= 1
+	Tags                    []string         `json:"tags,omitempty"`                      // [] len >= 0
+}
+
+// NewProvisionBroker returns a new ProvisionBroker (with defaults, if applicable)
+func NewProvisionBroker() *ProvisionBroker {
+	return &ProvisionBroker{}
+}
+
+// FetchProvisionBroker retrieves provision broker [request] with passed cid.
+func (a *API) FetchProvisionBroker(cid CIDType) (*ProvisionBroker, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid provision broker request CID [none]")
+	}
+
+	brokerCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.ProvisionBrokerCIDRegex, brokerCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid provision broker request CID [%s]", brokerCID)
+	}
+
+	result, err := a.Get(brokerCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch broker provision request, received JSON: %s", string(result))
+	}
+
+	broker := &ProvisionBroker{}
+	if err := json.Unmarshal(result, broker); err != nil {
+		return nil, err
+	}
+
+	return broker, nil
+}
+
+// UpdateProvisionBroker updates a broker definition [request].
+func (a *API) UpdateProvisionBroker(cid CIDType, cfg *ProvisionBroker) (*ProvisionBroker, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid provision broker request config [nil]")
+	}
+
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid provision broker request CID [none]")
+	}
+
+	brokerCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.ProvisionBrokerCIDRegex, brokerCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid provision broker request CID [%s]", brokerCID)
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] update broker provision request, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Put(brokerCID, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	broker := &ProvisionBroker{}
+	if err := json.Unmarshal(result, broker); err != nil {
+		return nil, err
+	}
+
+	return broker, nil
+}
+
+// CreateProvisionBroker creates a new provision broker [request].
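+//
+// Sketch (illustrative; assumes "b" built as in the ProvisionBroker example
+// above and a configured *API named "client"). Note the returned CID is just
+// 'item', not the standard '/object/item' form:
+//
+//	broker, err := client.CreateProvisionBroker(b)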
+func (a *API) CreateProvisionBroker(cfg *ProvisionBroker) (*ProvisionBroker, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid provision broker request config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create broker provision request, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.ProvisionBrokerPrefix, jsonCfg) + if err != nil { + return nil, err + } + + broker := &ProvisionBroker{} + if err := json.Unmarshal(result, broker); err != nil { + return nil, err + } + + return broker, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set.go new file mode 100644 index 000000000..3da0907f7 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set.go @@ -0,0 +1,234 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Rule Set API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/rule_set + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// RuleSetRule defines a ruleset rule +type RuleSetRule struct { + Criteria string `json:"criteria"` // string + Severity uint `json:"severity"` // uint + Value interface{} `json:"value"` // BUG doc: string, api: actual type returned switches based on Criteria + Wait uint `json:"wait"` // uint + WindowingDuration uint `json:"windowing_duration,omitempty"` // uint + WindowingFunction *string `json:"windowing_function,omitempty"` // string or null +} + +// RuleSet defines a ruleset. See https://login.circonus.com/resources/api/calls/rule_set for more information. +type RuleSet struct { + CheckCID string `json:"check"` // string + CID string `json:"_cid,omitempty"` // string + ContactGroups map[uint8][]string `json:"contact_groups"` // [] len 5 + Derive *string `json:"derive,omitempty"` // string or null + Link *string `json:"link"` // string or null + MetricName string `json:"metric_name"` // string + MetricTags []string `json:"metric_tags"` // [] len >= 0 + MetricType string `json:"metric_type"` // string + Notes *string `json:"notes"` // string or null + Parent *string `json:"parent,omitempty"` // string or null + Rules []RuleSetRule `json:"rules"` // [] len >= 1 + Tags []string `json:"tags"` // [] len >= 0 +} + +// NewRuleSet returns a new RuleSet (with defaults if applicable) +func NewRuleSet() *RuleSet { + return &RuleSet{} +} + +// FetchRuleSet retrieves rule set with passed cid. +func (a *API) FetchRuleSet(cid CIDType) (*RuleSet, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid rule set CID [none]") + } + + rulesetCID := string(*cid) + + matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID) + } + + result, err := a.Get(rulesetCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch rule set, received JSON: %s", string(result)) + } + + ruleset := &RuleSet{} + if err := json.Unmarshal(result, ruleset); err != nil { + return nil, err + } + + return ruleset, nil +} + +// FetchRuleSets retrieves all rule sets available to API Token. 
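Putting the RuleSet types above to work: a hedged creation sketch. All CIDs are placeholders, `"max value"` is assumed to be one of the documented criteria strings, and `ContactGroups` is keyed by severity level (1-5):

```go
package main

import (
	"log"

	"github.com/circonus-labs/circonus-gometrics/api"
)

func main() {
	client, err := api.New(&api.Config{TokenKey: "abc123-token"}) // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	rs := api.NewRuleSet()
	rs.CheckCID = "/check/12345" // hypothetical check
	rs.MetricName = "duration"
	rs.MetricType = "numeric"
	rs.MetricTags = []string{}
	rs.Tags = []string{}
	rs.ContactGroups = map[uint8][]string{ // keyed by severity 1-5
		1: {"/contact_group/678"}, // hypothetical contact group
	}
	rs.Rules = []api.RuleSetRule{
		{
			Criteria: "max value", // assumed criteria string
			Severity: 1,
			Value:    "500", // threshold; its type varies with Criteria per the BUG note above
			Wait:     5,
		},
	}

	created, err := client.CreateRuleSet(rs)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created rule set %s", created.CID)
}
```

FetchRuleSets itself follows.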
+func (a *API) FetchRuleSets() (*[]RuleSet, error) { + result, err := a.Get(config.RuleSetPrefix) + if err != nil { + return nil, err + } + + var rulesets []RuleSet + if err := json.Unmarshal(result, &rulesets); err != nil { + return nil, err + } + + return &rulesets, nil +} + +// UpdateRuleSet updates passed rule set. +func (a *API) UpdateRuleSet(cfg *RuleSet) (*RuleSet, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid rule set config [nil]") + } + + rulesetCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update rule set, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(rulesetCID, jsonCfg) + if err != nil { + return nil, err + } + + ruleset := &RuleSet{} + if err := json.Unmarshal(result, ruleset); err != nil { + return nil, err + } + + return ruleset, nil +} + +// CreateRuleSet creates a new rule set. +func (a *API) CreateRuleSet(cfg *RuleSet) (*RuleSet, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid rule set config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create rule set, sending JSON: %s", string(jsonCfg)) + } + + resp, err := a.Post(config.RuleSetPrefix, jsonCfg) + if err != nil { + return nil, err + } + + ruleset := &RuleSet{} + if err := json.Unmarshal(resp, ruleset); err != nil { + return nil, err + } + + return ruleset, nil +} + +// DeleteRuleSet deletes passed rule set. +func (a *API) DeleteRuleSet(cfg *RuleSet) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid rule set config [nil]") + } + return a.DeleteRuleSetByCID(CIDType(&cfg.CID)) +} + +// DeleteRuleSetByCID deletes rule set with passed cid. +func (a *API) DeleteRuleSetByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid rule set CID [none]") + } + + rulesetCID := string(*cid) + + matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID) + } + + _, err = a.Delete(rulesetCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchRuleSets returns rule sets matching the specified search +// query and/or filter. If nil is passed for both parameters all +// rule sets will be returned. 
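Note that UpdateRuleSet above validates `cfg.CID` rather than taking a separate cid argument, so the natural flow is fetch, mutate, update. A short sketch with a hypothetical CID:

```go
package main

import "github.com/circonus-labs/circonus-gometrics/api"

// bumpRuleWaits lengthens the wait on every rule of a rule set and
// pushes the change back; the CID is hypothetical.
func bumpRuleWaits(client *api.API) (*api.RuleSet, error) {
	cid := "/rule_set/12345_duration"
	rs, err := client.FetchRuleSet(api.CIDType(&cid))
	if err != nil {
		return nil, err
	}
	for i := range rs.Rules {
		rs.Rules[i].Wait = 10
	}
	return client.UpdateRuleSet(rs) // validates rs.CID internally
}
```

SearchRuleSets, defined next, mirrors the outlier-report search earlier in this patch.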
+func (a *API) SearchRuleSets(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]RuleSet, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchRuleSets() + } + + reqURL := url.URL{ + Path: config.RuleSetPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var rulesets []RuleSet + if err := json.Unmarshal(result, &rulesets); err != nil { + return nil, err + } + + return &rulesets, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go new file mode 100644 index 000000000..382c9221c --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go @@ -0,0 +1,231 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// RuleSetGroup API support - Fetch, Create, Update, Delete, and Search +// See: https://login.circonus.com/resources/api/calls/rule_set_group + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// RuleSetGroupFormula defines a formula for raising alerts +type RuleSetGroupFormula struct { + Expression interface{} `json:"expression"` // string or uint BUG doc: string, api: string or numeric + RaiseSeverity uint `json:"raise_severity"` // uint + Wait uint `json:"wait"` // uint +} + +// RuleSetGroupCondition defines conditions for raising alerts +type RuleSetGroupCondition struct { + MatchingSeverities []string `json:"matching_serverities"` // [] len >= 1 + RuleSetCID string `json:"rule_set"` // string +} + +// RuleSetGroup defines a ruleset group. See https://login.circonus.com/resources/api/calls/rule_set_group for more information. +type RuleSetGroup struct { + CID string `json:"_cid,omitempty"` // string + ContactGroups map[uint8][]string `json:"contact_groups"` // [] len == 5 + Formulas []RuleSetGroupFormula `json:"formulas"` // [] len >= 0 + Name string `json:"name"` // string + RuleSetConditions []RuleSetGroupCondition `json:"rule_set_conditions"` // [] len >= 1 + Tags []string `json:"tags"` // [] len >= 0 +} + +// NewRuleSetGroup returns a new RuleSetGroup (with defaults, if applicable) +func NewRuleSetGroup() *RuleSetGroup { + return &RuleSetGroup{} +} + +// FetchRuleSetGroup retrieves rule set group with passed cid. 
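Tying the group types together: a sketch of a group that pages (severity 1) only when two underlying rule sets alert at once. All CIDs are placeholders, and the `"A and B"` expression is only illustrative of the formula syntax (the BUG note above flags that the API may also return expressions as numbers):

```go
package main

import "github.com/circonus-labs/circonus-gometrics/api"

// createWebTierGroup pages only when both underlying rule sets alert
// together; every CID here is hypothetical.
func createWebTierGroup(client *api.API) (*api.RuleSetGroup, error) {
	grp := api.NewRuleSetGroup()
	grp.Name = "web tier"
	grp.Tags = []string{}
	grp.ContactGroups = map[uint8][]string{1: {"/contact_group/678"}}
	grp.RuleSetConditions = []api.RuleSetGroupCondition{
		{RuleSetCID: "/rule_set/123_cpu", MatchingSeverities: []string{"1", "2"}},
		{RuleSetCID: "/rule_set/456_latency", MatchingSeverities: []string{"1"}},
	}
	grp.Formulas = []api.RuleSetGroupFormula{
		{Expression: "A and B", RaiseSeverity: 1, Wait: 5}, // illustrative expression
	}
	return client.CreateRuleSetGroup(grp)
}
```

FetchRuleSetGroup, defined next, applies the usual CID guard.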
+func (a *API) FetchRuleSetGroup(cid CIDType) (*RuleSetGroup, error) { + if cid == nil || *cid == "" { + return nil, fmt.Errorf("Invalid rule set group CID [none]") + } + + groupCID := string(*cid) + + matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid rule set group CID [%s]", groupCID) + } + + result, err := a.Get(groupCID) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] fetch rule set group, received JSON: %s", string(result)) + } + + rulesetGroup := &RuleSetGroup{} + if err := json.Unmarshal(result, rulesetGroup); err != nil { + return nil, err + } + + return rulesetGroup, nil +} + +// FetchRuleSetGroups retrieves all rule set groups available to API Token. +func (a *API) FetchRuleSetGroups() (*[]RuleSetGroup, error) { + result, err := a.Get(config.RuleSetGroupPrefix) + if err != nil { + return nil, err + } + + var rulesetGroups []RuleSetGroup + if err := json.Unmarshal(result, &rulesetGroups); err != nil { + return nil, err + } + + return &rulesetGroups, nil +} + +// UpdateRuleSetGroup updates passed rule set group. +func (a *API) UpdateRuleSetGroup(cfg *RuleSetGroup) (*RuleSetGroup, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid rule set group config [nil]") + } + + groupCID := string(cfg.CID) + + matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID) + if err != nil { + return nil, err + } + if !matched { + return nil, fmt.Errorf("Invalid rule set group CID [%s]", groupCID) + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] update rule set group, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Put(groupCID, jsonCfg) + if err != nil { + return nil, err + } + + groups := &RuleSetGroup{} + if err := json.Unmarshal(result, groups); err != nil { + return nil, err + } + + return groups, nil +} + +// CreateRuleSetGroup creates a new rule set group. +func (a *API) CreateRuleSetGroup(cfg *RuleSetGroup) (*RuleSetGroup, error) { + if cfg == nil { + return nil, fmt.Errorf("Invalid rule set group config [nil]") + } + + jsonCfg, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + if a.Debug { + a.Log.Printf("[DEBUG] create rule set group, sending JSON: %s", string(jsonCfg)) + } + + result, err := a.Post(config.RuleSetGroupPrefix, jsonCfg) + if err != nil { + return nil, err + } + + group := &RuleSetGroup{} + if err := json.Unmarshal(result, group); err != nil { + return nil, err + } + + return group, nil +} + +// DeleteRuleSetGroup deletes passed rule set group. +func (a *API) DeleteRuleSetGroup(cfg *RuleSetGroup) (bool, error) { + if cfg == nil { + return false, fmt.Errorf("Invalid rule set group config [nil]") + } + return a.DeleteRuleSetGroupByCID(CIDType(&cfg.CID)) +} + +// DeleteRuleSetGroupByCID deletes rule set group with passed cid. 
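Both deletion entry points return `(bool, error)`, where `true` only confirms the DELETE round trip succeeded; `DeleteRuleSetGroup` simply delegates to the ByCID variant using `cfg.CID`. A minimal sketch with a hypothetical CID:

```go
package main

import "github.com/circonus-labs/circonus-gometrics/api"

// deleteGroup removes a rule set group by CID; the CID is hypothetical.
func deleteGroup(client *api.API) (bool, error) {
	cid := "/rule_set_group/1234"
	return client.DeleteRuleSetGroupByCID(api.CIDType(&cid))
}
```

DeleteRuleSetGroupByCID itself follows.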
+func (a *API) DeleteRuleSetGroupByCID(cid CIDType) (bool, error) { + if cid == nil || *cid == "" { + return false, fmt.Errorf("Invalid rule set group CID [none]") + } + + groupCID := string(*cid) + + matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID) + if err != nil { + return false, err + } + if !matched { + return false, fmt.Errorf("Invalid rule set group CID [%s]", groupCID) + } + + _, err = a.Delete(groupCID) + if err != nil { + return false, err + } + + return true, nil +} + +// SearchRuleSetGroups returns rule set groups matching the +// specified search query and/or filter. If nil is passed for +// both parameters all rule set groups will be returned. +func (a *API) SearchRuleSetGroups(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]RuleSetGroup, error) { + q := url.Values{} + + if searchCriteria != nil && *searchCriteria != "" { + q.Set("search", string(*searchCriteria)) + } + + if filterCriteria != nil && len(*filterCriteria) > 0 { + for filter, criteria := range *filterCriteria { + for _, val := range criteria { + q.Add(filter, val) + } + } + } + + if q.Encode() == "" { + return a.FetchRuleSetGroups() + } + + reqURL := url.URL{ + Path: config.RuleSetGroupPrefix, + RawQuery: q.Encode(), + } + + result, err := a.Get(reqURL.String()) + if err != nil { + return nil, fmt.Errorf("[ERROR] API call error %+v", err) + } + + var groups []RuleSetGroup + if err := json.Unmarshal(result, &groups); err != nil { + return nil, err + } + + return &groups, nil +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/user.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/user.go new file mode 100644 index 000000000..7771991d3 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/user.go @@ -0,0 +1,159 @@ +// Copyright 2016 Circonus, Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// User API support - Fetch, Update, and Search +// See: https://login.circonus.com/resources/api/calls/user +// Note: Create and Delete are not supported directly via the User API +// endpoint. See the Account endpoint for inviting and removing users +// from specific accounts. + +package api + +import ( + "encoding/json" + "fmt" + "net/url" + "regexp" + + "github.com/circonus-labs/circonus-gometrics/api/config" +) + +// UserContactInfo defines known contact details +type UserContactInfo struct { + SMS string `json:"sms,omitempty"` // string + XMPP string `json:"xmpp,omitempty"` // string +} + +// User defines a user. See https://login.circonus.com/resources/api/calls/user for more information. +type User struct { + CID string `json:"_cid,omitempty"` // string + ContactInfo UserContactInfo `json:"contact_info,omitempty"` // UserContactInfo + Email string `json:"email"` // string + Firstname string `json:"firstname"` // string + Lastname string `json:"lastname"` // string +} + +// FetchUser retrieves user with passed cid. Pass nil for '/user/current'. 
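A usage sketch for the user endpoints that follow. Per the comment above, a nil CID resolves to `/user/current`; the `f_email` filter key and the address are assumptions for illustration:

```go
package main

import (
	"log"

	"github.com/circonus-labs/circonus-gometrics/api"
)

// inspectUsers fetches the caller's own record, then filters users by
// email. f_email is an assumed filter key; the address is a placeholder.
func inspectUsers(client *api.API) error {
	me, err := client.FetchUser(nil) // nil CID -> /user/current
	if err != nil {
		return err
	}
	log.Printf("current user: %s %s", me.Firstname, me.Lastname)

	filter := api.SearchFilterType{"f_email": []string{"ops@example.com"}}
	users, err := client.SearchUsers(&filter)
	if err != nil {
		return err
	}
	log.Printf("matched %d user(s)", len(*users))
	return nil
}
```

FetchUser is defined next.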
+func (a *API) FetchUser(cid CIDType) (*User, error) {
+	var userCID string
+
+	if cid == nil || *cid == "" {
+		userCID = config.UserPrefix + "/current"
+	} else {
+		userCID = string(*cid)
+	}
+
+	matched, err := regexp.MatchString(config.UserCIDRegex, userCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid user CID [%s]", userCID)
+	}
+
+	result, err := a.Get(userCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch user, received JSON: %s", string(result))
+	}
+
+	user := new(User)
+	if err := json.Unmarshal(result, user); err != nil {
+		return nil, err
+	}
+
+	return user, nil
+}
+
+// FetchUsers retrieves all users available to API Token.
+func (a *API) FetchUsers() (*[]User, error) {
+	result, err := a.Get(config.UserPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	var users []User
+	if err := json.Unmarshal(result, &users); err != nil {
+		return nil, err
+	}
+
+	return &users, nil
+}
+
+// UpdateUser updates passed user.
+func (a *API) UpdateUser(cfg *User) (*User, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid user config [nil]")
+	}
+
+	userCID := string(cfg.CID)
+
+	matched, err := regexp.MatchString(config.UserCIDRegex, userCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid user CID [%s]", userCID)
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] update user, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Put(userCID, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	user := &User{}
+	if err := json.Unmarshal(result, user); err != nil {
+		return nil, err
+	}
+
+	return user, nil
+}
+
+// SearchUsers returns users matching a filter (search queries
+// are not supported by the user endpoint). Pass nil as filter for all
+// users available to the API Token.
+func (a *API) SearchUsers(filterCriteria *SearchFilterType) (*[]User, error) {
+	q := url.Values{}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchUsers()
+	}
+
+	reqURL := url.URL{
+		Path:     config.UserPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var users []User
+	if err := json.Unmarshal(result, &users); err != nil {
+		return nil, err
+	}
+
+	return &users, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go
new file mode 100644
index 000000000..d9d9675f9
--- /dev/null
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go
@@ -0,0 +1,234 @@
+// Copyright 2016 Circonus, Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Worksheet API support - Fetch, Create, Update, Delete, and Search
+// See: https://login.circonus.com/resources/api/calls/worksheet
+
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"regexp"
+
+	"github.com/circonus-labs/circonus-gometrics/api/config"
+)
+
+// WorksheetGraph defines a graph cid to be included in the worksheet
+type WorksheetGraph struct {
+	GraphCID string `json:"graph"` // string
+}
+
+// WorksheetSmartQuery defines a query to include multiple graphs in the worksheet
+type WorksheetSmartQuery struct {
+	Name  string   `json:"name"`
+	Order []string `json:"order"`
+	Query string   `json:"query"`
+}
+
+// Worksheet defines a worksheet. See https://login.circonus.com/resources/api/calls/worksheet for more information.
+type Worksheet struct {
+	CID          string                `json:"_cid,omitempty"`          // string
+	Description  *string               `json:"description"`             // string or null
+	Favorite     bool                  `json:"favorite"`                // boolean
+	Graphs       []WorksheetGraph      `json:"graphs"`                  // [] len >= 0
+	Notes        *string               `json:"notes"`                   // string or null
+	SmartQueries []WorksheetSmartQuery `json:"smart_queries,omitempty"` // [] len >= 0
+	Tags         []string              `json:"tags"`                    // [] len >= 0
+	Title        string                `json:"title"`                   // string
+}
+
+// NewWorksheet returns a new Worksheet (with defaults, if applicable)
+func NewWorksheet() *Worksheet {
+	return &Worksheet{
+		Graphs: []WorksheetGraph{}, // graphs is a required attribute and cannot be null
+	}
+}
+
+// FetchWorksheet retrieves worksheet with passed cid.
+func (a *API) FetchWorksheet(cid CIDType) (*Worksheet, error) {
+	if cid == nil || *cid == "" {
+		return nil, fmt.Errorf("Invalid worksheet CID [none]")
+	}
+
+	worksheetCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
+	}
+
+	result, err := a.Get(worksheetCID)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] fetch worksheet, received JSON: %s", string(result))
+	}
+
+	worksheet := new(Worksheet)
+	if err := json.Unmarshal(result, worksheet); err != nil {
+		return nil, err
+	}
+
+	return worksheet, nil
+}
+
+// FetchWorksheets retrieves all worksheets available to API Token.
+func (a *API) FetchWorksheets() (*[]Worksheet, error) {
+	result, err := a.Get(config.WorksheetPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	var worksheets []Worksheet
+	if err := json.Unmarshal(result, &worksheets); err != nil {
+		return nil, err
+	}
+
+	return &worksheets, nil
+}
+
+// UpdateWorksheet updates passed worksheet.
+func (a *API) UpdateWorksheet(cfg *Worksheet) (*Worksheet, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid worksheet config [nil]")
+	}
+
+	worksheetCID := string(cfg.CID)
+
+	matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
+	if err != nil {
+		return nil, err
+	}
+	if !matched {
+		return nil, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] update worksheet, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Put(worksheetCID, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	worksheet := &Worksheet{}
+	if err := json.Unmarshal(result, worksheet); err != nil {
+		return nil, err
+	}
+
+	return worksheet, nil
+}
+
+// CreateWorksheet creates a new worksheet.
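Before CreateWorksheet's definition, a sketch showing why `NewWorksheet` matters here: it seeds `Graphs` with an empty, non-null slice, which the comment above notes the API requires. The graph CID and smart query are illustrative:

```go
package main

import "github.com/circonus-labs/circonus-gometrics/api"

// createDashboard builds a worksheet via NewWorksheet so Graphs starts
// as an empty, non-null slice; the CID and query are placeholders.
func createDashboard(client *api.API) (*api.Worksheet, error) {
	ws := api.NewWorksheet()
	ws.Title = "web tier"
	ws.Tags = []string{}
	ws.Graphs = []api.WorksheetGraph{
		{GraphCID: "/graph/abc-123"}, // hypothetical graph
	}
	ws.SmartQueries = []api.WorksheetSmartQuery{
		{Name: "web graphs", Query: "(tags:service)", Order: []string{}},
	}
	return client.CreateWorksheet(ws)
}
```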
+func (a *API) CreateWorksheet(cfg *Worksheet) (*Worksheet, error) {
+	if cfg == nil {
+		return nil, fmt.Errorf("Invalid worksheet config [nil]")
+	}
+
+	jsonCfg, err := json.Marshal(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	if a.Debug {
+		a.Log.Printf("[DEBUG] create worksheet, sending JSON: %s", string(jsonCfg))
+	}
+
+	result, err := a.Post(config.WorksheetPrefix, jsonCfg)
+	if err != nil {
+		return nil, err
+	}
+
+	worksheet := &Worksheet{}
+	if err := json.Unmarshal(result, worksheet); err != nil {
+		return nil, err
+	}
+
+	return worksheet, nil
+}
+
+// DeleteWorksheet deletes passed worksheet.
+func (a *API) DeleteWorksheet(cfg *Worksheet) (bool, error) {
+	if cfg == nil {
+		return false, fmt.Errorf("Invalid worksheet config [nil]")
+	}
+	return a.DeleteWorksheetByCID(CIDType(&cfg.CID))
+}
+
+// DeleteWorksheetByCID deletes worksheet with passed cid.
+func (a *API) DeleteWorksheetByCID(cid CIDType) (bool, error) {
+	if cid == nil || *cid == "" {
+		return false, fmt.Errorf("Invalid worksheet CID [none]")
+	}
+
+	worksheetCID := string(*cid)
+
+	matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
+	if err != nil {
+		return false, err
+	}
+	if !matched {
+		return false, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
+	}
+
+	_, err = a.Delete(worksheetCID)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// SearchWorksheets returns worksheets matching the specified search
+// query and/or filter. If nil is passed for both parameters all
+// worksheets will be returned.
+func (a *API) SearchWorksheets(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Worksheet, error) {
+	q := url.Values{}
+
+	if searchCriteria != nil && *searchCriteria != "" {
+		q.Set("search", string(*searchCriteria))
+	}
+
+	if filterCriteria != nil && len(*filterCriteria) > 0 {
+		for filter, criteria := range *filterCriteria {
+			for _, val := range criteria {
+				q.Add(filter, val)
+			}
+		}
+	}
+
+	if q.Encode() == "" {
+		return a.FetchWorksheets()
+	}
+
+	reqURL := url.URL{
+		Path:     config.WorksheetPrefix,
+		RawQuery: q.Encode(),
+	}
+
+	result, err := a.Get(reqURL.String())
+	if err != nil {
+		return nil, fmt.Errorf("[ERROR] API call error %+v", err)
+	}
+
+	var worksheets []Worksheet
+	if err := json.Unmarshal(result, &worksheets); err != nil {
+		return nil, err
+	}
+
+	return &worksheets, nil
+}
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go
index 78fff7606..221d8a247 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go
@@ -24,7 +24,8 @@ func init() {
 // Get Broker to use when creating a check
 func (cm *CheckManager) getBroker() (*api.Broker, error) {
 	if cm.brokerID != 0 {
-		broker, err := cm.apih.FetchBrokerByID(cm.brokerID)
+		cid := fmt.Sprintf("/broker/%d", cm.brokerID)
+		broker, err := cm.apih.FetchBroker(api.CIDType(&cid))
 		if err != nil {
 			return nil, err
 		}
@@ -60,7 +61,7 @@ func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLTyp
 	cn := ""
 
 	for _, detail := range broker.Details {
-		if detail.IP == host {
+		if *detail.IP == host {
 			cn = detail.CN
 			break
 		}
@@ -77,32 +78,37 @@ func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLTyp
 
 // Select a broker for use when creating a check, if a specific broker
 // was not specified.
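The selectBroker and getBroker paths below are driven entirely by the check manager's broker configuration. A hedged setup sketch; the token, broker ID, and tag are placeholders, and `SelectTag` is assumed to be the field feeding `cm.brokerSelectTag`:

```go
package main

import (
	"log"

	"github.com/circonus-labs/circonus-gometrics/checkmgr"
)

func main() {
	cfg := &checkmgr.Config{}
	cfg.API.TokenKey = "abc123-token" // placeholder token

	// Pin a broker explicitly; getBroker turns this into the CID /broker/275 ...
	cfg.Broker.ID = "275" // hypothetical broker ID
	// ... or leave ID empty and let selectBroker choose among brokers
	// carrying a tag:
	//   cfg.Broker.SelectTag = "datacenter:east"
	cfg.Broker.MaxResponseTime = "500ms" // dial deadline used by isValidBroker

	cm, err := checkmgr.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	cm.Initialize() // trap/broker discovery runs in the background
}
```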
func (cm *CheckManager) selectBroker() (*api.Broker, error) { - var brokerList []api.Broker + var brokerList *[]api.Broker var err error + enterpriseType := "enterprise" if len(cm.brokerSelectTag) > 0 { - brokerList, err = cm.apih.FetchBrokerListByTag(cm.brokerSelectTag) + filter := api.SearchFilterType{ + "f__tags_has": cm.brokerSelectTag, + } + brokerList, err = cm.apih.SearchBrokers(nil, &filter) if err != nil { return nil, err } } else { - brokerList, err = cm.apih.FetchBrokerList() + brokerList, err = cm.apih.FetchBrokers() if err != nil { return nil, err } } - if len(brokerList) == 0 { + if len(*brokerList) == 0 { return nil, fmt.Errorf("zero brokers found") } validBrokers := make(map[string]api.Broker) haveEnterprise := false - for _, broker := range brokerList { + for _, broker := range *brokerList { + broker := broker if cm.isValidBroker(&broker) { - validBrokers[broker.Cid] = broker - if broker.Type == "enterprise" { + validBrokers[broker.CID] = broker + if broker.Type == enterpriseType { haveEnterprise = true } } @@ -110,14 +116,14 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) { if haveEnterprise { // eliminate non-enterprise brokers from valid brokers for k, v := range validBrokers { - if v.Type != "enterprise" { + if v.Type != enterpriseType { delete(validBrokers, k) } } } if len(validBrokers) == 0 { - return nil, fmt.Errorf("found %d broker(s), zero are valid", len(brokerList)) + return nil, fmt.Errorf("found %d broker(s), zero are valid", len(*brokerList)) } validBrokerKeys := reflect.ValueOf(validBrokers).MapKeys() @@ -134,8 +140,20 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) { // Verify broker supports the check type to be used func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details *api.BrokerDetail) bool { + baseType := string(checkType) + for _, module := range details.Modules { - if CheckTypeType(module) == checkType { + if module == baseType { + return true + } + } + + if idx := strings.Index(baseType, ":"); idx > 0 { + baseType = baseType[0:idx] + } + + for _, module := range details.Modules { + if module == baseType { return true } } @@ -146,10 +164,17 @@ func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details // Is the broker valid (active, supports check type, and reachable) func (cm *CheckManager) isValidBroker(broker *api.Broker) bool { - brokerHost := "" - brokerPort := "" + var brokerHost string + var brokerPort string + + if broker.Type != "circonus" && broker.Type != "enterprise" { + return false + } + valid := false + for _, detail := range broker.Details { + detail := detail // broker must be active if detail.Status != statusActive { @@ -168,49 +193,50 @@ func (cm *CheckManager) isValidBroker(broker *api.Broker) bool { } if detail.ExternalPort != 0 { - brokerPort = strconv.Itoa(detail.ExternalPort) + brokerPort = strconv.Itoa(int(detail.ExternalPort)) } else { - if detail.Port != 0 { - brokerPort = strconv.Itoa(detail.Port) + if detail.Port != nil && *detail.Port != 0 { + brokerPort = strconv.Itoa(int(*detail.Port)) } else { brokerPort = "43191" } } - if detail.ExternalHost != "" { - brokerHost = detail.ExternalHost - } else { - brokerHost = detail.IP + if detail.ExternalHost != nil && *detail.ExternalHost != "" { + brokerHost = *detail.ExternalHost + } else if detail.IP != nil && *detail.IP != "" { + brokerHost = *detail.IP } - // broker must be reachable and respond within designated time - conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%s", brokerHost, 
brokerPort), cm.brokerMaxResponseTime) - if err != nil { - if detail.CN != "trap.noit.circonus.net" { - if cm.Debug { - cm.Log.Printf("[DEBUG] Broker '%s' unable to connect, %v\n", broker.Name, err) - } - continue // not able to reach the broker (or respone slow enough for it to be considered not usable) - } - // if circonus trap broker, try port 443 + if brokerHost == "" { + cm.Log.Printf("[WARN] Broker '%s' instance %s has no IP or external host set", broker.Name, detail.CN) + continue + } + + if brokerHost == "trap.noit.circonus.net" && brokerPort != "443" { brokerPort = "443" - conn, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%s", detail.CN, brokerPort), cm.brokerMaxResponseTime) - if err != nil { - if cm.Debug { - cm.Log.Printf("[DEBUG] Broker '%s' unable to connect %v\n", broker.Name, err) - } - continue // not able to reach the broker on 443 either (or respone slow enough for it to be considered not usable) + } + + retries := 5 + for attempt := 1; attempt <= retries; attempt++ { + // broker must be reachable and respond within designated time + conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%s", brokerHost, brokerPort), cm.brokerMaxResponseTime) + if err == nil { + conn.Close() + valid = true + break } - } - conn.Close() - if cm.Debug { - cm.Log.Printf("[DEBUG] Broker '%s' is valid\n", broker.Name) + cm.Log.Printf("[WARN] Broker '%s' unable to connect, %v. Retrying in 2 seconds, attempt %d of %d.", broker.Name, err, attempt, retries) + time.Sleep(2 * time.Second) } - valid = true - break - + if valid { + if cm.Debug { + cm.Log.Printf("[DEBUG] Broker '%s' is valid\n", broker.Name) + } + break + } } return valid } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go index c10ffd12b..cbe3ba706 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go @@ -7,6 +7,7 @@ package checkmgr import ( "crypto/x509" "encoding/json" + "errors" "fmt" ) @@ -41,17 +42,22 @@ type CACert struct { } // loadCACert loads the CA cert for the broker designated by the submission url -func (cm *CheckManager) loadCACert() { +func (cm *CheckManager) loadCACert() error { if cm.certPool != nil { - return + return nil } cm.certPool = x509.NewCertPool() - cert, err := cm.fetchCert() - if err != nil { - if cm.Debug { - cm.Log.Printf("[DEBUG] Unable to fetch ca.crt, using default. %+v\n", err) + var cert []byte + var err error + + if cm.enabled { + // only attempt to retrieve broker CA cert if + // the check is being managed. 
+ cert, err = cm.fetchCert() + if err != nil { + return err } } @@ -60,12 +66,14 @@ func (cm *CheckManager) loadCACert() { } cm.certPool.AppendCertsFromPEM(cert) + + return nil } // fetchCert fetches CA certificate using Circonus API func (cm *CheckManager) fetchCert() ([]byte, error) { if !cm.enabled { - return circonusCA, nil + return nil, errors.New("check manager is not enabled") } response, err := cm.apih.Get("/pki/ca.crt") diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go index 201ef1e0c..2f0c9eb13 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go @@ -10,11 +10,13 @@ import ( "encoding/hex" "errors" "fmt" + "net/url" "strconv" "strings" "time" "github.com/circonus-labs/circonus-gometrics/api" + "github.com/circonus-labs/circonus-gometrics/api/config" ) // UpdateCheck determines if the check needs to be updated (new metrics, tags, etc.) @@ -35,7 +37,8 @@ func (cm *CheckManager) UpdateCheck(newMetrics map[string]*api.CheckBundleMetric } // refresh check bundle (in case there were changes made by other apps or in UI) - checkBundle, err := cm.apih.FetchCheckBundleByCID(api.CIDType(cm.checkBundle.Cid)) + cid := cm.checkBundle.CID + checkBundle, err := cm.apih.FetchCheckBundle(api.CIDType(&cid)) if err != nil { cm.Log.Printf("[ERROR] unable to fetch up-to-date check bundle %v", err) return @@ -44,6 +47,8 @@ func (cm *CheckManager) UpdateCheck(newMetrics map[string]*api.CheckBundleMetric cm.checkBundle = checkBundle cm.cbmu.Unlock() + // check metric_limit and see if it’s 0, if so, don't even bother to try to update the check. + cm.addNewMetrics(newMetrics) if len(cm.metricTags) > 0 { @@ -105,7 +110,7 @@ func (cm *CheckManager) initializeTrapURL() error { } if !cm.enabled { - return errors.New("Unable to initialize trap, check manager is disabled.") + return errors.New("unable to initialize trap, check manager is disabled") } var err error @@ -114,12 +119,12 @@ func (cm *CheckManager) initializeTrapURL() error { var broker *api.Broker if cm.checkSubmissionURL != "" { - check, err = cm.apih.FetchCheckBySubmissionURL(cm.checkSubmissionURL) + check, err = cm.fetchCheckBySubmissionURL(cm.checkSubmissionURL) if err != nil { return err } if !check.Active { - return fmt.Errorf("[ERROR] Check ID %v is not active", check.Cid) + return fmt.Errorf("[ERROR] Check ID %v is not active", check.CID) } // extract check id from check object returned from looking up using submission url // set m.CheckId to the id @@ -128,30 +133,44 @@ func (cm *CheckManager) initializeTrapURL() error { // unless the new submission url can be fetched with the API (which is no // longer possible using the original submission url) var id int - id, err = strconv.Atoi(strings.Replace(check.Cid, "/check/", "", -1)) + id, err = strconv.Atoi(strings.Replace(check.CID, "/check/", "", -1)) if err == nil { cm.checkID = api.IDType(id) cm.checkSubmissionURL = "" } else { cm.Log.Printf( "[WARN] SubmissionUrl check to Check ID: unable to convert %s to int %q\n", - check.Cid, err) + check.CID, err) } } else if cm.checkID > 0 { - check, err = cm.apih.FetchCheckByID(cm.checkID) + cid := fmt.Sprintf("/check/%d", cm.checkID) + check, err = cm.apih.FetchCheck(api.CIDType(&cid)) if err != nil { return err } if !check.Active { - return fmt.Errorf("[ERROR] Check ID %v is not active", check.Cid) + return fmt.Errorf("[ERROR] Check ID %v is not 
active", check.CID) } } else { - searchCriteria := fmt.Sprintf( - "(active:1)(host:\"%s\")(type:\"%s\")(tags:%s)(notes:%s)", - cm.checkTarget, cm.checkType, strings.Join(cm.checkSearchTag, ","), fmt.Sprintf("cgm_instanceid=%s", cm.checkInstanceID)) - checkBundle, err = cm.checkBundleSearch(searchCriteria) - if err != nil { - return err + if checkBundle == nil { + // old search (instanceid as check.target) + searchCriteria := fmt.Sprintf( + "(active:1)(type:\"%s\")(host:\"%s\")(tags:%s)", cm.checkType, cm.checkTarget, strings.Join(cm.checkSearchTag, ",")) + checkBundle, err = cm.checkBundleSearch(searchCriteria, map[string][]string{}) + if err != nil { + return err + } + } + + if checkBundle == nil { + // new search (check.target != instanceid, instanceid encoded in notes field) + searchCriteria := fmt.Sprintf( + "(active:1)(type:\"%s\")(tags:%s)", cm.checkType, strings.Join(cm.checkSearchTag, ",")) + filterCriteria := map[string][]string{"f_notes": {*cm.getNotes()}} + checkBundle, err = cm.checkBundleSearch(searchCriteria, filterCriteria) + if err != nil { + return err + } } if checkBundle == nil { @@ -166,7 +185,8 @@ func (cm *CheckManager) initializeTrapURL() error { if checkBundle == nil { if check != nil { - checkBundle, err = cm.apih.FetchCheckBundleByCID(api.CIDType(check.CheckBundleCid)) + cid := check.CheckBundleCID + checkBundle, err = cm.apih.FetchCheckBundle(api.CIDType(&cid)) if err != nil { return err } @@ -176,7 +196,8 @@ func (cm *CheckManager) initializeTrapURL() error { } if broker == nil { - broker, err = cm.apih.FetchBrokerByCID(api.CIDType(checkBundle.Brokers[0])) + cid := checkBundle.Brokers[0] + broker, err = cm.apih.FetchBroker(api.CIDType(&cid)) if err != nil { return err } @@ -188,7 +209,14 @@ func (cm *CheckManager) initializeTrapURL() error { // determine the trap url to which metrics should be PUT if checkBundle.Type == "httptrap" { - cm.trapURL = api.URLType(checkBundle.Config.SubmissionURL) + if turl, found := checkBundle.Config[config.SubmissionURL]; found { + cm.trapURL = api.URLType(turl) + } else { + if cm.Debug { + cm.Log.Printf("Missing config.%s %+v", config.SubmissionURL, checkBundle) + } + return fmt.Errorf("[ERROR] Unable to use check, no %s in config", config.SubmissionURL) + } } else { // build a submission_url for non-httptrap checks out of mtev_reverse url if len(checkBundle.ReverseConnectURLs) == 0 { @@ -197,7 +225,14 @@ func (cm *CheckManager) initializeTrapURL() error { mtevURL := checkBundle.ReverseConnectURLs[0] mtevURL = strings.Replace(mtevURL, "mtev_reverse", "https", 1) mtevURL = strings.Replace(mtevURL, "check", "module/httptrap", 1) - cm.trapURL = api.URLType(fmt.Sprintf("%s/%s", mtevURL, checkBundle.Config.ReverseSecret)) + if rs, found := checkBundle.Config[config.ReverseSecretKey]; found { + cm.trapURL = api.URLType(fmt.Sprintf("%s/%s", mtevURL, rs)) + } else { + if cm.Debug { + cm.Log.Printf("Missing config.%s %+v", config.ReverseSecretKey, checkBundle) + } + return fmt.Errorf("[ERROR] Unable to use check, no %s in config", config.ReverseSecretKey) + } } // used when sending as "ServerName" get around certs not having IP SANS @@ -208,26 +243,39 @@ func (cm *CheckManager) initializeTrapURL() error { } cm.trapCN = BrokerCNType(cn) + if cm.enabled { + u, err := url.Parse(string(cm.trapURL)) + if err != nil { + return err + } + if u.Scheme == "https" { + if err := cm.loadCACert(); err != nil { + return err + } + } + } + cm.trapLastUpdate = time.Now() return nil } // Search for a check bundle given a predetermined set of criteria -func 
(cm *CheckManager) checkBundleSearch(criteria string) (*api.CheckBundle, error) { - checkBundles, err := cm.apih.CheckBundleSearch(api.SearchQueryType(criteria)) +func (cm *CheckManager) checkBundleSearch(criteria string, filter map[string][]string) (*api.CheckBundle, error) { + search := api.SearchQueryType(criteria) + checkBundles, err := cm.apih.SearchCheckBundles(&search, &filter) if err != nil { return nil, err } - if len(checkBundles) == 0 { + if len(*checkBundles) == 0 { return nil, nil // trigger creation of a new check } numActive := 0 checkID := -1 - for idx, check := range checkBundles { + for idx, check := range *checkBundles { if check.Status == statusActive { numActive++ checkID = idx @@ -235,10 +283,12 @@ func (cm *CheckManager) checkBundleSearch(criteria string) (*api.CheckBundle, er } if numActive > 1 { - return nil, fmt.Errorf("[ERROR] Multiple possibilities multiple check bundles match criteria %s\n", criteria) + return nil, fmt.Errorf("[ERROR] multiple check bundles match criteria %s", criteria) } - return &checkBundles[checkID], nil + bundle := (*checkBundles)[checkID] + + return &bundle, nil } // Create a new check to receive metrics @@ -257,22 +307,39 @@ func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error) return nil, nil, err } - config := api.CheckBundle{ - Brokers: []string{broker.Cid}, - Config: api.CheckBundleConfig{AsyncMetrics: true, Secret: checkSecret}, + chkcfg := &api.CheckBundle{ + Brokers: []string{broker.CID}, + Config: make(map[config.Key]string), DisplayName: string(cm.checkDisplayName), Metrics: []api.CheckBundleMetric{}, - MetricLimit: 0, - Notes: fmt.Sprintf("cgm_instanceid=%s", cm.checkInstanceID), + MetricLimit: config.DefaultCheckBundleMetricLimit, + Notes: cm.getNotes(), Period: 60, Status: statusActive, Tags: append(cm.checkSearchTag, cm.checkTags...), - Target: cm.checkTarget, + Target: string(cm.checkTarget), Timeout: 10, Type: string(cm.checkType), } - checkBundle, err := cm.apih.CreateCheckBundle(config) + if len(cm.customConfigFields) > 0 { + for fld, val := range cm.customConfigFields { + chkcfg.Config[config.Key(fld)] = val + } + } + + // + // use the default config settings if these are NOT set by user configuration + // + if val, ok := chkcfg.Config[config.AsyncMetrics]; !ok || val == "" { + chkcfg.Config[config.AsyncMetrics] = "true" + } + + if val, ok := chkcfg.Config[config.Secret]; !ok || val == "" { + chkcfg.Config[config.Secret] = checkSecret + } + + checkBundle, err := cm.apih.CreateCheckBundle(chkcfg) if err != nil { return nil, nil, err } @@ -290,3 +357,64 @@ func (cm *CheckManager) makeSecret() (string, error) { hash.Write(x) return hex.EncodeToString(hash.Sum(nil))[0:16], nil } + +func (cm *CheckManager) getNotes() *string { + notes := fmt.Sprintf("cgm_instanceid|%s", cm.checkInstanceID) + return ¬es +} + +// FetchCheckBySubmissionURL fetch a check configuration by submission_url +func (cm *CheckManager) fetchCheckBySubmissionURL(submissionURL api.URLType) (*api.Check, error) { + if string(submissionURL) == "" { + return nil, errors.New("[ERROR] Invalid submission URL (blank)") + } + + u, err := url.Parse(string(submissionURL)) + if err != nil { + return nil, err + } + + // valid trap url: scheme://host[:port]/module/httptrap/UUID/secret + + // does it smell like a valid trap url path + if !strings.Contains(u.Path, "/module/httptrap/") { + return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', unrecognized path", submissionURL) + } + + // extract uuid + pathParts := 
strings.Split(strings.Replace(u.Path, "/module/httptrap/", "", 1), "/") + if len(pathParts) != 2 { + return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', UUID not where expected", submissionURL) + } + uuid := pathParts[0] + + filter := api.SearchFilterType{"f__check_uuid": []string{uuid}} + + checks, err := cm.apih.SearchChecks(nil, &filter) + if err != nil { + return nil, err + } + + if len(*checks) == 0 { + return nil, fmt.Errorf("[ERROR] No checks found with UUID %s", uuid) + } + + numActive := 0 + checkID := -1 + + for idx, check := range *checks { + if check.Active { + numActive++ + checkID = idx + } + } + + if numActive > 1 { + return nil, fmt.Errorf("[ERROR] Multiple checks with same UUID %s", uuid) + } + + check := (*checks)[checkID] + + return &check, nil + +} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go index c44daccc0..80b0c08e1 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go @@ -2,25 +2,27 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package checkmgr provides a check management interace to circonus-gometrics +// Package checkmgr provides a check management interface to circonus-gometrics package checkmgr import ( "crypto/tls" "crypto/x509" - "errors" "fmt" "io/ioutil" "log" "net/url" "os" "path" + "regexp" "strconv" "strings" "sync" "time" "github.com/circonus-labs/circonus-gometrics/api" + "github.com/pkg/errors" + "github.com/tv42/httpunix" ) // Check management offers: @@ -35,7 +37,7 @@ import ( // - configuration parameters other than Check.SubmissionUrl, Debug and Log are ignored // - note: SubmissionUrl is **required** in this case as there is no way to derive w/o api // configure with api token - check management enabled -// - all otehr configuration parameters affect how the trap url is obtained +// - all other configuration parameters affect how the trap url is obtained // 1. provided (Check.SubmissionUrl) // 2. via check lookup (CheckConfig.Id) // 3. via a search using CheckConfig.InstanceId + CheckConfig.SearchTag @@ -59,12 +61,15 @@ type CheckConfig struct { // used to search for a check to use // used as check.target when creating a check InstanceID string + // explicitly set check.target (default: instance id) + TargetHost string + // a custom display name for the check (as viewed in UI Checks) + // default: instance id + DisplayName string // unique check searching tag (or tags) // used to search for a check to use (combined with instanceid) // used as a regular tag when creating a check SearchTag string - // a custom display name for the check (as viewed in UI Checks) - DisplayName string // httptrap check secret (for creating a check) Secret string // additional tags to add to a check (when creating a check) @@ -82,6 +87,10 @@ type CheckConfig struct { // overrides the behavior and will re-activate the metric when it is // encountered. "(true|false)", default "false" ForceMetricActivation string + // Type of check to use (default: httptrap) + Type string + // Custom check config fields (default: none) + CustomConfigFields map[string]string } // BrokerConfig options for broker @@ -94,6 +103,8 @@ type BrokerConfig struct { // for a broker to be considered viable it must respond to a // connection attempt within this amount of time e.g. 
200ms, 2s, 1m MaxResponseTime string + // TLS configuration to use when communicating within broker + TLSConfig *tls.Config } // Config options @@ -115,6 +126,9 @@ type CheckTypeType string // CheckInstanceIDType check instance id type CheckInstanceIDType string +// CheckTargetType check target/host +type CheckTargetType string + // CheckSecretType check secret type CheckSecretType string @@ -134,14 +148,18 @@ type CheckManager struct { Debug bool apih *api.API + initialized bool + initializedmu sync.RWMutex + // check checkType CheckTypeType checkID api.IDType checkInstanceID CheckInstanceIDType - checkTarget string + checkTarget CheckTargetType checkSearchTag api.TagType checkSecret CheckSecretType checkTags api.TagType + customConfigFields map[string]string checkSubmissionURL api.URLType checkDisplayName CheckDisplayNameType forceMetricActivation bool @@ -155,36 +173,45 @@ type CheckManager struct { brokerID api.IDType brokerSelectTag api.TagType brokerMaxResponseTime time.Duration + brokerTLS *tls.Config // state - checkBundle *api.CheckBundle - cbmu sync.Mutex - availableMetrics map[string]bool - trapURL api.URLType - trapCN BrokerCNType - trapLastUpdate time.Time - trapMaxURLAge time.Duration - trapmu sync.Mutex - certPool *x509.CertPool + checkBundle *api.CheckBundle + cbmu sync.Mutex + availableMetrics map[string]bool + availableMetricsmu sync.Mutex + trapURL api.URLType + trapCN BrokerCNType + trapLastUpdate time.Time + trapMaxURLAge time.Duration + trapmu sync.Mutex + certPool *x509.CertPool + sockRx *regexp.Regexp } // Trap config type Trap struct { - URL *url.URL - TLS *tls.Config + URL *url.URL + TLS *tls.Config + IsSocket bool + SockTransport *httpunix.Transport } // NewCheckManager returns a new check manager func NewCheckManager(cfg *Config) (*CheckManager, error) { + return New(cfg) +} + +// New returns a new check manager +func New(cfg *Config) (*CheckManager, error) { if cfg == nil { - return nil, errors.New("Invalid Check Manager configuration (nil).") + return nil, errors.New("invalid Check Manager configuration (nil)") } - cm := &CheckManager{ - enabled: false, - } + cm := &CheckManager{enabled: true, initialized: false} + // Setup logging for check manager cm.Debug = cfg.Debug cm.Log = cfg.Log if cm.Debug && cm.Log == nil { @@ -194,38 +221,44 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) { cm.Log = log.New(ioutil.Discard, "", log.LstdFlags) } + { + rx, err := regexp.Compile(`^http\+unix://(?P.+)/write/(?P.+)$`) + if err != nil { + return nil, errors.Wrap(err, "compiling socket regex") + } + cm.sockRx = rx + } + if cfg.Check.SubmissionURL != "" { cm.checkSubmissionURL = api.URLType(cfg.Check.SubmissionURL) } + // Blank API Token *disables* check management if cfg.API.TokenKey == "" { - if cm.checkSubmissionURL == "" { - return nil, errors.New("Invalid check manager configuration (no API token AND no submission url).") - } - if err := cm.initializeTrapURL(); err != nil { - return nil, err - } - return cm, nil + cm.enabled = false } - // enable check manager - - cm.enabled = true - - // initialize api handle - - cfg.API.Debug = cm.Debug - cfg.API.Log = cm.Log - - apih, err := api.NewAPI(&cfg.API) - if err != nil { - return nil, err + if !cm.enabled && cm.checkSubmissionURL == "" { + return nil, errors.New("invalid check manager configuration (no API token AND no submission url)") + } + + if cm.enabled { + // initialize api handle + cfg.API.Debug = cm.Debug + cfg.API.Log = cm.Log + apih, err := api.New(&cfg.API) + if err != nil { + return nil, 
errors.Wrap(err, "initializing api client") + } + cm.apih = apih } - cm.apih = apih // initialize check related data - - cm.checkType = defaultCheckType + if cfg.Check.Type != "" { + cm.checkType = CheckTypeType(cfg.Check.Type) + } else { + cm.checkType = defaultCheckType + } idSetting := "0" if cfg.Check.ID != "" { @@ -233,11 +266,12 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) { } id, err := strconv.Atoi(idSetting) if err != nil { - return nil, err + return nil, errors.Wrap(err, "converting check id") } cm.checkID = api.IDType(id) cm.checkInstanceID = CheckInstanceIDType(cfg.Check.InstanceID) + cm.checkTarget = CheckTargetType(cfg.Check.TargetHost) cm.checkDisplayName = CheckDisplayNameType(cfg.Check.DisplayName) cm.checkSecret = CheckSecretType(cfg.Check.Secret) @@ -247,7 +281,7 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) { } fm, err := strconv.ParseBool(fma) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing force metric activation") } cm.forceMetricActivation = fm @@ -259,7 +293,12 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) { if cm.checkInstanceID == "" { cm.checkInstanceID = CheckInstanceIDType(fmt.Sprintf("%s:%s", hn, an)) } - cm.checkTarget = hn + if cm.checkDisplayName == "" { + cm.checkDisplayName = CheckDisplayNameType(cm.checkInstanceID) + } + if cm.checkTarget == "" { + cm.checkTarget = CheckTargetType(cm.checkInstanceID) + } if cfg.Check.SearchTag == "" { cm.checkSearchTag = []string{fmt.Sprintf("service:%s", an)} @@ -271,8 +310,11 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) { cm.checkTags = strings.Split(strings.Replace(cfg.Check.Tags, " ", "", -1), ",") } - if cm.checkDisplayName == "" { - cm.checkDisplayName = CheckDisplayNameType(fmt.Sprintf("%s", string(cm.checkInstanceID))) + cm.customConfigFields = make(map[string]string) + if len(cfg.Check.CustomConfigFields) > 0 { + for fld, val := range cfg.Check.CustomConfigFields { + cm.customConfigFields[fld] = val + } } dur := cfg.Check.MaxURLAge @@ -281,19 +323,18 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) { } maxDur, err := time.ParseDuration(dur) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing max url age") } cm.trapMaxURLAge = maxDur // setup broker - idSetting = "0" if cfg.Broker.ID != "" { idSetting = cfg.Broker.ID } id, err = strconv.Atoi(idSetting) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing broker id") } cm.brokerID = api.IDType(id) @@ -307,41 +348,127 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) { } maxDur, err = time.ParseDuration(dur) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing broker max response time") } cm.brokerMaxResponseTime = maxDur + // add user specified tls config for broker if provided + cm.brokerTLS = cfg.Broker.TLSConfig + // metrics cm.availableMetrics = make(map[string]bool) cm.metricTags = make(map[string][]string) - if err := cm.initializeTrapURL(); err != nil { - return nil, err - } - return cm, nil } -// GetTrap return the trap url -func (cm *CheckManager) GetTrap() (*Trap, error) { - if cm.trapURL == "" { - if err := cm.initializeTrapURL(); err != nil { - return nil, err +// Initialize for sending metrics +func (cm *CheckManager) Initialize() { + + // if not managing the check, quicker initialization + if !cm.enabled { + err := cm.initializeTrapURL() + if err == nil { + cm.initializedmu.Lock() + cm.initialized = true + cm.initializedmu.Unlock() + } else { + cm.Log.Printf("[WARN] error 
initializing trap %s", err.Error()) } + return + } + + // background initialization when we have to reach out to the api + go func() { + cm.apih.EnableExponentialBackoff() + err := cm.initializeTrapURL() + if err == nil { + cm.initializedmu.Lock() + cm.initialized = true + cm.initializedmu.Unlock() + } else { + cm.Log.Printf("[WARN] error initializing trap %s", err.Error()) + } + cm.apih.DisableExponentialBackoff() + }() +} + +// IsReady reflects if the check has been initialied and metrics can be sent to Circonus +func (cm *CheckManager) IsReady() bool { + cm.initializedmu.RLock() + defer cm.initializedmu.RUnlock() + return cm.initialized +} + +// GetSubmissionURL returns submission url for circonus +func (cm *CheckManager) GetSubmissionURL() (*Trap, error) { + if cm.trapURL == "" { + return nil, errors.Errorf("get submission url - submission url unavailable") } trap := &Trap{} u, err := url.Parse(string(cm.trapURL)) if err != nil { - return nil, err + return nil, errors.Wrap(err, "get submission url") } - trap.URL = u + if u.Scheme == "http+unix" { + service := "circonus-agent" + sockPath := "" + metricID := "" + + subNames := cm.sockRx.SubexpNames() + matches := cm.sockRx.FindAllStringSubmatch(string(cm.trapURL), -1) + for _, match := range matches { + for idx, val := range match { + switch subNames[idx] { + case "sockfile": + sockPath = val + case "id": + metricID = val + } + } + } + + if sockPath == "" || metricID == "" { + return nil, errors.Errorf("get submission url - invalid socket url (%s)", cm.trapURL) + } + + u, err = url.Parse(fmt.Sprintf("http+unix://%s/write/%s", service, metricID)) + if err != nil { + return nil, errors.Wrap(err, "get submission url") + } + trap.URL = u + + trap.SockTransport = &httpunix.Transport{ + DialTimeout: 100 * time.Millisecond, + RequestTimeout: 1 * time.Second, + ResponseHeaderTimeout: 1 * time.Second, + } + trap.SockTransport.RegisterLocation(service, sockPath) + trap.IsSocket = true + } + if u.Scheme == "https" { + // preference user-supplied TLS configuration + if cm.brokerTLS != nil { + trap.TLS = cm.brokerTLS + return trap, nil + } + + // api.circonus.com uses a public CA signed certificate + // trap.noit.circonus.net uses Circonus CA private certificate + // enterprise brokers use private CA certificate + if trap.URL.Hostname() == "api.circonus.com" { + return trap, nil + } + if cm.certPool == nil { - cm.loadCACert() + if err := cm.loadCACert(); err != nil { + return nil, errors.Wrap(err, "get submission url") + } } t := &tls.Config{ RootCAs: cm.certPool, @@ -362,18 +489,19 @@ func (cm *CheckManager) ResetTrap() error { } cm.trapURL = "" - cm.certPool = nil - err := cm.initializeTrapURL() - return err + cm.certPool = nil // force re-fetching CA cert (if custom TLS config not supplied) + return cm.initializeTrapURL() } // RefreshTrap check when the last time the URL was reset, reset if needed -func (cm *CheckManager) RefreshTrap() { +func (cm *CheckManager) RefreshTrap() error { if cm.trapURL == "" { - return + return nil } if time.Since(cm.trapLastUpdate) >= cm.trapMaxURLAge { - cm.ResetTrap() + return cm.ResetTrap() } + + return nil } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go index 49b7c9457..61c4986b7 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go @@ -10,12 +10,17 @@ import ( // IsMetricActive checks whether a 
given metric name is currently active(enabled) func (cm *CheckManager) IsMetricActive(name string) bool { - active, _ := cm.availableMetrics[name] - return active + cm.availableMetricsmu.Lock() + defer cm.availableMetricsmu.Unlock() + + return cm.availableMetrics[name] } // ActivateMetric determines if a given metric should be activated func (cm *CheckManager) ActivateMetric(name string) bool { + cm.availableMetricsmu.Lock() + defer cm.availableMetricsmu.Unlock() + active, exists := cm.availableMetrics[name] if !exists { @@ -33,41 +38,57 @@ func (cm *CheckManager) ActivateMetric(name string) bool { func (cm *CheckManager) AddMetricTags(metricName string, tags []string, appendTags bool) bool { tagsUpdated := false - if len(tags) == 0 { + if appendTags && len(tags) == 0 { return tagsUpdated } - if _, exists := cm.metricTags[metricName]; !exists { + currentTags, exists := cm.metricTags[metricName] + if !exists { foundMetric := false - for _, metric := range cm.checkBundle.Metrics { - if metric.Name == metricName { - foundMetric = true - cm.metricTags[metricName] = metric.Tags - break + if cm.checkBundle != nil { + for _, metric := range cm.checkBundle.Metrics { + if metric.Name == metricName { + foundMetric = true + currentTags = metric.Tags + break + } } } if !foundMetric { - cm.metricTags[metricName] = []string{} + currentTags = []string{} } } - action := "no new" + action := "" if appendTags { - numNewTags := countNewTags(cm.metricTags[metricName], tags) + numNewTags := countNewTags(currentTags, tags) if numNewTags > 0 { action = "Added" - cm.metricTags[metricName] = append(cm.metricTags[metricName], tags...) + currentTags = append(currentTags, tags...) tagsUpdated = true } } else { - action = "Set" - cm.metricTags[metricName] = tags - tagsUpdated = true + if len(tags) != len(currentTags) { + action = "Set" + currentTags = tags + tagsUpdated = true + } else { + numNewTags := countNewTags(currentTags, tags) + if numNewTags > 0 { + action = "Set" + currentTags = tags + tagsUpdated = true + } + } } - if cm.Debug { + if tagsUpdated { + cm.metricTags[metricName] = currentTags + } + + if cm.Debug && action != "" { cm.Log.Printf("[DEBUG] %s metric tag(s) %s %v\n", action, metricName, tags) } @@ -116,7 +137,9 @@ func (cm *CheckManager) inventoryMetrics() { for _, metric := range cm.checkBundle.Metrics { availableMetrics[metric.Name] = metric.Status == "active" } + cm.availableMetricsmu.Lock() cm.availableMetrics = availableMetrics + cm.availableMetricsmu.Unlock() } // countNewTags returns a count of new tags which do not exist in the current list of tags diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go index eb15f3aaf..019cc8f86 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go @@ -30,22 +30,35 @@ package circonusgometrics import ( - "errors" + "bufio" + "bytes" + "fmt" "io/ioutil" "log" "os" "strconv" + "strings" "sync" "time" "github.com/circonus-labs/circonus-gometrics/api" "github.com/circonus-labs/circonus-gometrics/checkmgr" + "github.com/pkg/errors" ) const ( defaultFlushInterval = "10s" // 10 * time.Second ) +// Metric defines an individual metric +type Metric struct { + Type string `json:"_type"` + Value interface{} `json:"_value"` +} + +// Metrics holds host metrics +type Metrics map[string]Metric + // Config options for circonus-gometrics type Config struct { Log 
*log.Logger
@@ -58,14 +71,22 @@ type Config struct {
 	// API, Check and Broker configuration options
 	CheckManager checkmgr.Config
 
-	// how frequenly to submit metrics to Circonus, default 10 seconds
+	// how frequently to submit metrics to Circonus, default 10 seconds.
+	// Set to 0 to disable automatic flushes and call Flush manually.
 	Interval string
 }
 
+type prevMetrics struct {
+	metrics   *Metrics
+	metricsmu sync.Mutex
+	ts        time.Time
+}
+
 // CirconusMetrics state
 type CirconusMetrics struct {
-	Log   *log.Logger
-	Debug bool
+	Log           *log.Logger
+	Debug         bool
+
 	resetCounters bool
 	resetGauges   bool
 	resetHistograms bool
@@ -73,7 +94,9 @@ type CirconusMetrics struct {
 	flushInterval time.Duration
 	flushing      bool
 	flushmu       sync.Mutex
+	packagingmu   sync.Mutex
 	check         *checkmgr.CheckManager
+	lastMetrics   *prevMetrics
 
 	counters map[string]uint64
 	cm       sync.Mutex
@@ -81,7 +104,7 @@ type CirconusMetrics struct {
 	counterFuncs map[string]func() uint64
 	cfm          sync.Mutex
 
-	gauges map[string]string
+	gauges map[string]interface{}
 	gm     sync.Mutex
 
 	gaugeFuncs map[string]func() int64
@@ -99,118 +122,142 @@ type CirconusMetrics struct {
 
 // NewCirconusMetrics returns a CirconusMetrics instance
 func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
+	return New(cfg)
+}
+
+// New returns a CirconusMetrics instance
+func New(cfg *Config) (*CirconusMetrics, error) {
 	if cfg == nil {
-		return nil, errors.New("Invalid configuration (nil).")
+		return nil, errors.New("invalid configuration (nil)")
 	}
 
 	cm := &CirconusMetrics{
 		counters:     make(map[string]uint64),
 		counterFuncs: make(map[string]func() uint64),
-		gauges:       make(map[string]string),
+		gauges:       make(map[string]interface{}),
 		gaugeFuncs:   make(map[string]func() int64),
 		histograms:   make(map[string]*Histogram),
 		text:         make(map[string]string),
 		textFuncs:    make(map[string]func() string),
+		lastMetrics:  &prevMetrics{},
 	}
 
-	cm.Debug = cfg.Debug
-	cm.Log = cfg.Log
+	// Logging
+	{
+		cm.Debug = cfg.Debug
+		cm.Log = cfg.Log
 
-	if cm.Debug && cfg.Log == nil {
-		cm.Log = log.New(os.Stderr, "", log.LstdFlags)
-	}
-	if cm.Log == nil {
-		cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
+		if cm.Debug && cm.Log == nil {
+			cm.Log = log.New(os.Stderr, "", log.LstdFlags)
+		}
+		if cm.Log == nil {
+			cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
+		}
 	}
 
-	fi := defaultFlushInterval
-	if cfg.Interval != "" {
-		fi = cfg.Interval
+	// Flush Interval
+	{
+		fi := defaultFlushInterval
+		if cfg.Interval != "" {
+			fi = cfg.Interval
+		}
+
+		dur, err := time.ParseDuration(fi)
+		if err != nil {
+			return nil, errors.Wrap(err, "parsing flush interval")
+		}
+		cm.flushInterval = dur
 	}
 
-	dur, err := time.ParseDuration(fi)
-	if err != nil {
-		return nil, err
-	}
-	cm.flushInterval = dur
-
-	var setting bool
+	// metric resets
 	cm.resetCounters = true
 	if cfg.ResetCounters != "" {
-		if setting, err = strconv.ParseBool(cfg.ResetCounters); err == nil {
-			cm.resetCounters = setting
+		setting, err := strconv.ParseBool(cfg.ResetCounters)
+		if err != nil {
+			return nil, errors.Wrap(err, "parsing reset counters")
 		}
+		cm.resetCounters = setting
 	}
 
 	cm.resetGauges = true
 	if cfg.ResetGauges != "" {
-		if setting, err = strconv.ParseBool(cfg.ResetGauges); err == nil {
-			cm.resetGauges = setting
+		setting, err := strconv.ParseBool(cfg.ResetGauges)
+		if err != nil {
+			return nil, errors.Wrap(err, "parsing reset gauges")
 		}
+		cm.resetGauges = setting
 	}
 
 	cm.resetHistograms = true
 	if cfg.ResetHistograms != "" {
-		if setting, err = strconv.ParseBool(cfg.ResetHistograms); err == nil {
-			cm.resetHistograms = setting
+		setting, err :=
strconv.ParseBool(cfg.ResetHistograms) + if err != nil { + return nil, errors.Wrap(err, "parsing reset histograms") } + cm.resetHistograms = setting } cm.resetText = true if cfg.ResetText != "" { - if setting, err = strconv.ParseBool(cfg.ResetText); err == nil { - cm.resetText = setting + setting, err := strconv.ParseBool(cfg.ResetText) + if err != nil { + return nil, errors.Wrap(err, "parsing reset text") } + cm.resetText = setting } - cfg.CheckManager.Debug = cm.Debug - cfg.CheckManager.Log = cm.Log + // check manager + { + cfg.CheckManager.Debug = cm.Debug + cfg.CheckManager.Log = cm.Log - check, err := checkmgr.NewCheckManager(&cfg.CheckManager) - if err != nil { - return nil, err + check, err := checkmgr.New(&cfg.CheckManager) + if err != nil { + return nil, errors.Wrap(err, "creating new check manager") + } + cm.check = check } - cm.check = check - if _, err := cm.check.GetTrap(); err != nil { - return nil, err + // start background initialization + cm.check.Initialize() + + // if automatic flush is enabled, start it. + // NOTE: submit will jettison metrics until initialization has completed. + if cm.flushInterval > time.Duration(0) { + go func() { + for range time.NewTicker(cm.flushInterval).C { + cm.Flush() + } + }() } return cm, nil } -// Start initializes the CirconusMetrics instance based on -// configuration settings and sets the httptrap check url to -// which metrics should be sent. It then starts a perdiodic -// submission process of all metrics collected. +// Start deprecated NOP, automatic flush is started in New if flush interval > 0. func (m *CirconusMetrics) Start() { - go func() { - for _ = range time.NewTicker(m.flushInterval).C { - m.Flush() - } - }() + // nop } -// Flush metrics kicks off the process of sending metrics to Circonus -func (m *CirconusMetrics) Flush() { - if m.flushing { - return - } - m.flushmu.Lock() - m.flushing = true - m.flushmu.Unlock() +// Ready returns true or false indicating if the check is ready to accept metrics +func (m *CirconusMetrics) Ready() bool { + return m.check.IsReady() +} + +func (m *CirconusMetrics) packageMetrics() (map[string]*api.CheckBundleMetric, Metrics) { + + m.packagingmu.Lock() + defer m.packagingmu.Unlock() if m.Debug { - m.Log.Println("[DEBUG] Flushing metrics") + m.Log.Println("[DEBUG] Packaging metrics") } - // check for new metrics and enable them automatically - newMetrics := make(map[string]*api.CheckBundleMetric) - counters, gauges, histograms, text := m.snapshot() - output := make(map[string]interface{}) + newMetrics := make(map[string]*api.CheckBundleMetric) + output := make(Metrics, len(counters)+len(gauges)+len(histograms)+len(text)) for name, value := range counters { send := m.check.IsMetricActive(name) if !send && m.check.ActivateMetric(name) { @@ -222,10 +269,7 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - "_type": "n", - "_value": value, - } + output[name] = Metric{Type: "L", Value: value} } } @@ -240,10 +284,7 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - "_type": "n", - "_value": value, - } + output[name] = Metric{Type: m.getGaugeType(value), Value: value} } } @@ -258,10 +299,7 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - "_type": "n", - "_value": value.DecStrings(), - } + output[name] = Metric{Type: "n", Value: value.DecStrings()} } } @@ -276,13 +314,85 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - 
"_type": "s", - "_value": value, - } + output[name] = Metric{Type: "s", Value: value} } } + m.lastMetrics.metricsmu.Lock() + defer m.lastMetrics.metricsmu.Unlock() + m.lastMetrics.metrics = &output + m.lastMetrics.ts = time.Now() + + return newMetrics, output +} + +// PromOutput returns lines of metrics in prom format +func (m *CirconusMetrics) PromOutput() (*bytes.Buffer, error) { + m.lastMetrics.metricsmu.Lock() + defer m.lastMetrics.metricsmu.Unlock() + + if m.lastMetrics.metrics == nil { + return nil, errors.New("no metrics available") + } + + var b bytes.Buffer + w := bufio.NewWriter(&b) + + ts := m.lastMetrics.ts.UnixNano() / int64(time.Millisecond) + + for name, metric := range *m.lastMetrics.metrics { + switch metric.Type { + case "n": + if strings.HasPrefix(fmt.Sprintf("%v", metric.Value), "[H[") { + continue // circonus histogram != prom "histogram" (aka percentile) + } + case "s": + continue // text metrics unsupported + } + fmt.Fprintf(w, "%s %v %d\n", name, metric.Value, ts) + } + + err := w.Flush() + if err != nil { + return nil, errors.Wrap(err, "flushing metric buffer") + } + + return &b, err +} + +// FlushMetrics flushes current metrics to a structure and returns it (does NOT send to Circonus) +func (m *CirconusMetrics) FlushMetrics() *Metrics { + m.flushmu.Lock() + if m.flushing { + m.flushmu.Unlock() + return &Metrics{} + } + + m.flushing = true + m.flushmu.Unlock() + + _, output := m.packageMetrics() + + m.flushmu.Lock() + m.flushing = false + m.flushmu.Unlock() + + return &output +} + +// Flush metrics kicks off the process of sending metrics to Circonus +func (m *CirconusMetrics) Flush() { + m.flushmu.Lock() + if m.flushing { + m.flushmu.Unlock() + return + } + + m.flushing = true + m.flushmu.Unlock() + + newMetrics, output := m.packageMetrics() + if len(output) > 0 { m.submit(output, newMetrics) } else { diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/counter.go b/vendor/github.com/circonus-labs/circonus-gometrics/counter.go index 2b34961f1..2311b0a41 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/counter.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/counter.go @@ -4,6 +4,8 @@ package circonusgometrics +import "fmt" + // A Counter is a monotonically increasing unsigned integer. // // Use a counter to derive rates (e.g., record total number of requests, derive @@ -40,6 +42,19 @@ func (m *CirconusMetrics) RemoveCounter(metric string) { delete(m.counters, metric) } +// GetCounterTest returns the current value for a counter. (note: it is a function specifically for "testing", disable automatic submission during testing.) 
+func (m *CirconusMetrics) GetCounterTest(metric string) (uint64, error) { + m.cm.Lock() + defer m.cm.Unlock() + + if val, ok := m.counters[metric]; ok { + return val, nil + } + + return 0, fmt.Errorf("Counter metric '%s' not found", metric) + +} + // SetCounterFunc set counter to a function [called at flush interval] func (m *CirconusMetrics) SetCounterFunc(metric string, fn func() uint64) { m.cfm.Lock() diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go b/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go index b44236959..4e05484ec 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go @@ -22,7 +22,48 @@ func (m *CirconusMetrics) Gauge(metric string, val interface{}) { func (m *CirconusMetrics) SetGauge(metric string, val interface{}) { m.gm.Lock() defer m.gm.Unlock() - m.gauges[metric] = m.gaugeValString(val) + m.gauges[metric] = val +} + +// AddGauge adds value to existing gauge +func (m *CirconusMetrics) AddGauge(metric string, val interface{}) { + m.gm.Lock() + defer m.gm.Unlock() + + v, ok := m.gauges[metric] + if !ok { + m.gauges[metric] = val + return + } + + switch val.(type) { + default: + // ignore it, unsupported type + case int: + m.gauges[metric] = v.(int) + val.(int) + case int8: + m.gauges[metric] = v.(int8) + val.(int8) + case int16: + m.gauges[metric] = v.(int16) + val.(int16) + case int32: + m.gauges[metric] = v.(int32) + val.(int32) + case int64: + m.gauges[metric] = v.(int64) + val.(int64) + case uint: + m.gauges[metric] = v.(uint) + val.(uint) + case uint8: + m.gauges[metric] = v.(uint8) + val.(uint8) + case uint16: + m.gauges[metric] = v.(uint16) + val.(uint16) + case uint32: + m.gauges[metric] = v.(uint32) + val.(uint32) + case uint64: + m.gauges[metric] = v.(uint64) + val.(uint64) + case float32: + m.gauges[metric] = v.(float32) + val.(float32) + case float64: + m.gauges[metric] = v.(float64) + val.(float64) + } } // RemoveGauge removes a gauge @@ -32,6 +73,18 @@ func (m *CirconusMetrics) RemoveGauge(metric string) { delete(m.gauges, metric) } +// GetGaugeTest returns the current value for a gauge. (note: it is a function specifically for "testing", disable automatic submission during testing.) 
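+//
+// A minimal test sketch (illustrative): gauges store the raw value, so the
+// concrete type passed to Gauge is the concrete type returned here:
+//
+//	m.Gauge("depth", 42)
+//	v, err := m.GetGaugeTest("depth")
+//	// on success, v is an interface{} holding int(42)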
+func (m *CirconusMetrics) GetGaugeTest(metric string) (interface{}, error) {
+	m.gm.Lock()
+	defer m.gm.Unlock()
+
+	if val, ok := m.gauges[metric]; ok {
+		return val, nil
+	}
+
+	return nil, fmt.Errorf("Gauge metric '%s' not found", metric)
+}
+
 // SetGaugeFunc sets a gauge to a function [called at flush interval]
 func (m *CirconusMetrics) SetGaugeFunc(metric string, fn func() int64) {
 	m.gfm.Lock()
@@ -46,36 +99,31 @@ func (m *CirconusMetrics) RemoveGaugeFunc(metric string) {
 	delete(m.gaugeFuncs, metric)
 }
 
-// gaugeValString converts an interface value (of a supported type) to a string
-func (m *CirconusMetrics) gaugeValString(val interface{}) string {
-	vs := ""
-	switch v := val.(type) {
-	default:
-		// ignore it, unsupported type
+// getGaugeType returns the resmon type for the underlying type of a gauge value
+func (m *CirconusMetrics) getGaugeType(v interface{}) string {
+	mt := "n"
+	switch v.(type) {
 	case int:
-		vs = fmt.Sprintf("%d", v)
+		mt = "i"
 	case int8:
-		vs = fmt.Sprintf("%d", v)
+		mt = "i"
 	case int16:
-		vs = fmt.Sprintf("%d", v)
+		mt = "i"
 	case int32:
-		vs = fmt.Sprintf("%d", v)
-	case int64:
-		vs = fmt.Sprintf("%d", v)
+		mt = "i"
 	case uint:
-		vs = fmt.Sprintf("%d", v)
+		mt = "I"
 	case uint8:
-		vs = fmt.Sprintf("%d", v)
+		mt = "I"
 	case uint16:
-		vs = fmt.Sprintf("%d", v)
+		mt = "I"
 	case uint32:
-		vs = fmt.Sprintf("%d", v)
+		mt = "I"
+	case int64:
+		mt = "l"
 	case uint64:
-		vs = fmt.Sprintf("%d", v)
-	case float32:
-		vs = fmt.Sprintf("%f", v)
-	case float64:
-		vs = fmt.Sprintf("%f", v)
+		mt = "L"
 	}
-	return vs
+
+	return mt
 }
diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go b/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go
index 0ba1a3b23..d39f008de 100644
--- a/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go
+++ b/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go
@@ -5,6 +5,7 @@ package circonusgometrics
 
 import (
+	"fmt"
 	"sync"
 
 	"github.com/circonus-labs/circonusllhist"
@@ -27,6 +28,17 @@ func (m *CirconusMetrics) RecordValue(metric string, val float64) {
 	m.SetHistogramValue(metric, val)
 }
 
+// RecordCountForValue adds count n for value to a histogram
+func (m *CirconusMetrics) RecordCountForValue(metric string, val float64, n int64) {
+	hist := m.NewHistogram(metric)
+
+	m.hm.Lock()
+	hist.rw.Lock()
+	hist.hist.RecordValues(val, n)
+	hist.rw.Unlock()
+	m.hm.Unlock()
+}
+
 // SetHistogramValue adds a value to a histogram
 func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) {
 	hist := m.NewHistogram(metric)
@@ -38,6 +50,18 @@ func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) {
 	m.hm.Unlock()
 }
 
+// GetHistogramTest returns the current value for a histogram. (note: it is a function specifically for "testing", disable automatic submission during testing.)
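+//
+// A minimal test sketch (illustrative; assumes no automatic flush resets the
+// histogram between the records and the read):
+//
+//	m.RecordValue("latency", 0.23)
+//	m.RecordValue("latency", 0.29)
+//	strs, err := m.GetHistogramTest("latency")
+//	// on success, strs holds bucket strings such as "H[2.3e-01]=1" and "H[2.9e-01]=1"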
+func (m *CirconusMetrics) GetHistogramTest(metric string) ([]string, error) { + m.hm.Lock() + defer m.hm.Unlock() + + if hist, ok := m.histograms[metric]; ok { + return hist.hist.DecStrings(), nil + } + + return []string{""}, fmt.Errorf("Histogram metric '%s' not found", metric) +} + // RemoveHistogram removes a histogram func (m *CirconusMetrics) RemoveHistogram(metric string) { m.hm.Lock() diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/submit.go b/vendor/github.com/circonus-labs/circonus-gometrics/submit.go index a8692c26c..f99bc4ced 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/submit.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/submit.go @@ -6,8 +6,8 @@ package circonusgometrics import ( "bytes" + "context" "encoding/json" - "errors" "fmt" "io/ioutil" "log" @@ -17,17 +17,24 @@ import ( "time" "github.com/circonus-labs/circonus-gometrics/api" - "github.com/hashicorp/go-retryablehttp" + retryablehttp "github.com/hashicorp/go-retryablehttp" + "github.com/pkg/errors" ) -func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[string]*api.CheckBundleMetric) { +func (m *CirconusMetrics) submit(output Metrics, newMetrics map[string]*api.CheckBundleMetric) { + + // if there is nowhere to send metrics to, just return. + if !m.check.IsReady() { + m.Log.Printf("[WARN] check not ready, skipping metric submission") + return + } // update check if there are any new metrics or, if metric tags have been added since last submit m.check.UpdateCheck(newMetrics) str, err := json.Marshal(output) if err != nil { - m.Log.Printf("[ERROR] marshling output %+v", err) + m.Log.Printf("[ERROR] marshaling output %+v", err) return } @@ -37,15 +44,21 @@ func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[s return } + // OK response from circonus-agent does not + // indicate how many metrics were received + if numStats == -1 { + numStats = len(output) + } + if m.Debug { m.Log.Printf("[DEBUG] %d stats sent\n", numStats) } } func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { - trap, err := m.check.GetTrap() + trap, err := m.check.GetSubmissionURL() if err != nil { - return 0, err + return 0, errors.Wrap(err, "trap call") } dataReader := bytes.NewReader(payload) @@ -59,10 +72,14 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { // keep last HTTP error in the event of retry failure var lastHTTPError error - retryPolicy := func(resp *http.Response, err error) (bool, error) { + retryPolicy := func(ctx context.Context, resp *http.Response, err error) (bool, error) { + if ctxErr := ctx.Err(); ctxErr != nil { + return false, ctxErr + } + if err != nil { lastHTTPError = err - return true, err + return true, errors.Wrap(err, "retry policy") } // Check the response code. 
We retry on 500-range responses to allow // the server time to recover, as 500's are typically not permanent @@ -92,20 +109,24 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { TLSClientConfig: trap.TLS, DisableKeepAlives: true, MaxIdleConnsPerHost: -1, - DisableCompression: true, + DisableCompression: false, } - } else { + } else if trap.URL.Scheme == "http" { client.HTTPClient.Transport = &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).Dial, - TLSHandshakeTimeout: 10 * time.Second, DisableKeepAlives: true, MaxIdleConnsPerHost: -1, - DisableCompression: true, + DisableCompression: false, } + } else if trap.IsSocket { + m.Log.Println("using socket transport") + client.HTTPClient.Transport = trap.SockTransport + } else { + return 0, errors.Errorf("unknown scheme (%s), skipping submission", trap.URL.Scheme) } client.RetryWaitMin = 1 * time.Second client.RetryWaitMax = 5 * time.Second @@ -120,7 +141,8 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { client.CheckRetry = retryPolicy attempts := -1 - client.RequestLogHook = func(logger *log.Logger, req *http.Request, retryNumber int) { + client.RequestLogHook = func(logger retryablehttp.Logger, req *http.Request, retryNumber int) { + //client.RequestLogHook = func(logger *log.Logger, req *http.Request, retryNumber int) { attempts = retryNumber } @@ -132,10 +154,17 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { if attempts == client.RetryMax { m.check.RefreshTrap() } - return 0, err + return 0, errors.Wrap(err, "trap call") } defer resp.Body.Close() + + // no content - expected result from + // circonus-agent when metrics accepted + if resp.StatusCode == http.StatusNoContent { + return -1, nil + } + body, err := ioutil.ReadAll(resp.Body) if err != nil { m.Log.Printf("[ERROR] reading body, proceeding. %s\n", err) @@ -146,7 +175,7 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { m.Log.Printf("[ERROR] parsing body, proceeding. 
%v (%s)\n", err, body) } - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { return 0, errors.New("[ERROR] bad response code: " + strconv.Itoa(resp.StatusCode)) } switch v := response["stats"].(type) { diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/tools.go b/vendor/github.com/circonus-labs/circonus-gometrics/tools.go index 73259a7b1..87c80516b 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/tools.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/tools.go @@ -17,7 +17,6 @@ func (m *CirconusMetrics) TrackHTTPLatency(name string, handler func(http.Respon start := time.Now().UnixNano() handler(rw, req) elapsed := time.Now().UnixNano() - start - //hist := m.NewHistogram("go`HTTP`" + req.Method + "`" + name + "`latency") m.RecordValue("go`HTTP`"+req.Method+"`"+name+"`latency", float64(elapsed)/float64(time.Second)) } } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/util.go b/vendor/github.com/circonus-labs/circonus-gometrics/util.go index b5e9f4777..3def2caa3 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/util.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/util.go @@ -33,7 +33,7 @@ func (m *CirconusMetrics) Reset() { m.counters = make(map[string]uint64) m.counterFuncs = make(map[string]func() uint64) - m.gauges = make(map[string]string) + m.gauges = make(map[string]interface{}) m.gaugeFuncs = make(map[string]func() int64) m.histograms = make(map[string]*Histogram) m.text = make(map[string]string) @@ -41,81 +41,95 @@ func (m *CirconusMetrics) Reset() { } // snapshot returns a copy of the values of all registered counters and gauges. -func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]string, h map[string]*circonusllhist.Histogram, t map[string]string) { +func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]interface{}, h map[string]*circonusllhist.Histogram, t map[string]string) { + c = m.snapCounters() + g = m.snapGauges() + h = m.snapHistograms() + t = m.snapText() + + return +} + +func (m *CirconusMetrics) snapCounters() map[string]uint64 { m.cm.Lock() defer m.cm.Unlock() - m.cfm.Lock() defer m.cfm.Unlock() - m.gm.Lock() - defer m.gm.Unlock() + c := make(map[string]uint64, len(m.counters)+len(m.counterFuncs)) - m.gfm.Lock() - defer m.gfm.Unlock() - - m.hm.Lock() - defer m.hm.Unlock() - - m.tm.Lock() - defer m.tm.Unlock() - - m.tfm.Lock() - defer m.tfm.Unlock() - - c = make(map[string]uint64, len(m.counters)+len(m.counterFuncs)) for n, v := range m.counters { c[n] = v } + if m.resetCounters && len(c) > 0 { + m.counters = make(map[string]uint64) + } for n, f := range m.counterFuncs { c[n] = f() } - //g = make(map[string]int64, len(m.gauges)+len(m.gaugeFuncs)) - g = make(map[string]string, len(m.gauges)+len(m.gaugeFuncs)) + return c +} + +func (m *CirconusMetrics) snapGauges() map[string]interface{} { + m.gm.Lock() + defer m.gm.Unlock() + m.gfm.Lock() + defer m.gfm.Unlock() + + g := make(map[string]interface{}, len(m.gauges)+len(m.gaugeFuncs)) + for n, v := range m.gauges { g[n] = v } - - for n, f := range m.gaugeFuncs { - g[n] = m.gaugeValString(f()) + if m.resetGauges && len(g) > 0 { + m.gauges = make(map[string]interface{}) } - h = make(map[string]*circonusllhist.Histogram, len(m.histograms)) + for n, f := range m.gaugeFuncs { + g[n] = f() + } + + return g +} + +func (m *CirconusMetrics) snapHistograms() map[string]*circonusllhist.Histogram { + m.hm.Lock() + defer m.hm.Unlock() + + h := make(map[string]*circonusllhist.Histogram, len(m.histograms)) 
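+	// CopyAndReset drains each histogram under its own lock: the accumulated
+	// bins go into this snapshot and the live histogram is emptied, so values
+	// recorded while a flush is in progress land in the next snapshot.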
+ for n, hist := range m.histograms { hist.rw.Lock() h[n] = hist.hist.CopyAndReset() hist.rw.Unlock() } + if m.resetHistograms && len(h) > 0 { + m.histograms = make(map[string]*Histogram) + } + + return h +} + +func (m *CirconusMetrics) snapText() map[string]string { + m.tm.Lock() + defer m.tm.Unlock() + m.tfm.Lock() + defer m.tfm.Unlock() + + t := make(map[string]string, len(m.text)+len(m.textFuncs)) - t = make(map[string]string, len(m.text)+len(m.textFuncs)) for n, v := range m.text { t[n] = v } + if m.resetText && len(t) > 0 { + m.text = make(map[string]string) + } for n, f := range m.textFuncs { t[n] = f() } - if m.resetCounters { - m.counters = make(map[string]uint64) - m.counterFuncs = make(map[string]func() uint64) - } - - if m.resetGauges { - m.gauges = make(map[string]string) - m.gaugeFuncs = make(map[string]func() int64) - } - - if m.resetHistograms { - m.histograms = make(map[string]*Histogram) - } - - if m.resetText { - m.text = make(map[string]string) - m.textFuncs = make(map[string]func() string) - } - - return + return t } diff --git a/vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go b/vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go index cf4f482c1..f5c372749 100644 --- a/vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go +++ b/vendor/github.com/circonus-labs/circonusllhist/circonusllhist.go @@ -8,17 +8,24 @@ package circonusllhist import ( "bytes" + "encoding/base64" + "encoding/binary" + "encoding/json" "errors" "fmt" + "io" "math" + "strconv" + "strings" "sync" + "time" ) const ( - DEFAULT_HIST_SIZE = int16(100) + defaultHistSize = uint16(100) ) -var power_of_ten = [...]float64{ +var powerOfTen = [...]float64{ 1, 10, 100, 1000, 10000, 100000, 1e+06, 1e+07, 1e+08, 1e+09, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20, 1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, @@ -49,28 +56,39 @@ var power_of_ten = [...]float64{ } // A Bracket is a part of a cumulative distribution. 
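+// Each bin covers one base-10 bucket: its nominal value is (val/10) * 10^exp,
+// where val carries two significant decimal digits (|val| in 10..99), val == 0
+// is the zero bucket, and other magnitudes mark NaN. For example:
+//
+//	4200.0 -> val=42, exp=3   (bucket [4.2e3, 4.3e3))
+//	0.0023 -> val=23, exp=-3  (bucket [2.3e-3, 2.4e-3))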
-type Bin struct { +type bin struct { + count uint64 val int8 exp int8 - count uint64 } -func NewBinRaw(val int8, exp int8, count uint64) *Bin { - return &Bin{ +func newBinRaw(val int8, exp int8, count uint64) *bin { + return &bin{ + count: count, val: val, exp: exp, - count: count, } } -func NewBin() *Bin { - return NewBinRaw(0, 0, 0) + +func newBin() *bin { + return newBinRaw(0, 0, 0) } -func NewBinFromFloat64(d float64) *Bin { - hb := NewBinRaw(0, 0, 0) - hb.SetFromFloat64(d) + +func newBinFromFloat64(d float64) *bin { + hb := newBinRaw(0, 0, 0) + hb.setFromFloat64(d) return hb } -func (hb *Bin) SetFromFloat64(d float64) *Bin { + +type fastL2 struct { + l1, l2 int +} + +func (hb *bin) newFastL2() fastL2 { + return fastL2{l1: int(uint8(hb.exp)), l2: int(uint8(hb.val))} +} + +func (hb *bin) setFromFloat64(d float64) *bin { hb.val = -1 if math.IsInf(d, 0) || math.IsNaN(d) { return hb @@ -93,7 +111,7 @@ func (hb *Bin) SetFromFloat64(d float64) *Bin { } return hb } - d = d / hb.PowerOfTen() + d = d / hb.powerOfTen() d = d * 10 hb.val = int8(sign * int(math.Floor(d+1e-13))) if hb.val == 100 || hb.val == -100 { @@ -116,136 +134,308 @@ func (hb *Bin) SetFromFloat64(d float64) *Bin { } return hb } -func (hb *Bin) PowerOfTen() float64 { - idx := int(hb.exp) - if idx < 0 { - idx = 256 + idx - } - return power_of_ten[idx] + +func (hb *bin) powerOfTen() float64 { + idx := int(uint8(hb.exp)) + return powerOfTen[idx] } -func (hb *Bin) IsNaN() bool { - if hb.val > 99 || hb.val < -99 { +func (hb *bin) isNaN() bool { + // aval := abs(hb.val) + aval := hb.val + if aval < 0 { + aval = -aval + } + if 99 < aval { // in [100... ]: nan return true } + if 9 < aval { // in [10 - 99]: valid range + return false + } + if 0 < aval { // in [1 - 9 ]: nan + return true + } + if 0 == aval { // in [0] : zero bucket + return false + } return false } -func (hb *Bin) Val() int8 { - return hb.val -} -func (hb *Bin) Exp() int8 { - return hb.exp -} -func (hb *Bin) Count() uint64 { - return hb.count -} -func (hb *Bin) Value() float64 { - if hb.IsNaN() { + +func (hb *bin) value() float64 { + if hb.isNaN() { return math.NaN() } if hb.val < 10 && hb.val > -10 { return 0.0 } - return (float64(hb.val) / 10.0) * hb.PowerOfTen() + return (float64(hb.val) / 10.0) * hb.powerOfTen() } -func (hb *Bin) BinWidth() float64 { - if hb.IsNaN() { + +func (hb *bin) binWidth() float64 { + if hb.isNaN() { return math.NaN() } if hb.val < 10 && hb.val > -10 { return 0.0 } - return hb.PowerOfTen() / 10.0 + return hb.powerOfTen() / 10.0 } -func (hb *Bin) Midpoint() float64 { - if hb.IsNaN() { + +func (hb *bin) midpoint() float64 { + if hb.isNaN() { return math.NaN() } - out := hb.Value() + out := hb.value() if out == 0 { return 0 } - interval := hb.BinWidth() + interval := hb.binWidth() if out < 0 { interval = interval * -1 } return out + interval/2.0 } -func (hb *Bin) Left() float64 { - if hb.IsNaN() { + +func (hb *bin) left() float64 { + if hb.isNaN() { return math.NaN() } - out := hb.Value() + out := hb.value() if out >= 0 { return out } - return out - hb.BinWidth() + return out - hb.binWidth() } -func (h1 *Bin) Compare(h2 *Bin) int { - if h1.val == h2.val && h1.exp == h2.exp { - return 0 +func (h1 *bin) compare(h2 *bin) int { + var v1, v2 int + + // 1) slide exp positive + // 2) shift by size of val multiple by (val != 0) + // 3) then add or subtract val accordingly + + if h1.val >= 0 { + v1 = ((int(h1.exp)+256)<<8)*int(((int(h1.val)|(^int(h1.val)+1))>>8)&1) + int(h1.val) + } else { + v1 = 
((int(h1.exp)+256)<<8)*int(((int(h1.val)|(^int(h1.val)+1))>>8)&1) - int(h1.val) } - if h1.val == -1 { - return 1 + + if h2.val >= 0 { + v2 = ((int(h2.exp)+256)<<8)*int(((int(h2.val)|(^int(h2.val)+1))>>8)&1) + int(h2.val) + } else { + v2 = ((int(h2.exp)+256)<<8)*int(((int(h2.val)|(^int(h2.val)+1))>>8)&1) - int(h2.val) } - if h2.val == -1 { - return -1 - } - if h1.val == 0 { - if h2.val > 0 { - return 1 - } - return -1 - } - if h2.val == 0 { - if h1.val < 0 { - return 1 - } - return -1 - } - if h1.val < 0 && h2.val > 0 { - return 1 - } - if h1.val > 0 && h2.val < 0 { - return -1 - } - if h1.exp == h2.exp { - if h1.val < h2.val { - return 1 - } - return -1 - } - if h1.exp > h2.exp { - if h1.val < 0 { - return 1 - } - return -1 - } - if h1.exp < h2.exp { - if h1.val < 0 { - return -1 - } - return 1 - } - return 0 + + // return the difference + return v2 - v1 } // This histogram structure tracks values are two decimal digits of precision // with a bounded error that remains bounded upon composition type Histogram struct { - mutex sync.Mutex - bvs []Bin - used int16 - allocd int16 + bvs []bin + used uint16 + allocd uint16 + + lookup [256][]uint16 + + mutex sync.RWMutex + useLocks bool +} + +const ( + BVL1, BVL1MASK uint64 = iota, 0xff << (8 * iota) + BVL2, BVL2MASK + BVL3, BVL3MASK + BVL4, BVL4MASK + BVL5, BVL5MASK + BVL6, BVL6MASK + BVL7, BVL7MASK + BVL8, BVL8MASK +) + +func getBytesRequired(val uint64) (len int8) { + if 0 != (BVL8MASK|BVL7MASK|BVL6MASK|BVL5MASK)&val { + if 0 != BVL8MASK&val { + return int8(BVL8) + } + if 0 != BVL7MASK&val { + return int8(BVL7) + } + if 0 != BVL6MASK&val { + return int8(BVL6) + } + if 0 != BVL5MASK&val { + return int8(BVL5) + } + } else { + if 0 != BVL4MASK&val { + return int8(BVL4) + } + if 0 != BVL3MASK&val { + return int8(BVL3) + } + if 0 != BVL2MASK&val { + return int8(BVL2) + } + } + return int8(BVL1) +} + +func writeBin(out io.Writer, in bin, idx int) (err error) { + + err = binary.Write(out, binary.BigEndian, in.val) + if err != nil { + return + } + + err = binary.Write(out, binary.BigEndian, in.exp) + if err != nil { + return + } + + var tgtType int8 = getBytesRequired(in.count) + + err = binary.Write(out, binary.BigEndian, tgtType) + if err != nil { + return + } + + var bcount = make([]uint8, 8) + b := bcount[0 : tgtType+1] + for i := tgtType; i >= 0; i-- { + b[i] = uint8(uint64(in.count>>(uint8(i)*8)) & 0xff) + } + + err = binary.Write(out, binary.BigEndian, b) + if err != nil { + return + } + return +} + +func readBin(in io.Reader) (out bin, err error) { + err = binary.Read(in, binary.BigEndian, &out.val) + if err != nil { + return + } + + err = binary.Read(in, binary.BigEndian, &out.exp) + if err != nil { + return + } + var bvl uint8 + err = binary.Read(in, binary.BigEndian, &bvl) + if err != nil { + return + } + if bvl > uint8(BVL8) { + return out, errors.New("encoding error: bvl value is greater than max allowable") + } + + bcount := make([]byte, 8) + b := bcount[0 : bvl+1] + err = binary.Read(in, binary.BigEndian, b) + if err != nil { + return + } + + var count uint64 = 0 + for i := int(bvl + 1); i >= 0; i-- { + count |= (uint64(bcount[i]) << (uint8(i) * 8)) + } + + out.count = count + return +} + +func Deserialize(in io.Reader) (h *Histogram, err error) { + h = New() + if h.bvs == nil { + h.bvs = make([]bin, 0, defaultHistSize) + } + + var nbin int16 + err = binary.Read(in, binary.BigEndian, &nbin) + if err != nil { + return + } + + for ii := int16(0); ii < nbin; ii++ { + bb, err := readBin(in) + if err != nil { + return h, err + } + 
h.insertBin(&bb, int64(bb.count))
+	}
+	return h, nil
+}
+
+func (h *Histogram) Serialize(w io.Writer) error {
+
+	var nbin int16 = int16(len(h.bvs))
+	if err := binary.Write(w, binary.BigEndian, nbin); err != nil {
+		return err
+	}
+
+	for i := 0; i < len(h.bvs); i++ {
+		if err := writeBin(w, h.bvs[i], i); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (h *Histogram) SerializeB64(w io.Writer) error {
+	buf := bytes.NewBuffer([]byte{})
+	h.Serialize(buf)
+
+	encoder := base64.NewEncoder(base64.StdEncoding, w)
+	if _, err := encoder.Write(buf.Bytes()); err != nil {
+		return err
+	}
+	encoder.Close()
+	return nil
 }
 
 // New returns a new Histogram
 func New() *Histogram {
 	return &Histogram{
-		allocd: DEFAULT_HIST_SIZE,
-		used:   0,
-		bvs:    make([]Bin, DEFAULT_HIST_SIZE),
+		allocd:   defaultHistSize,
+		used:     0,
+		bvs:      make([]bin, defaultHistSize),
+		useLocks: true,
+	}
+}
+
+// NewNoLocks returns a Histogram without locking
+func NewNoLocks() *Histogram {
+	return &Histogram{
+		allocd:   defaultHistSize,
+		used:     0,
+		bvs:      make([]bin, defaultHistSize),
+		useLocks: false,
+	}
+}
+
+// NewFromStrings returns a Histogram created from DecStrings strings
+func NewFromStrings(strs []string, locks bool) (*Histogram, error) {
+
+	bin, err := stringsToBin(strs)
+	if err != nil {
+		return nil, err
+	}
+
+	return newFromBins(bin, locks), nil
+}
+
+// newFromBins returns a Histogram created from a bins struct slice
+func newFromBins(bins []bin, locks bool) *Histogram {
+	return &Histogram{
+		allocd:   uint16(len(bins) + 10), // pad it with 10
+		used:     uint16(len(bins)),
+		bvs:      bins,
+		useLocks: locks,
 	}
 }
 
@@ -266,9 +456,24 @@ func (h *Histogram) Mean() float64 {
 
 // Reset forgets all bins in the histogram (they remain allocated)
 func (h *Histogram) Reset() {
-	h.mutex.Lock()
+	if h.useLocks {
+		h.mutex.Lock()
+		defer h.mutex.Unlock()
+	}
+	for i := 0; i < 256; i++ {
+		if h.lookup[i] != nil {
+			for j := range h.lookup[i] {
+				h.lookup[i][j] = 0
+			}
+		}
+	}
 	h.used = 0
-	h.mutex.Unlock()
+}
+
+// RecordIntScale records an integer scalar value, returning an error if the
+// value is out of range.
+func (h *Histogram) RecordIntScale(val int64, scale int) error {
+	return h.RecordIntScales(val, scale, 1)
 }
 
 // RecordValue records the given value, returning an error if the value is out
@@ -277,6 +482,12 @@ func (h *Histogram) RecordValue(v float64) error {
 	return h.RecordValues(v, 1)
 }
 
+// RecordDuration records the given time.Duration in seconds, returning an error
+// if the value is out of range.
+func (h *Histogram) RecordDuration(v time.Duration) error {
+	return h.RecordIntScale(int64(v), -9)
+}
+
 // RecordCorrectedValue records the given value, correcting for stalls in the
 // recording process. This only works for processes which are recording values
 // at an expected interval (e.g., doing jitter analysis).
Processes which are @@ -304,17 +515,23 @@ func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error { } // find where a new bin should go -func (h *Histogram) InternalFind(hb *Bin) (bool, int16) { +func (h *Histogram) internalFind(hb *bin) (bool, uint16) { if h.used == 0 { return false, 0 } + f2 := hb.newFastL2() + if h.lookup[f2.l1] != nil { + if idx := h.lookup[f2.l1][f2.l2]; idx != 0 { + return true, idx - 1 + } + } rv := -1 - idx := int16(0) - l := int16(0) - r := h.used - 1 + idx := uint16(0) + l := int(0) + r := int(h.used - 1) for l < r { check := (r + l) / 2 - rv = h.bvs[check].Compare(hb) + rv = h.bvs[check].compare(hb) if rv == 0 { l = check r = check @@ -325,9 +542,9 @@ func (h *Histogram) InternalFind(hb *Bin) (bool, int16) { } } if rv != 0 { - rv = h.bvs[l].Compare(hb) + rv = h.bvs[l].compare(hb) } - idx = l + idx = uint16(l) if rv == 0 { return true, idx } @@ -338,23 +555,22 @@ func (h *Histogram) InternalFind(hb *Bin) (bool, int16) { return false, idx } -func (h *Histogram) InsertBin(hb *Bin, count int64) uint64 { - h.mutex.Lock() - defer h.mutex.Unlock() - if count == 0 { - return 0 +func (h *Histogram) insertBin(hb *bin, count int64) uint64 { + if h.useLocks { + h.mutex.Lock() + defer h.mutex.Unlock() } - found, idx := h.InternalFind(hb) + found, idx := h.internalFind(hb) if !found { if h.used == h.allocd { - new_bvs := make([]Bin, h.allocd+DEFAULT_HIST_SIZE) + new_bvs := make([]bin, h.allocd+defaultHistSize) if idx > 0 { copy(new_bvs[0:], h.bvs[0:idx]) } if idx < h.used { copy(new_bvs[idx+1:], h.bvs[idx:]) } - h.allocd = h.allocd + DEFAULT_HIST_SIZE + h.allocd = h.allocd + defaultHistSize h.bvs = new_bvs } else { copy(h.bvs[idx+1:], h.bvs[idx:h.used]) @@ -363,13 +579,20 @@ func (h *Histogram) InsertBin(hb *Bin, count int64) uint64 { h.bvs[idx].exp = hb.exp h.bvs[idx].count = uint64(count) h.used++ + for i := idx; i < h.used; i++ { + f2 := h.bvs[i].newFastL2() + if h.lookup[f2.l1] == nil { + h.lookup[f2.l1] = make([]uint16, 256) + } + h.lookup[f2.l1][f2.l2] = uint16(i) + 1 + } return h.bvs[idx].count } var newval uint64 - if count < 0 { - newval = h.bvs[idx].count - uint64(-count) - } else { + if count >= 0 { newval = h.bvs[idx].count + uint64(count) + } else { + newval = h.bvs[idx].count - uint64(-count) } if newval < h.bvs[idx].count { //rolled newval = ^uint64(0) @@ -378,23 +601,59 @@ func (h *Histogram) InsertBin(hb *Bin, count int64) uint64 { return newval - h.bvs[idx].count } +// RecordIntScales records n occurrences of the given value, returning an error if +// the value is out of range. +func (h *Histogram) RecordIntScales(val int64, scale int, n int64) error { + sign := int64(1) + if val == 0 { + scale = 0 + } else { + scale++ + if val < 0 { + val = 0 - val + sign = -1 + } + if val < 10 { + val *= 10 + scale -= 1 + } + for val >= 100 { + val /= 10 + scale++ + } + } + if scale < -128 { + val = 0 + scale = 0 + } else if scale > 127 { + val = 0xff + scale = 0 + } + val *= sign + hb := bin{val: int8(val), exp: int8(scale), count: 0} + h.insertBin(&hb, n) + return nil +} + // RecordValues records n occurrences of the given value, returning an error if // the value is out of range. 
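+//
+// Worked example (illustrative): RecordValues(1234.0, 1) and
+// RecordIntScale(1234, 0) land in the same bin, val=12/exp=3, i.e. the
+// bucket covering [1.2e3, 1.3e3); RecordDuration(15*time.Millisecond)
+// reduces to val=15/exp=-2, the 1.5e-2 second bucket.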
func (h *Histogram) RecordValues(v float64, n int64) error { - var hb Bin - hb.SetFromFloat64(v) - h.InsertBin(&hb, n) + var hb bin + hb.setFromFloat64(v) + h.insertBin(&hb, n) return nil } // Approximate mean func (h *Histogram) ApproxMean() float64 { - h.mutex.Lock() - defer h.mutex.Unlock() + if h.useLocks { + h.mutex.RLock() + defer h.mutex.RUnlock() + } divisor := 0.0 sum := 0.0 - for i := int16(0); i < h.used; i++ { - midpoint := h.bvs[i].Midpoint() + for i := uint16(0); i < h.used; i++ { + midpoint := h.bvs[i].midpoint() cardinality := float64(h.bvs[i].count) divisor += cardinality sum += midpoint * cardinality @@ -407,11 +666,13 @@ func (h *Histogram) ApproxMean() float64 { // Approximate sum func (h *Histogram) ApproxSum() float64 { - h.mutex.Lock() - defer h.mutex.Unlock() + if h.useLocks { + h.mutex.RLock() + defer h.mutex.RUnlock() + } sum := 0.0 - for i := int16(0); i < h.used; i++ { - midpoint := h.bvs[i].Midpoint() + for i := uint16(0); i < h.used; i++ { + midpoint := h.bvs[i].midpoint() cardinality := float64(h.bvs[i].count) sum += midpoint * cardinality } @@ -419,10 +680,12 @@ func (h *Histogram) ApproxSum() float64 { } func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) { - h.mutex.Lock() - defer h.mutex.Unlock() + if h.useLocks { + h.mutex.RLock() + defer h.mutex.RUnlock() + } q_out := make([]float64, len(q_in)) - i_q, i_b := 0, int16(0) + i_q, i_b := 0, uint16(0) total_cnt, bin_width, bin_left, lower_cnt, upper_cnt := 0.0, 0.0, 0.0, 0.0, 0.0 if len(q_in) == 0 { return q_out, nil @@ -435,7 +698,7 @@ func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) { } // Add up the bins for i_b = 0; i_b < h.used; i_b++ { - if !h.bvs[i_b].IsNaN() { + if !h.bvs[i_b].isNaN() { total_cnt += float64(h.bvs[i_b].count) } } @@ -451,11 +714,11 @@ func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) { } for i_b = 0; i_b < h.used; i_b++ { - if h.bvs[i_b].IsNaN() { + if h.bvs[i_b].isNaN() { continue } - bin_width = h.bvs[i_b].BinWidth() - bin_left = h.bvs[i_b].Left() + bin_width = h.bvs[i_b].binWidth() + bin_left = h.bvs[i_b].left() lower_cnt = upper_cnt upper_cnt = lower_cnt + float64(h.bvs[i_b].count) break @@ -463,8 +726,8 @@ func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) { for i_q = 0; i_q < len(q_in); i_q++ { for i_b < (h.used-1) && upper_cnt < q_out[i_q] { i_b++ - bin_width = h.bvs[i_b].BinWidth() - bin_left = h.bvs[i_b].Left() + bin_width = h.bvs[i_b].binWidth() + bin_left = h.bvs[i_b].left() lower_cnt = upper_cnt upper_cnt = lower_cnt + float64(h.bvs[i_b].count) } @@ -485,8 +748,10 @@ func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) { // ValueAtQuantile returns the recorded value at the given quantile (0..1). func (h *Histogram) ValueAtQuantile(q float64) float64 { - h.mutex.Lock() - defer h.mutex.Unlock() + if h.useLocks { + h.mutex.RLock() + defer h.mutex.RUnlock() + } q_in := make([]float64, 1) q_in[0] = q q_out, err := h.ApproxQuantile(q_in) @@ -505,17 +770,21 @@ func (h *Histogram) SignificantFigures() int64 { // Equals returns true if the two Histograms are equivalent, false if not. 
func (h *Histogram) Equals(other *Histogram) bool { - h.mutex.Lock() - other.mutex.Lock() - defer h.mutex.Unlock() - defer other.mutex.Unlock() + if h.useLocks { + h.mutex.RLock() + defer h.mutex.RUnlock() + } + if other.useLocks { + other.mutex.RLock() + defer other.mutex.RUnlock() + } switch { case h.used != other.used: return false default: - for i := int16(0); i < h.used; i++ { - if h.bvs[i].Compare(&other.bvs[i]) != 0 { + for i := uint16(0); i < h.used; i++ { + if h.bvs[i].compare(&other.bvs[i]) != 0 { return false } if h.bvs[i].count != other.bvs[i].count { @@ -526,30 +795,123 @@ func (h *Histogram) Equals(other *Histogram) bool { return true } -func (h *Histogram) CopyAndReset() *Histogram { - h.mutex.Lock() - defer h.mutex.Unlock() - newhist := &Histogram{ - allocd: h.allocd, - used: h.used, - bvs: h.bvs, +// Copy creates and returns an exact copy of a histogram. +func (h *Histogram) Copy() *Histogram { + if h.useLocks { + h.mutex.Lock() + defer h.mutex.Unlock() } - h.allocd = DEFAULT_HIST_SIZE - h.bvs = make([]Bin, DEFAULT_HIST_SIZE) - h.used = 0 + + newhist := New() + newhist.allocd = h.allocd + newhist.used = h.used + newhist.useLocks = h.useLocks + + newhist.bvs = []bin{} + for _, v := range h.bvs { + newhist.bvs = append(newhist.bvs, v) + } + + for i, u := range h.lookup { + for _, v := range u { + newhist.lookup[i] = append(newhist.lookup[i], v) + } + } + return newhist } + +// FullReset resets a histogram to default empty values. +func (h *Histogram) FullReset() { + if h.useLocks { + h.mutex.Lock() + defer h.mutex.Unlock() + } + + h.allocd = defaultHistSize + h.bvs = make([]bin, defaultHistSize) + h.used = 0 + h.lookup = [256][]uint16{} +} + +// CopyAndReset creates and returns an exact copy of a histogram, +// and resets it to default empty values. 
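+//
+// Flush-loop sketch (illustrative):
+//
+//	snap := h.CopyAndReset() // drain accumulated samples
+//	for _, s := range snap.DecStrings() {
+//		fmt.Println(s) // e.g. "H[1.2e+03]=7"
+//	}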
+func (h *Histogram) CopyAndReset() *Histogram { + newhist := h.Copy() + h.FullReset() + return newhist +} + func (h *Histogram) DecStrings() []string { - h.mutex.Lock() - defer h.mutex.Unlock() + if h.useLocks { + h.mutex.Lock() + defer h.mutex.Unlock() + } out := make([]string, h.used) for i, bin := range h.bvs[0:h.used] { var buffer bytes.Buffer buffer.WriteString("H[") - buffer.WriteString(fmt.Sprintf("%3.1e", bin.Value())) + buffer.WriteString(fmt.Sprintf("%3.1e", bin.value())) buffer.WriteString("]=") buffer.WriteString(fmt.Sprintf("%v", bin.count)) out[i] = buffer.String() } return out } + +// takes the output of DecStrings and deserializes it into a Bin struct slice +func stringsToBin(strs []string) ([]bin, error) { + + bins := make([]bin, len(strs)) + for i, str := range strs { + + // H[0.0e+00]=1 + + // H[0.0e+00]= <1> + countString := strings.Split(str, "=")[1] + countInt, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return nil, err + } + + // H[ <0.0> e+00]=1 + valString := strings.Split(strings.Split(strings.Split(str, "=")[0], "e")[0], "[")[1] + valInt, err := strconv.ParseFloat(valString, 64) + if err != nil { + return nil, err + } + + // H[0.0e <+00> ]=1 + expString := strings.Split(strings.Split(strings.Split(str, "=")[0], "e")[1], "]")[0] + expInt, err := strconv.ParseInt(expString, 10, 8) + if err != nil { + return nil, err + } + bins[i] = *newBinRaw(int8(valInt*10), int8(expInt), uint64(countInt)) + } + + return bins, nil +} + +// UnmarshalJSON - histogram will come in a base64 encoded serialized form +func (h *Histogram) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + data, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return err + } + h, err = Deserialize(bytes.NewBuffer(data)) + return err +} + +func (h *Histogram) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer([]byte{}) + err := h.SerializeB64(buf) + if err != nil { + return buf.Bytes(), err + } + return json.Marshal(buf.String()) +} diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md new file mode 100644 index 000000000..3255cbb24 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/README.md @@ -0,0 +1,67 @@ +Consul API client +================= + +This package provides the `api` package which attempts to +provide programmatic access to the full Consul API. + +Currently, all of the Consul APIs included in version 0.6.0 are supported. + +Documentation +============= + +The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api) + +Usage +===== + +Below is an example of using the Consul client: + +```go +package main + +import "github.com/hashicorp/consul/api" +import "fmt" + +func main() { + // Get a new client + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + panic(err) + } + + // Get a handle to the KV API + kv := client.KV() + + // PUT a new KV pair + p := &api.KVPair{Key: "REDIS_MAXCLIENTS", Value: []byte("1000")} + _, err = kv.Put(p, nil) + if err != nil { + panic(err) + } + + // Lookup the pair + pair, _, err := kv.Get("REDIS_MAXCLIENTS", nil) + if err != nil { + panic(err) + } + fmt.Printf("KV: %v %s\n", pair.Key, pair.Value) +} +``` + +To run this example, start a Consul server: + +```bash +consul agent -dev +``` + +Copy the code above into a file such as `main.go`. + +Install and run. You'll see a key (`REDIS_MAXCLIENTS`) and value (`1000`) printed. 
+ +```bash +$ go get +$ go run main.go +KV: REDIS_MAXCLIENTS 1000 +``` + +After running the code, you can also view the values in the Consul UI on your local machine at http://localhost:8500/ui/dc1/kv diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go new file mode 100644 index 000000000..124409ff2 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -0,0 +1,1116 @@ +package api + +import ( + "fmt" + "io" + "io/ioutil" + "net/url" + "time" + + "github.com/mitchellh/mapstructure" +) + +const ( + // ACLClientType is the client type token + ACLClientType = "client" + + // ACLManagementType is the management type token + ACLManagementType = "management" +) + +type ACLTokenPolicyLink struct { + ID string + Name string +} +type ACLTokenRoleLink struct { + ID string + Name string +} + +// ACLToken represents an ACL Token +type ACLToken struct { + CreateIndex uint64 + ModifyIndex uint64 + AccessorID string + SecretID string + Description string + Policies []*ACLTokenPolicyLink `json:",omitempty"` + Roles []*ACLTokenRoleLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Local bool + ExpirationTTL time.Duration `json:",omitempty"` + ExpirationTime *time.Time `json:",omitempty"` + CreateTime time.Time `json:",omitempty"` + Hash []byte `json:",omitempty"` + + // DEPRECATED (ACL-Legacy-Compat) + // Rules will only be present for legacy tokens returned via the new APIs + Rules string `json:",omitempty"` +} + +type ACLTokenListEntry struct { + CreateIndex uint64 + ModifyIndex uint64 + AccessorID string + Description string + Policies []*ACLTokenPolicyLink `json:",omitempty"` + Roles []*ACLTokenRoleLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Local bool + ExpirationTime *time.Time `json:",omitempty"` + CreateTime time.Time + Hash []byte + Legacy bool +} + +// ACLEntry is used to represent a legacy ACL token +// The legacy tokens are deprecated. +type ACLEntry struct { + CreateIndex uint64 + ModifyIndex uint64 + ID string + Name string + Type string + Rules string +} + +// ACLReplicationStatus is used to represent the status of ACL replication. +type ACLReplicationStatus struct { + Enabled bool + Running bool + SourceDatacenter string + ReplicationType string + ReplicatedIndex uint64 + ReplicatedRoleIndex uint64 + ReplicatedTokenIndex uint64 + LastSuccess time.Time + LastError time.Time +} + +// ACLServiceIdentity represents a high-level grant of all necessary privileges +// to assume the identity of the named Service in the Catalog and within +// Connect. +type ACLServiceIdentity struct { + ServiceName string + Datacenters []string `json:",omitempty"` +} + +// ACLPolicy represents an ACL Policy. +type ACLPolicy struct { + ID string + Name string + Description string + Rules string + Datacenters []string + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLPolicyListEntry struct { + ID string + Name string + Description string + Datacenters []string + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLRolePolicyLink struct { + ID string + Name string +} + +// ACLRole represents an ACL Role. +type ACLRole struct { + ID string + Name string + Description string + Policies []*ACLRolePolicyLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +// BindingRuleBindType is the type of binding rule mechanism used. 
+type BindingRuleBindType string + +const ( + // BindingRuleBindTypeService binds to a service identity with the given name. + BindingRuleBindTypeService BindingRuleBindType = "service" + + // BindingRuleBindTypeRole binds to pre-existing roles with the given name. + BindingRuleBindTypeRole BindingRuleBindType = "role" +) + +type ACLBindingRule struct { + ID string + Description string + AuthMethod string + Selector string + BindType BindingRuleBindType + BindName string + + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLAuthMethod struct { + Name string + Type string + Description string + + // Configuration is arbitrary configuration for the auth method. This + // should only contain primitive values and containers (such as lists and + // maps). + Config map[string]interface{} + + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLAuthMethodListEntry struct { + Name string + Type string + Description string + CreateIndex uint64 + ModifyIndex uint64 +} + +// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed +// KubernetesAuthMethodConfig. +func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) { + var config KubernetesAuthMethodConfig + decodeConf := &mapstructure.DecoderConfig{ + Result: &config, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + if err := decoder.Decode(raw); err != nil { + return nil, fmt.Errorf("error decoding config: %s", err) + } + + return &config, nil +} + +// KubernetesAuthMethodConfig is the config for the built-in Consul auth method +// for Kubernetes. +type KubernetesAuthMethodConfig struct { + Host string `json:",omitempty"` + CACert string `json:",omitempty"` + ServiceAccountJWT string `json:",omitempty"` +} + +// RenderToConfig converts this into a map[string]interface{} suitable for use +// in the ACLAuthMethod.Config field. +func (c *KubernetesAuthMethodConfig) RenderToConfig() map[string]interface{} { + return map[string]interface{}{ + "Host": c.Host, + "CACert": c.CACert, + "ServiceAccountJWT": c.ServiceAccountJWT, + } +} + +type ACLLoginParams struct { + AuthMethod string + BearerToken string + Meta map[string]string `json:",omitempty"` +} + +// ACL can be used to query the ACL endpoints +type ACL struct { + c *Client +} + +// ACL returns a handle to the ACL endpoints +func (c *Client) ACL() *ACL { + return &ACL{c} +} + +// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster +// to get the first management token. +func (a *ACL) Bootstrap() (*ACLToken, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/bootstrap") + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, wm, nil +} + +// Create is used to generate a new token with the given parameters +// +// Deprecated: Use TokenCreate instead. 
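+//
+// Migration sketch (illustrative, caller side; names are examples only):
+//
+//	tok, _, err := client.ACL().TokenCreate(&api.ACLToken{
+//		Description: "example app token",
+//		Policies:    []*api.ACLTokenPolicyLink{{Name: "example-policy"}},
+//	}, nil)
+//	// on success, tok.SecretID holds the generated secret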
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/create") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update is used to update the rules of an existing token +// +// Deprecated: Use TokenUpdate instead. +func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/update") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Destroy is used to destroy a given ACL token ID +// +// Deprecated: Use TokenDelete instead. +func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Clone is used to return a new token cloned from an existing one +// +// Deprecated: Use TokenClone instead. +func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Info is used to query for information about an ACL token +// +// Deprecated: Use TokenRead instead. +func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to get all the ACL tokens +// +// Deprecated: Use TokenList instead. 
+func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Replication returns the status of the ACL replication process in the datacenter +func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/replication") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries *ACLReplicationStatus + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// TokenCreate creates a new ACL token. If either the AccessorID or SecretID fields +// of the ACLToken structure are empty they will be filled in by Consul. +func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/token") + r.setWriteOptions(q) + r.obj = token + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// TokenUpdate updates a token in place without modifying its AccessorID or SecretID. A valid +// AccessorID must be set in the ACLToken structure passed to this function but the SecretID may +// be omitted and will be filled in by Consul with its existing value. +func (a *ACL) TokenUpdate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if token.AccessorID == "" { + return nil, nil, fmt.Errorf("Must specify an AccessorID for Token Updating") + } + r := a.c.newRequest("PUT", "/v1/acl/token/"+token.AccessorID) + r.setWriteOptions(q) + r.obj = token + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// TokenClone will create a new token with the same policies and locality as the original +// token but will have its own auto-generated AccessorID and SecretID as well having the +// description passed to this function. The tokenID parameter must be a valid Accessor ID +// of an existing token. +func (a *ACL) TokenClone(tokenID string, description string, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if tokenID == "" { + return nil, nil, fmt.Errorf("Must specify a tokenID for Token Cloning") + } + + r := a.c.newRequest("PUT", "/v1/acl/token/"+tokenID+"/clone") + r.setWriteOptions(q) + r.obj = struct{ Description string }{description} + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// TokenDelete removes a single ACL token. 
The tokenID parameter must be a valid
+// Accessor ID of an existing token.
+func (a *ACL) TokenDelete(tokenID string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("DELETE", "/v1/acl/token/"+tokenID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// TokenRead retrieves the full token details. The tokenID parameter must be a valid
+// Accessor ID of an existing token.
+func (a *ACL) TokenRead(tokenID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/token/"+tokenID)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// TokenReadSelf retrieves the full token details of the token currently
+// assigned to the API Client. In this manner it's possible to read a token
+// by its Secret ID.
+func (a *ACL) TokenReadSelf(q *QueryOptions) (*ACLToken, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/token/self")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// TokenList lists all tokens. The listing does not contain any SecretIDs as those
+// may only be retrieved by a call to TokenRead.
+func (a *ACL) TokenList(q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/tokens")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLTokenListEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// PolicyCreate will create a new policy. It is not allowed for the policy parameter's
+// ID field to be set as this will be generated by Consul while processing the request.
+func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) {
+	if policy.ID != "" {
+		return nil, nil, fmt.Errorf("Cannot specify an ID in Policy Creation")
+	}
+	r := a.c.newRequest("PUT", "/v1/acl/policy")
+	r.setWriteOptions(q)
+	r.obj = policy
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLPolicy
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// PolicyUpdate updates a policy.
The ID field of the policy parameter must be set to an +// existing policy ID +func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { + if policy.ID == "" { + return nil, nil, fmt.Errorf("Must specify an ID in Policy Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/policy/"+policy.ID) + r.setWriteOptions(q) + r.obj = policy + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// PolicyDelete deletes a policy given its ID. +func (a *ACL) PolicyDelete(policyID string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("DELETE", "/v1/acl/policy/"+policyID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// PolicyRead retrieves the policy details including the rule set. +func (a *ACL) PolicyRead(policyID string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/policy/"+policyID) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// PolicyList retrieves a listing of all policies. The listing does not include the +// rules for any policy as those should be retrieved by subsequent calls to PolicyRead. +func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/policies") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLPolicyListEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// RulesTranslate translates the legacy rule syntax into the current syntax. +// +// Deprecated: Support for the legacy syntax translation will be removed +// when legacy ACL support is removed. +func (a *ACL) RulesTranslate(rules io.Reader) (string, error) { + r := a.c.newRequest("POST", "/v1/acl/rules/translate") + r.body = rules + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", err + } + defer resp.Body.Close() + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + ruleBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("Failed to read translated rule body: %v", err) + } + + return string(ruleBytes), nil +} + +// RulesTranslateToken translates the rules associated with the legacy syntax +// into the current syntax and returns the results. +// +// Deprecated: Support for the legacy syntax translation will be removed +// when legacy ACL support is removed. 
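+//
+// A short sketch of the intended call pattern (assumes a *api.Client named
+// "client"; the accessor ID below is a placeholder):
+//
+//	rules, err := client.ACL().RulesTranslateToken("<legacy token accessor ID>")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(rules) // the rules rendered in the current syntax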
+func (a *ACL) RulesTranslateToken(tokenID string) (string, error) {
+	r := a.c.newRequest("GET", "/v1/acl/rules/translate/"+tokenID)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	ruleBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", fmt.Errorf("Failed to read translated rule body: %v", err)
+	}
+
+	return string(ruleBytes), nil
+}
+
+// RoleCreate will create a new role. It is not allowed for the role parameter's
+// ID field to be set as this will be generated by Consul while processing the request.
+func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) {
+	if role.ID != "" {
+		return nil, nil, fmt.Errorf("Cannot specify an ID in Role Creation")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/role")
+	r.setWriteOptions(q)
+	r.obj = role
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLRole
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// RoleUpdate updates a role. The ID field of the role parameter must be set to an
+// existing role ID.
+func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) {
+	if role.ID == "" {
+		return nil, nil, fmt.Errorf("Must specify an ID in Role Update")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/role/"+role.ID)
+	r.setWriteOptions(q)
+	r.obj = role
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLRole
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// RoleDelete deletes a role given its ID.
+func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("DELETE", "/v1/acl/role/"+roleID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// RoleRead retrieves the role details (by ID). Returns nil if not found.
+func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/role/"+roleID)
+	r.setQueryOptions(q)
+	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if !found {
+		return nil, qm, nil
+	}
+
+	var out ACLRole
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// RoleReadByName retrieves the role details (by name). Returns nil if not found.
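+//
+// Usage sketch illustrating the nil-on-not-found contract (names are
+// placeholders, assuming a *api.Client named "client"):
+//
+//	role, _, err := client.ACL().RoleReadByName("example-role", nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if role == nil {
+//		fmt.Println("role not found")
+//	}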
+func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/role/name/"+url.QueryEscape(roleName)) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// RoleList retrieves a listing of all roles. The listing does not include some +// metadata for the role as those should be retrieved by subsequent calls to +// RoleRead. +func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/roles") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLRole + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// AuthMethodCreate will create a new auth method. +func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { + if method.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/auth-method") + r.setWriteOptions(q) + r.obj = method + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// AuthMethodUpdate updates an auth method. +func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { + if method.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/auth-method/"+url.QueryEscape(method.Name)) + r.setWriteOptions(q) + r.obj = method + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// AuthMethodDelete deletes an auth method given its Name. +func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta, error) { + if methodName == "" { + return nil, fmt.Errorf("Must specify a Name in Auth Method Delete") + } + + r := a.c.newRequest("DELETE", "/v1/acl/auth-method/"+url.QueryEscape(methodName)) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// AuthMethodRead retrieves the auth method. Returns nil if not found. 
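+//
+// As with RoleRead and RoleReadByName above, a nil result with a nil error
+// means the auth method does not exist (sketch; "client" is an assumed
+// *api.Client):
+//
+//	method, _, err := client.ACL().AuthMethodRead("example-method", nil)
+//	if err == nil && method == nil {
+//		fmt.Println("auth method not found")
+//	}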
+func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod, *QueryMeta, error) {
+	if methodName == "" {
+		return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Read")
+	}
+
+	r := a.c.newRequest("GET", "/v1/acl/auth-method/"+url.QueryEscape(methodName))
+	r.setQueryOptions(q)
+	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if !found {
+		return nil, qm, nil
+	}
+
+	var out ACLAuthMethod
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// AuthMethodList retrieves a listing of all auth methods. The listing does not
+// include some metadata for the auth method as those should be retrieved by
+// subsequent calls to AuthMethodRead.
+func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/auth-methods")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLAuthMethodListEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// BindingRuleCreate will create a new binding rule. It is not allowed for the
+// binding rule parameter's ID field to be set as this will be generated by
+// Consul while processing the request.
+func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) {
+	if rule.ID != "" {
+		return nil, nil, fmt.Errorf("Cannot specify an ID in Binding Rule Creation")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/binding-rule")
+	r.setWriteOptions(q)
+	r.obj = rule
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLBindingRule
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// BindingRuleUpdate updates a binding rule. The ID field of the binding
+// rule parameter must be set to an existing binding rule ID.
+func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) {
+	if rule.ID == "" {
+		return nil, nil, fmt.Errorf("Must specify an ID in Binding Rule Update")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/binding-rule/"+rule.ID)
+	r.setWriteOptions(q)
+	r.obj = rule
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLBindingRule
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// BindingRuleDelete deletes a binding rule given its ID.
+func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("DELETE", "/v1/acl/binding-rule/"+bindingRuleID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// BindingRuleRead retrieves the binding rule details. Returns nil if not found.
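+//
+// Sketch mirroring the other not-found-aware readers in this file (the rule
+// ID is a placeholder; "client" is an assumed *api.Client):
+//
+//	rule, _, err := client.ACL().BindingRuleRead("<binding rule ID>", nil)
+//	if err == nil && rule == nil {
+//		fmt.Println("binding rule not found")
+//	}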
+func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindingRule, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/binding-rule/"+bindingRuleID)
+	r.setQueryOptions(q)
+	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if !found {
+		return nil, qm, nil
+	}
+
+	var out ACLBindingRule
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// BindingRuleList retrieves a listing of all binding rules.
+func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBindingRule, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/binding-rules")
+	if methodName != "" {
+		r.params.Set("authmethod", methodName)
+	}
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLBindingRule
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// Login is used to exchange auth method credentials for a newly-minted Consul Token.
+func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
+	r := a.c.newRequest("POST", "/v1/acl/login")
+	r.setWriteOptions(q)
+	r.obj = auth
+
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, wm, nil
+}
+
+// Logout is used to destroy a Consul Token created via Login().
+func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("POST", "/v1/acl/logout")
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go
new file mode 100644
index 000000000..04043ba84
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/agent.go
@@ -0,0 +1,1035 @@
+package api
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+// ServiceKind is the kind of service being registered.
+type ServiceKind string
+
+const (
+	// ServiceKindTypical is a typical, classic Consul service. This is
+	// represented by the absence of a value. This was chosen for ease of
+	// backwards compatibility: existing services in the catalog would
+	// default to the typical service.
+	ServiceKindTypical ServiceKind = ""
+
+	// ServiceKindConnectProxy is a proxy for the Connect feature. This
+	// service proxies another service within Consul and speaks the connect
+	// protocol.
+	ServiceKindConnectProxy ServiceKind = "connect-proxy"
+)
+
+// ProxyExecMode is the execution mode for a managed Connect proxy.
+type ProxyExecMode string
+
+const (
+	// ProxyExecModeDaemon indicates that the proxy command should be long-running
+	// and should be started and supervised by the agent until its target service
+	// is deregistered.
+	ProxyExecModeDaemon ProxyExecMode = "daemon"
+
+	// ProxyExecModeScript indicates that the proxy command should be invoked to
+	// completion on each change to the configuration or lifecycle event. The
+	// script typically fetches the config and certificates from the agent API and
+	// then configures an externally managed daemon, perhaps starting and stopping
+	// it if necessary.
+	ProxyExecModeScript ProxyExecMode = "script"
+)
+
+// UpstreamDestType is the type of upstream discovery mechanism.
+type UpstreamDestType string
+
+const (
+	// UpstreamDestTypeService discovers instances via healthy service lookup.
+	UpstreamDestTypeService UpstreamDestType = "service"
+
+	// UpstreamDestTypePreparedQuery discovers instances via prepared query
+	// execution.
+	UpstreamDestTypePreparedQuery UpstreamDestType = "prepared_query"
+)
+
+// AgentCheck represents a check known to the agent
+type AgentCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+	Definition  HealthCheckDefinition
+}
+
+// AgentWeights represent optional weights for a service
+type AgentWeights struct {
+	Passing int
+	Warning int
+}
+
+// AgentService represents a service known to the agent
+type AgentService struct {
+	Kind              ServiceKind `json:",omitempty"`
+	ID                string
+	Service           string
+	Tags              []string
+	Meta              map[string]string
+	Port              int
+	Address           string
+	Weights           AgentWeights
+	EnableTagOverride bool
+	CreateIndex       uint64 `json:",omitempty" bexpr:"-"`
+	ModifyIndex       uint64 `json:",omitempty" bexpr:"-"`
+	ContentHash       string `json:",omitempty" bexpr:"-"`
+	// DEPRECATED (ProxyDestination) - remove this field
+	ProxyDestination string                          `json:",omitempty" bexpr:"-"`
+	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
+	Connect          *AgentServiceConnect            `json:",omitempty"`
+}
+
+// AgentServiceChecksInfo returns information about a Service and its checks
+type AgentServiceChecksInfo struct {
+	AggregatedStatus string
+	Service          *AgentService
+	Checks           HealthChecks
+}
+
+// AgentServiceConnect represents the Connect configuration of a service.
+type AgentServiceConnect struct {
+	Native         bool                      `json:",omitempty"`
+	Proxy          *AgentServiceConnectProxy `json:",omitempty" bexpr:"-"`
+	SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
+}
+
+// AgentServiceConnectProxy represents the Connect Proxy configuration of a
+// service.
+type AgentServiceConnectProxy struct {
+	ExecMode  ProxyExecMode          `json:",omitempty"`
+	Command   []string               `json:",omitempty"`
+	Config    map[string]interface{} `json:",omitempty" bexpr:"-"`
+	Upstreams []Upstream             `json:",omitempty"`
+}
+
+// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
+// ServiceDefinition or response.
+type AgentServiceConnectProxyConfig struct {
+	DestinationServiceName string
+	DestinationServiceID   string                 `json:",omitempty"`
+	LocalServiceAddress    string                 `json:",omitempty"`
+	LocalServicePort       int                    `json:",omitempty"`
+	Config                 map[string]interface{} `json:",omitempty" bexpr:"-"`
+	Upstreams              []Upstream
+}
+
+// AgentMember represents a cluster member known to the agent
+type AgentMember struct {
+	Name        string
+	Addr        string
+	Port        uint16
+	Tags        map[string]string
+	Status      int
+	ProtocolMin uint8
+	ProtocolMax uint8
+	ProtocolCur uint8
+	DelegateMin uint8
+	DelegateMax uint8
+	DelegateCur uint8
+}
+
+// AllSegments is used to select for all segments in MembersOpts.
+const AllSegments = "_all"
+
+// MembersOpts is used for querying member information.
+type MembersOpts struct {
+	// WAN is whether to show members from the WAN.
+	WAN bool
+
+	// Segment is the LAN segment to show members for. Setting this to the
+	// AllSegments value above will show members in all segments.
+	Segment string
+}
+
+// AgentServiceRegistration is used to register a new service
+type AgentServiceRegistration struct {
+	Kind              ServiceKind       `json:",omitempty"`
+	ID                string            `json:",omitempty"`
+	Name              string            `json:",omitempty"`
+	Tags              []string          `json:",omitempty"`
+	Port              int               `json:",omitempty"`
+	Address           string            `json:",omitempty"`
+	EnableTagOverride bool              `json:",omitempty"`
+	Meta              map[string]string `json:",omitempty"`
+	Weights           *AgentWeights     `json:",omitempty"`
+	Check             *AgentServiceCheck
+	Checks            AgentServiceChecks
+	// DEPRECATED (ProxyDestination) - remove this field
+	ProxyDestination string                          `json:",omitempty"`
+	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
+	Connect          *AgentServiceConnect            `json:",omitempty"`
+}
+
+// AgentCheckRegistration is used to register a new check
+type AgentCheckRegistration struct {
+	ID        string `json:",omitempty"`
+	Name      string `json:",omitempty"`
+	Notes     string `json:",omitempty"`
+	ServiceID string `json:",omitempty"`
+	AgentServiceCheck
+}
+
+// AgentServiceCheck is used to define a node or service level check
+type AgentServiceCheck struct {
+	CheckID           string              `json:",omitempty"`
+	Name              string              `json:",omitempty"`
+	Args              []string            `json:"ScriptArgs,omitempty"`
+	DockerContainerID string              `json:",omitempty"`
+	Shell             string              `json:",omitempty"` // Only supported for Docker.
+	Interval          string              `json:",omitempty"`
+	Timeout           string              `json:",omitempty"`
+	TTL               string              `json:",omitempty"`
+	HTTP              string              `json:",omitempty"`
+	Header            map[string][]string `json:",omitempty"`
+	Method            string              `json:",omitempty"`
+	TCP               string              `json:",omitempty"`
+	Status            string              `json:",omitempty"`
+	Notes             string              `json:",omitempty"`
+	TLSSkipVerify     bool                `json:",omitempty"`
+	GRPC              string              `json:",omitempty"`
+	GRPCUseTLS        bool                `json:",omitempty"`
+	AliasNode         string              `json:",omitempty"`
+	AliasService      string              `json:",omitempty"`
+
+	// In Consul 0.7 and later, checks that are associated with a service
+	// may also contain this optional DeregisterCriticalServiceAfter field,
+	// which is a timeout in the same Go time format as Interval and TTL. If
+	// a check is in the critical state for more than this configured value,
+	// then its associated service (and all of its associated checks) will
+	// automatically be deregistered.
+	DeregisterCriticalServiceAfter string `json:",omitempty"`
+}
+type AgentServiceChecks []*AgentServiceCheck
+
+// AgentToken is used when updating ACL tokens for an agent.
+type AgentToken struct {
+	Token string
+}
+
+// MetricsInfo is used to store different types of metric values from the agent.
+type MetricsInfo struct {
+	Timestamp string
+	Gauges    []GaugeValue
+	Points    []PointValue
+	Counters  []SampledValue
+	Samples   []SampledValue
+}
+
+// GaugeValue stores one value that is updated as time goes on, such as
+// the amount of memory allocated.
+type GaugeValue struct {
+	Name   string
+	Value  float32
+	Labels map[string]string
+}
+
+// PointValue holds a series of points for a metric.
+type PointValue struct {
+	Name   string
+	Points []float32
+}
+
+// SampledValue stores info about a metric that is incremented over time,
+// such as the number of requests to an HTTP endpoint.
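+//
+// For example, the counters returned by Agent.Metrics (defined further down
+// in this file) can be scanned like this (sketch; "client" is an assumed
+// *api.Client):
+//
+//	info, err := client.Agent().Metrics()
+//	if err == nil {
+//		for _, c := range info.Counters {
+//			fmt.Println(c.Name, c.Count, c.Mean)
+//		}
+//	}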
+type SampledValue struct {
+	Name   string
+	Count  int
+	Sum    float64
+	Min    float64
+	Max    float64
+	Mean   float64
+	Stddev float64
+	Labels map[string]string
+}
+
+// AgentAuthorizeParams are the request parameters for authorizing a request.
+type AgentAuthorizeParams struct {
+	Target           string
+	ClientCertURI    string
+	ClientCertSerial string
+}
+
+// AgentAuthorize is the response structure for Connect authorization.
+type AgentAuthorize struct {
+	Authorized bool
+	Reason     string
+}
+
+// ConnectProxyConfig is the response structure for agent-local proxy
+// configuration.
+type ConnectProxyConfig struct {
+	ProxyServiceID    string
+	TargetServiceID   string
+	TargetServiceName string
+	ContentHash       string
+	// DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs
+	// but they don't need ExecMode or Command
+	ExecMode  ProxyExecMode          `json:",omitempty"`
+	Command   []string               `json:",omitempty"`
+	Config    map[string]interface{} `bexpr:"-"`
+	Upstreams []Upstream
+}
+
+// Upstream is the response structure for a proxy upstream configuration.
+type Upstream struct {
+	DestinationType      UpstreamDestType       `json:",omitempty"`
+	DestinationNamespace string                 `json:",omitempty"`
+	DestinationName      string
+	Datacenter           string                 `json:",omitempty"`
+	LocalBindAddress     string                 `json:",omitempty"`
+	LocalBindPort        int                    `json:",omitempty"`
+	Config               map[string]interface{} `json:",omitempty" bexpr:"-"`
+}
+
+// Agent can be used to query the Agent endpoints
+type Agent struct {
+	c *Client
+
+	// cache the node name
+	nodeName string
+}
+
+// Agent returns a handle to the agent endpoints
+func (c *Client) Agent() *Agent {
+	return &Agent{c: c}
+}
+
+// Self is used to query the agent we are speaking to for
+// information about itself
+func (a *Agent) Self() (map[string]map[string]interface{}, error) {
+	r := a.c.newRequest("GET", "/v1/agent/self")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]map[string]interface{}
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Host is used to retrieve information about the host the
+// agent is running on such as CPU, memory, and disk. Requires
+// an operator:read ACL token.
+func (a *Agent) Host() (map[string]interface{}, error) {
+	r := a.c.newRequest("GET", "/v1/agent/host")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]interface{}
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Metrics is used to query the agent we are speaking to for
+// its current internal metric data
+func (a *Agent) Metrics() (*MetricsInfo, error) {
+	r := a.c.newRequest("GET", "/v1/agent/metrics")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out *MetricsInfo
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Reload triggers a configuration reload for the agent we are connected to.
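+//
+// Usage sketch (assuming a *api.Client named "client"):
+//
+//	if err := client.Agent().Reload(); err != nil {
+//		log.Fatalf("agent reload failed: %v", err)
+//	}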
+func (a *Agent) Reload() error { + r := a.c.newRequest("PUT", "/v1/agent/reload") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + return a.ChecksWithFilter("") +} + +// ChecksWithFilter returns a subset of the locally registered checks that match +// the given filter expression +func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + r.filterQuery(filter) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + return a.ServicesWithFilter("") +} + +// ServicesWithFilter returns a subset of the locally registered services that match +// the given filter expression +func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) { + r := a.c.newRequest("GET", "/v1/agent/services") + r.filterQuery(filter) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + + return out, nil +} + +// AgentHealthServiceByID returns for a given serviceID: the aggregated health status, the service definition or an error if any +// - If the service is not found, will return status (critical, nil, nil) +// - If the service is found, will return (critical|passing|warning), AgentServiceChecksInfo, nil) +// - In all other cases, will return an error +func (a *Agent) AgentHealthServiceByID(serviceID string) (string, *AgentServiceChecksInfo, error) { + path := fmt.Sprintf("/v1/agent/health/service/id/%v", url.PathEscape(serviceID)) + r := a.c.newRequest("GET", path) + r.params.Add("format", "json") + r.header.Set("Accept", "application/json") + _, resp, err := a.c.doRequest(r) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + // Service not Found + if resp.StatusCode == http.StatusNotFound { + return HealthCritical, nil, nil + } + var out *AgentServiceChecksInfo + if err := decodeBody(resp, &out); err != nil { + return HealthCritical, out, err + } + switch resp.StatusCode { + case http.StatusOK: + return HealthPassing, out, nil + case http.StatusTooManyRequests: + return HealthWarning, out, nil + case http.StatusServiceUnavailable: + return HealthCritical, out, nil + } + return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path) +} + +// AgentHealthServiceByName returns for a given service name: the aggregated health status for all services +// having the specified name. 
+// - If no service with that name is found, will return status (critical, [], nil)
+// - If the service is found, will return (critical|passing|warning), []api.AgentServiceChecksInfo, nil
+// - In all other cases, will return an error
+func (a *Agent) AgentHealthServiceByName(service string) (string, []AgentServiceChecksInfo, error) {
+	path := fmt.Sprintf("/v1/agent/health/service/name/%v", url.PathEscape(service))
+	r := a.c.newRequest("GET", path)
+	r.params.Add("format", "json")
+	r.header.Set("Accept", "application/json")
+	_, resp, err := a.c.doRequest(r)
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+	// Service not Found
+	if resp.StatusCode == http.StatusNotFound {
+		return HealthCritical, nil, nil
+	}
+	var out []AgentServiceChecksInfo
+	if err := decodeBody(resp, &out); err != nil {
+		return HealthCritical, out, err
+	}
+	switch resp.StatusCode {
+	case http.StatusOK:
+		return HealthPassing, out, nil
+	case http.StatusTooManyRequests:
+		return HealthWarning, out, nil
+	case http.StatusServiceUnavailable:
+		return HealthCritical, out, nil
+	}
+	return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
+}
+
+// Service returns a locally registered service instance and allows for
+// hash-based blocking.
+//
+// Note that this uses an unconventional blocking mechanism since it's
+// agent-local state. That means there is no persistent raft index so we block
+// based on object hash instead.
+func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/agent/service/"+serviceID)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out *AgentService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return out, qm, nil
+}
+
+// Members returns the known gossip members. The WAN
+// flag can be used to query a server for WAN members.
+func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
+	r := a.c.newRequest("GET", "/v1/agent/members")
+	if wan {
+		r.params.Set("wan", "1")
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*AgentMember
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// MembersOpts returns the known gossip members and can be passed
+// additional options for WAN/segment filtering.
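+//
+// For example, listing members across all LAN segments (sketch; "client" is
+// an assumed *api.Client):
+//
+//	members, err := client.Agent().MembersOpts(api.MembersOpts{
+//		Segment: api.AllSegments,
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, m := range members {
+//		fmt.Println(m.Name, m.Addr)
+//	}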
+func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + r.params.Set("segment", opts.Segment) + if opts.WAN { + r.params.Set("wan", "1") + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// ServiceRegister is used to register a new service with +// the local agent +func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/service/register") + r.obj = service + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ServiceDeregister is used to deregister a service with +// the local agent +func (a *Agent) ServiceDeregister(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// PassTTL is used to set a TTL check to the passing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) PassTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "pass") +} + +// WarnTTL is used to set a TTL check to the warning state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) WarnTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "warn") +} + +// FailTTL is used to set a TTL check to the failing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) FailTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "fail") +} + +// updateTTL is used to update the TTL of a check. This is the internal +// method that uses the old API that's present in Consul versions prior to +// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed +// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, +// but keep the old Pass/Warn/Fail methods using the old API under the hood. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 and the server endpoints will +// be removed in 0.9. +func (a *Agent) updateTTL(checkID, note, status string) error { + switch status { + case "pass": + case "warn": + case "fail": + default: + return fmt.Errorf("Invalid status: %s", status) + } + endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) + r := a.c.newRequest("PUT", endpoint) + r.params.Set("note", note) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// checkUpdate is the payload for a PUT for a check update. +type checkUpdate struct { + // Status is one of the api.Health* states: HealthPassing + // ("passing"), HealthWarning ("warning"), or HealthCritical + // ("critical"). 
+ Status string + + // Output is the information to post to the UI for operators as the + // output of the process that decided to hit the TTL check. This is + // different from the note field that's associated with the check + // itself. + Output string +} + +// UpdateTTL is used to update the TTL of a check. This uses the newer API +// that was introduced in Consul 0.6.4 and later. We translate the old status +// strings for compatibility (though a newer version of Consul will still be +// required to use this API). +func (a *Agent) UpdateTTL(checkID, output, status string) error { + switch status { + case "pass", HealthPassing: + status = HealthPassing + case "warn", HealthWarning: + status = HealthWarning + case "fail", HealthCritical: + status = HealthCritical + default: + return fmt.Errorf("Invalid status: %s", status) + } + + endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) + r := a.c.newRequest("PUT", endpoint) + r.obj = &checkUpdate{ + Status: status, + Output: output, + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.obj = check + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Leave is used to have the agent gracefully leave the cluster and shutdown +func (a *Agent) Leave() error { + r := a.c.newRequest("PUT", "/v1/agent/leave") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ForceLeave is used to have the agent eject a failed node +func (a *Agent) ForceLeave(node string) error { + r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ConnectAuthorize is used to authorize an incoming connection +// to a natively integrated Connect service. +func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) { + r := a.c.newRequest("POST", "/v1/agent/connect/authorize") + r.obj = auth + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out AgentAuthorize + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return &out, nil +} + +// ConnectCARoots returns the list of roots. 
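+//
+// Usage sketch (assumes a *api.Client named "client"; CARootList and its
+// fields are defined with this package's other Connect CA types):
+//
+//	roots, _, err := client.Agent().ConnectCARoots(nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, root := range roots.Roots {
+//		fmt.Println(root.ID, root.Active)
+//	}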
+func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/agent/connect/ca/roots") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CARootList + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// ConnectCALeaf gets the leaf certificate for the given service ID. +func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out LeafCert + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// ConnectProxyConfig gets the configuration for a local managed proxy instance. +// +// Note that this uses an unconventional blocking mechanism since it's +// agent-local state. That means there is no persistent raft index so we block +// based on object hash instead. +func (a *Agent) ConnectProxyConfig(proxyServiceID string, q *QueryOptions) (*ConnectProxyConfig, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/agent/connect/proxy/"+proxyServiceID) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out ConnectProxyConfig + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// EnableServiceMaintenance toggles service maintenance mode on +// for the given service ID. +func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableServiceMaintenance toggles service maintenance mode off +// for the given service ID. +func (a *Agent) DisableServiceMaintenance(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableNodeMaintenance toggles node maintenance mode on for the +// agent we are connected to. +func (a *Agent) EnableNodeMaintenance(reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableNodeMaintenance toggles node maintenance mode off for the +// agent we are connected to. 
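+//
+// Together with EnableNodeMaintenance above, a maintenance-window sketch
+// ("client" is an assumed *api.Client):
+//
+//	if err := client.Agent().EnableNodeMaintenance("planned upgrade"); err != nil {
+//		log.Fatal(err)
+//	}
+//	// ... perform the maintenance work ...
+//	if err := client.Agent().DisableNodeMaintenance(); err != nil {
+//		log.Fatal(err)
+//	}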
+func (a *Agent) DisableNodeMaintenance() error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Monitor returns a channel which will receive streaming logs from the agent +// Providing a non-nil stopCh can be used to close the connection and stop the +// log stream. An empty string will be sent down the given channel when there's +// nothing left to stream, after which the caller should close the stopCh. +func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { + r := a.c.newRequest("GET", "/v1/agent/monitor") + r.setQueryOptions(q) + if loglevel != "" { + r.params.Add("loglevel", loglevel) + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + go func() { + defer resp.Body.Close() + + scanner := bufio.NewScanner(resp.Body) + for { + select { + case <-stopCh: + close(logCh) + return + default: + } + if scanner.Scan() { + // An empty string signals to the caller that + // the scan is done, so make sure we only emit + // that when the scanner says it's done, not if + // we happen to ingest an empty line. + if text := scanner.Text(); text != "" { + logCh <- text + } else { + logCh <- " " + } + } else { + logCh <- "" + } + } + }() + + return logCh, nil +} + +// UpdateACLToken updates the agent's "acl_token". See updateToken for more +// details. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateDefaultACLToken for v1.4.3 and above +func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_token", token, q) +} + +// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken +// for more details. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentACLToken for v1.4.3 and above +func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_agent_token", token, q) +} + +// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See +// updateToken for more details. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentMasterACLToken for v1.4.3 and above +func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_agent_master_token", token, q) +} + +// UpdateACLReplicationToken updates the agent's "acl_replication_token". See +// updateToken for more details. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateReplicationACLToken for v1.4.3 and above +func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_replication_token", token, q) +} + +// UpdateDefaultACLToken updates the agent's "default" token. See updateToken +// for more details +func (a *Agent) UpdateDefaultACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateTokenFallback("default", "acl_token", token, q) +} + +// UpdateAgentACLToken updates the agent's "agent" token. See updateToken +// for more details +func (a *Agent) UpdateAgentACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateTokenFallback("agent", "acl_agent_token", token, q) +} + +// UpdateAgentMasterACLToken updates the agent's "agent_master" token. 
See updateToken
+// for more details
+func (a *Agent) UpdateAgentMasterACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return a.updateTokenFallback("agent_master", "acl_agent_master_token", token, q)
+}
+
+// UpdateReplicationACLToken updates the agent's "replication" token. See updateToken
+// for more details
+func (a *Agent) UpdateReplicationACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return a.updateTokenFallback("replication", "acl_replication_token", token, q)
+}
+
+// updateToken can be used to update one of an agent's ACL tokens after the agent has
+// started. The tokens may not be persisted, so will need to be updated again if
+// the agent is restarted unless the agent is configured to persist them.
+func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) {
+	meta, _, err := a.updateTokenOnce(target, token, q)
+	return meta, err
+}
+
+func (a *Agent) updateTokenFallback(target, fallback, token string, q *WriteOptions) (*WriteMeta, error) {
+	meta, status, err := a.updateTokenOnce(target, token, q)
+	if err != nil && status == 404 {
+		meta, _, err = a.updateTokenOnce(fallback, token, q)
+	}
+	return meta, err
+}
+
+func (a *Agent) updateTokenOnce(target, token string, q *WriteOptions) (*WriteMeta, int, error) {
+	r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target))
+	r.setWriteOptions(q)
+	r.obj = &AgentToken{Token: token}
+
+	rtt, resp, err := a.c.doRequest(r)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+
+	if resp.StatusCode != 200 {
+		var buf bytes.Buffer
+		io.Copy(&buf, resp.Body)
+		return wm, resp.StatusCode, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+	}
+
+	return wm, resp.StatusCode, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go
new file mode 100644
index 000000000..4b17ff6cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/api.go
@@ -0,0 +1,966 @@
+package api
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/go-rootcerts"
+)
+
+const (
+	// HTTPAddrEnvName defines an environment variable name which sets
+	// the HTTP address if there is no -http-addr specified.
+	HTTPAddrEnvName = "CONSUL_HTTP_ADDR"
+
+	// HTTPTokenEnvName defines an environment variable name which sets
+	// the HTTP token.
+	HTTPTokenEnvName = "CONSUL_HTTP_TOKEN"
+
+	// HTTPTokenFileEnvName defines an environment variable name which sets
+	// the HTTP token file.
+	HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE"
+
+	// HTTPAuthEnvName defines an environment variable name which sets
+	// the HTTP authentication header.
+	HTTPAuthEnvName = "CONSUL_HTTP_AUTH"
+
+	// HTTPSSLEnvName defines an environment variable name which sets
+	// whether or not to use HTTPS.
+	HTTPSSLEnvName = "CONSUL_HTTP_SSL"
+
+	// HTTPCAFile defines an environment variable name which sets the
+	// CA file to use for talking to Consul over TLS.
+	HTTPCAFile = "CONSUL_CACERT"
+
+	// HTTPCAPath defines an environment variable name which sets the
+	// path to a directory of CA certs to use for talking to Consul over TLS.
+	HTTPCAPath = "CONSUL_CAPATH"
+
+	// HTTPClientCert defines an environment variable name which sets the
+	// client cert file to use for talking to Consul over TLS.
+ HTTPClientCert = "CONSUL_CLIENT_CERT" + + // HTTPClientKey defines an environment variable name which sets the + // client key file to use for talking to Consul over TLS. + HTTPClientKey = "CONSUL_CLIENT_KEY" + + // HTTPTLSServerName defines an environment variable name which sets the + // server name to use as the SNI host when connecting via TLS + HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME" + + // HTTPSSLVerifyEnvName defines an environment variable name which sets + // whether or not to disable certificate checking. + HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" + + // GRPCAddrEnvName defines an environment variable name which sets the gRPC + // address for consul connect envoy. Note this isn't actually used by the api + // client in this package but is defined here for consistency with all the + // other ENV names we use. + GRPCAddrEnvName = "CONSUL_GRPC_ADDR" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // AllowStale allows any Consul server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // RequireConsistent forces the read to be fully consistent. + // This is more expensive but prevents ever performing a stale + // read. + RequireConsistent bool + + // UseCache requests that the agent cache results locally. See + // https://www.consul.io/api/index.html#agent-caching for more details on the + // semantics. + UseCache bool + + // MaxAge limits how old a cached value will be returned if UseCache is true. + // If there is a cached response that is older than the MaxAge, it is treated + // as a cache miss and a new fetch invoked. If the fetch fails, the error is + // returned. Clients that wish to allow for stale results on error can set + // StaleIfError to a longer duration to change this behavior. It is ignored + // if the endpoint supports background refresh caching. See + // https://www.consul.io/api/index.html#agent-caching for more details. + MaxAge time.Duration + + // StaleIfError specifies how stale the client will accept a cached response + // if the servers are unavailable to fetch a fresh one. Only makes sense when + // UseCache is true and MaxAge is set to a lower, non-zero value. It is + // ignored if the endpoint supports background refresh caching. See + // https://www.consul.io/api/index.html#agent-caching for more details. + StaleIfError time.Duration + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitHash is used by some endpoints instead of WaitIndex to perform blocking + // on state based on a hash of the response rather than a monotonic index. + // This is required when the state being blocked on is not stored in Raft, for + // example agent-local proxy configuration. + WaitHash string + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overridden. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // Near is used to provide a node name that will sort the results + // in ascending order based on the estimated round trip time from + // that node. Setting this to "_agent" will use the agent's node + // for the sort. + Near string + + // NodeMeta is used to filter results by nodes with the given + // metadata key/value pairs. 
Currently, only one key/value pair can + // be provided for filtering. + NodeMeta map[string]string + + // RelayFactor is used in keyring operations to cause responses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // Connect filters prepared query execution to only include Connect-capable + // services. This currently affects prepared query execution. + Connect bool + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context + + // Filter requests filtering data prior to it being returned. The string + // is a go-bexpr compatible expression. + Filter string +} + +func (o *QueryOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { + o2 := new(QueryOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // RelayFactor is used in keyring operations to cause responses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context +} + +func (o *WriteOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { + o2 := new(WriteOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // LastContentHash. This can be used as a WaitHash to perform a blocking query + // for endpoints that support hash-based blocking. Endpoints that do not + // support it will return an empty hash. + LastContentHash string + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration + + // Is address translation enabled for HTTP responses on this agent + AddressTranslationEnabled bool + + // CacheHit is true if the result was served from agent-local cache. + CacheHit bool + + // CacheAge is set if request was ?cached and indicates how stale the cached + // response is. 
+ CacheAge time.Duration +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Consul server + Address string + + // Scheme is the URI scheme for the Consul server + Scheme string + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string + + // Transport is the Transport to use for the http client. + Transport *http.Transport + + // HttpClient is the client to use. Default will be + // used if not provided. + HttpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. If not provided, + // the agent default values will be used. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // TokenFile is a file containing the current token to use for this client. + // If provided it is read once at startup and never again. + TokenFile string + + TLSConfig TLSConfig +} + +// TLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. +type TLSConfig struct { + // Address is the optional address of the Consul server. The port, if any + // will be removed from here and this will be set to the ServerName of the + // resulting config. + Address string + + // CAFile is the optional path to the CA certificate used for Consul + // communication, defaults to the system bundle if not specified. + CAFile string + + // CAPath is the optional path to a directory of CA certificates to use for + // Consul communication, defaults to the system bundle if not specified. + CAPath string + + // CertFile is the optional path to the certificate for Consul + // communication. If this is set then you need to also set KeyFile. + CertFile string + + // KeyFile is the optional path to the private key for Consul communication. + // If this is set then you need to also set CertFile. + KeyFile string + + // InsecureSkipVerify if set to true will disable TLS host verification. + InsecureSkipVerify bool +} + +// DefaultConfig returns a default configuration for the client. By default this +// will pool and reuse idle connections to Consul. If you have a long-lived +// client object, this is the desired behavior and should make the most efficient +// use of the connections to Consul. If you don't reuse a client object, which +// is not recommended, then you may notice idle connections building up over +// time. To avoid this, use the DefaultNonPooledConfig() instead. +func DefaultConfig() *Config { + return defaultConfig(cleanhttp.DefaultPooledTransport) +} + +// DefaultNonPooledConfig returns a default configuration for the client which +// does not pool connections. This isn't a recommended configuration because it +// will reconnect to Consul on every request, but this is useful to avoid the +// accumulation of idle connections if you make many client objects during the +// lifetime of your application. 
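
The Config and TLSConfig fields above are all a caller needs to reach an HTTPS-enabled agent. A sketch with placeholder host and certificate paths (none of these values come from this diff):

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	cfg := api.DefaultConfig()
	cfg.Address = "consul.example.com:8501" // placeholder HTTPS endpoint
	cfg.Scheme = "https"
	cfg.TLSConfig = api.TLSConfig{
		CAFile:   "/etc/consul.d/ca.pem",         // placeholder paths; these files
		CertFile: "/etc/consul.d/client.pem",     // must exist on disk for the
		KeyFile:  "/etc/consul.d/client-key.pem", // client's TLS setup to succeed
	}

	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // requests made through this client now use TLS
}
```
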
+func DefaultNonPooledConfig() *Config { + return defaultConfig(cleanhttp.DefaultTransport) +} + +// defaultConfig returns the default configuration for the client, using the +// given function to make the transport. +func defaultConfig(transportFn func() *http.Transport) *Config { + config := &Config{ + Address: "127.0.0.1:8500", + Scheme: "http", + Transport: transportFn(), + } + + if addr := os.Getenv(HTTPAddrEnvName); addr != "" { + config.Address = addr + } + + if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" { + config.TokenFile = tokenFile + } + + if token := os.Getenv(HTTPTokenEnvName); token != "" { + config.Token = token + } + + if auth := os.Getenv(HTTPAuthEnvName); auth != "" { + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &HttpBasicAuth{ + Username: username, + Password: password, + } + } + + if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" { + enabled, err := strconv.ParseBool(ssl) + if err != nil { + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err) + } + + if enabled { + config.Scheme = "https" + } + } + + if v := os.Getenv(HTTPTLSServerName); v != "" { + config.TLSConfig.Address = v + } + if v := os.Getenv(HTTPCAFile); v != "" { + config.TLSConfig.CAFile = v + } + if v := os.Getenv(HTTPCAPath); v != "" { + config.TLSConfig.CAPath = v + } + if v := os.Getenv(HTTPClientCert); v != "" { + config.TLSConfig.CertFile = v + } + if v := os.Getenv(HTTPClientKey); v != "" { + config.TLSConfig.KeyFile = v + } + if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" { + doVerify, err := strconv.ParseBool(v) + if err != nil { + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err) + } + if !doVerify { + config.TLSConfig.InsecureSkipVerify = true + } + } + + return config +} + +// TLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. 
+func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { + tlsClientConfig := &tls.Config{ + InsecureSkipVerify: tlsConfig.InsecureSkipVerify, + } + + if tlsConfig.Address != "" { + server := tlsConfig.Address + hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") + if hasPort { + var err error + server, _, err = net.SplitHostPort(server) + if err != nil { + return nil, err + } + } + tlsClientConfig.ServerName = server + } + + if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) + if err != nil { + return nil, err + } + tlsClientConfig.Certificates = []tls.Certificate{tlsCert} + } + + if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" { + rootConfig := &rootcerts.Config{ + CAFile: tlsConfig.CAFile, + CAPath: tlsConfig.CAPath, + } + if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil { + return nil, err + } + } + + return tlsClientConfig, nil +} + +func (c *Config) GenerateEnv() []string { + env := make([]string, 0, 10) + + env = append(env, + fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address), + fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token), + fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile), + fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"), + fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile), + fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath), + fmt.Sprintf("%s=%s", HTTPClientCert, c.TLSConfig.CertFile), + fmt.Sprintf("%s=%s", HTTPClientKey, c.TLSConfig.KeyFile), + fmt.Sprintf("%s=%s", HTTPTLSServerName, c.TLSConfig.Address), + fmt.Sprintf("%s=%t", HTTPSSLVerifyEnvName, !c.TLSConfig.InsecureSkipVerify)) + + if c.HttpAuth != nil { + env = append(env, fmt.Sprintf("%s=%s:%s", HTTPAuthEnvName, c.HttpAuth.Username, c.HttpAuth.Password)) + } else { + env = append(env, fmt.Sprintf("%s=", HTTPAuthEnvName)) + } + + return env +} + +// Client provides a client to the Consul API +type Client struct { + config Config +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if len(config.Address) == 0 { + config.Address = defConfig.Address + } + + if len(config.Scheme) == 0 { + config.Scheme = defConfig.Scheme + } + + if config.Transport == nil { + config.Transport = defConfig.Transport + } + + if config.TLSConfig.Address == "" { + config.TLSConfig.Address = defConfig.TLSConfig.Address + } + + if config.TLSConfig.CAFile == "" { + config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile + } + + if config.TLSConfig.CAPath == "" { + config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath + } + + if config.TLSConfig.CertFile == "" { + config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile + } + + if config.TLSConfig.KeyFile == "" { + config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile + } + + if !config.TLSConfig.InsecureSkipVerify { + config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify + } + + if config.HttpClient == nil { + var err error + config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) + if err != nil { + return nil, err + } + } + + parts := strings.SplitN(config.Address, "://", 2) + if len(parts) == 2 { + switch parts[0] { + case "http": + config.Scheme = "http" + case "https": + config.Scheme = "https" + case "unix": + trans := cleanhttp.DefaultTransport() + trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", parts[1]) + } + config.HttpClient = 
&http.Client{ + Transport: trans, + } + default: + return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) + } + config.Address = parts[1] + } + + // If the TokenFile is set, always use that, even if a Token is configured. + // This is because when TokenFile is set it is read into the Token field. + // We want any derived clients to have to re-read the token file. + if config.TokenFile != "" { + data, err := ioutil.ReadFile(config.TokenFile) + if err != nil { + return nil, fmt.Errorf("Error loading token file: %s", err) + } + + if token := strings.TrimSpace(string(data)); token != "" { + config.Token = token + } + } + if config.Token == "" { + config.Token = defConfig.Token + } + + return &Client{config: *config}, nil +} + +// NewHttpClient returns an http client configured with the given Transport and TLS +// config. +func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { + client := &http.Client{ + Transport: transport, + } + + // TODO (slackpad) - Once we get some run time on the HTTP/2 support we + // should turn it on by default if TLS is enabled. We would basically + // just need to call http2.ConfigureTransport(transport) here. We also + // don't want to introduce another external dependency on + // golang.org/x/net/http2 at this time. For a complete recipe for how + // to enable HTTP/2 support on a transport suitable for the API client + // library see agent/http_test.go:TestHTTPServer_H2. + + if transport.TLSClientConfig == nil { + tlsClientConfig, err := SetupTLSConfig(&tlsConf) + + if err != nil { + return nil, err + } + + transport.TLSClientConfig = tlsClientConfig + } + + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + header http.Header + obj interface{} + ctx context.Context +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.WaitHash != "" { + r.params.Set("hash", q.WaitHash) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.Near != "" { + r.params.Set("near", q.Near) + } + if q.Filter != "" { + r.params.Set("filter", q.Filter) + } + if len(q.NodeMeta) > 0 { + for key, value := range q.NodeMeta { + r.params.Add("node-meta", key+":"+value) + } + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } + if q.Connect { + r.params.Set("connect", "true") + } + if q.UseCache && !q.RequireConsistent { + r.params.Set("cached", "") + + cc := []string{} + if q.MaxAge > 0 { + cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds())) + } + if q.StaleIfError > 0 { + cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds())) + } + if len(cc) > 0 { + r.header.Set("Cache-Control", strings.Join(cc, ", ")) + } + } + r.ctx = q.ctx +} + +// durToMsec converts a duration to a millisecond specified string. 
If the +// user selected a positive value that rounds to 0 ms, then we will use 1 ms +// so they get a short delay, otherwise Consul will translate the 0 ms into +// a huge default delay. +func durToMsec(dur time.Duration) string { + ms := dur / time.Millisecond + if dur > 0 && ms == 0 { + ms = 1 + } + return fmt.Sprintf("%dms", ms) +} + +// serverError is a string we look for to detect 500 errors. +const serverError = "Unexpected response code: 500" + +// IsRetryableError returns true for 500 errors from the Consul servers, and +// network connection errors. These are usually retryable at a later time. +// This applies to reads but NOT to writes. This may return true for errors +// on writes that may have still gone through, so do not use this to retry +// any write operations. +func IsRetryableError(err error) bool { + if err == nil { + return false + } + + if _, ok := err.(net.Error); ok { + return true + } + + // TODO (slackpad) - Make a real error type here instead of using + // a string check. + return strings.Contains(err.Error(), serverError) +} + +// setWriteOptions is used to annotate the request with +// additional write options +func (r *request) setWriteOptions(q *WriteOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } + r.ctx = q.ctx +} + +// toHTTP converts the request to an HTTP request +func (r *request) toHTTP() (*http.Request, error) { + // Encode the query parameters + r.url.RawQuery = r.params.Encode() + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + b, err := encodeBody(r.obj) + if err != nil { + return nil, err + } + r.body = b + } + + // Create the HTTP request + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + req.URL.Host = r.url.Host + req.URL.Scheme = r.url.Scheme + req.Host = r.url.Host + req.Header = r.header + + // Setup auth + if r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + if r.ctx != nil { + return req.WithContext(r.ctx), nil + } + + return req, nil +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) *request { + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: c.config.Scheme, + Host: c.config.Address, + Path: path, + }, + params: make(map[string][]string), + header: make(http.Header), + } + if c.config.Datacenter != "" { + r.params.Set("dc", c.config.Datacenter) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.Token != "" { + r.header.Set("X-Consul-Token", r.config.Token) + } + return r +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.HttpClient.Do(req) + diff := time.Since(start) + return diff, resp, err +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. 
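
IsRetryableError above is deliberately read-only advice. A hedged sketch of how a caller might use it, wrapping the Catalog().Nodes endpoint from later in this diff; the retry budget and backoff are illustrative, not part of the API:

```go
package example

import (
	"time"

	"github.com/hashicorp/consul/api"
)

// listNodesWithRetry retries a catalog read while IsRetryableError reports
// the failure as transient (500s and net.Errors). This pattern is safe for
// reads only: a failed write may still have been applied on the server.
func listNodesWithRetry(client *api.Client) ([]*api.Node, error) {
	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		nodes, _, err := client.Catalog().Nodes(nil)
		if err == nil {
			return nodes, nil
		}
		lastErr = err
		if !api.IsRetryableError(err) {
			break // e.g. a 4xx: retrying will not help
		}
		// Crude linear backoff between attempts.
		time.Sleep(time.Duration(attempt+1) * 500 * time.Millisecond)
	}
	return nil, lastErr
}
```
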
+func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r := c.newRequest("GET", endpoint) + r.setQueryOptions(q) + rtt, resp, err := c.doRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Consul conventions. +func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r := c.newRequest("PUT", endpoint) + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } else if _, err := ioutil.ReadAll(resp.Body); err != nil { + return nil, err + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +// +// TODO(rb): bug? the error from this function is never handled +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index (if it's set - hash based blocking queries don't + // set this) + if indexStr := header.Get("X-Consul-Index"); indexStr != "" { + index, err := strconv.ParseUint(indexStr, 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + } + q.LastContentHash = header.Get("X-Consul-ContentHash") + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + + // Parse X-Consul-Translate-Addresses + switch header.Get("X-Consul-Translate-Addresses") { + case "true": + q.AddressTranslationEnabled = true + default: + q.AddressTranslationEnabled = false + } + + // Parse Cache info + if cacheStr := header.Get("X-Cache"); cacheStr != "" { + q.CacheHit = strings.EqualFold(cacheStr, "HIT") + } + if ageStr := header.Get("Age"); ageStr != "" { + age, err := strconv.ParseUint(ageStr, 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse Age Header: %v", err) + } + q.CacheAge = time.Duration(age) * time.Second + } + + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return d, nil, e + } + if resp.StatusCode != 200 { + return d, nil, generateUnexpectedResponseCodeError(resp) + } + return d, resp, nil +} + +func (req *request) filterQuery(filter string) { + if filter == "" { + return + } + + 
req.params.Set("filter", filter) +} + +// generateUnexpectedResponseCodeError consumes the rest of the body, closes +// the body stream and generates an error indicating the status code was +// unexpected. +func generateUnexpectedResponseCodeError(resp *http.Response) error { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) +} + +func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return false, d, nil, e + } + switch resp.StatusCode { + case 200: + return true, d, resp, nil + case 404: + return false, d, resp, nil + default: + return false, d, nil, generateUnexpectedResponseCodeError(resp) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go new file mode 100644 index 000000000..c175c3fff --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -0,0 +1,244 @@ +package api + +type Weights struct { + Passing int + Warning int +} + +type Node struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + Meta map[string]string + CreateIndex uint64 + ModifyIndex uint64 +} + +type CatalogService struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + NodeMeta map[string]string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags []string + ServiceMeta map[string]string + ServicePort int + ServiceWeights Weights + ServiceEnableTagOverride bool + // DEPRECATED (ProxyDestination) - remove the next comment! + // We forgot to ever add ServiceProxyDestination here so no need to deprecate! + ServiceProxy *AgentServiceConnectProxyConfig + CreateIndex uint64 + Checks HealthChecks + ModifyIndex uint64 +} + +type CatalogNode struct { + Node *Node + Services map[string]*AgentService +} + +type CatalogRegistration struct { + ID string + Node string + Address string + TaggedAddresses map[string]string + NodeMeta map[string]string + Datacenter string + Service *AgentService + Check *AgentCheck + Checks HealthChecks + SkipNodeUpdate bool +} + +type CatalogDeregistration struct { + Node string + Address string // Obsolete. 
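
The registration types above drive the low-level catalog endpoints that follow. A sketch of registering and removing an external node, assuming a local agent; the node, address, and service names are placeholders, and AgentService comes from this package's agent endpoints (not shown in this diff):

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	catalog := client.Catalog()

	// Register a node (and a service on it) directly against the catalog,
	// bypassing any local agent's anti-entropy sync.
	_, err = catalog.Register(&api.CatalogRegistration{
		Node:    "external-db-1",
		Address: "10.0.0.17",
		Service: &api.AgentService{
			ID:      "postgres-1",
			Service: "postgres",
			Port:    5432,
		},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Deregistering only the node removes its services and checks too.
	_, err = catalog.Deregister(&api.CatalogDeregistration{Node: "external-db-1"}, nil)
	if err != nil {
		log.Fatal(err)
	}
}
```
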
+ Datacenter string + ServiceID string + CheckID string +} + +// Catalog can be used to query the Catalog endpoints +type Catalog struct { + c *Client +} + +// Catalog returns a handle to the catalog endpoints +func (c *Client) Catalog() *Catalog { + return &Catalog{c} +} + +func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/register") + r.setWriteOptions(q) + r.obj = reg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/deregister") + r.setWriteOptions(q) + r.obj = dereg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Datacenters is used to query for all the known datacenters +func (c *Catalog) Datacenters() ([]string, error) { + r := c.c.newRequest("GET", "/v1/catalog/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []string + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to query all the known nodes +func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Node + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Services is used to query for all known services +func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/services") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]string + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query catalog entries for a given service +func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + var tags []string + if tag != "" { + tags = []string{tag} + } + return c.service(service, tags, q, false) +} + +// Supports multiple tags for filtering +func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + return c.service(service, tags, q, false) +} + +// Connect is used to query catalog entries for a given Connect-enabled service +func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + var tags []string + if tag != "" { + tags = []string{tag} + } + return c.service(service, tags, q, true) +} + +// Supports multiple tags for filtering +func (c *Catalog) ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + return c.service(service, tags, q, true) +} + +func (c *Catalog) service(service string, tags []string, q *QueryOptions, 
connect bool) ([]*CatalogService, *QueryMeta, error) { + path := "/v1/catalog/service/" + service + if connect { + path = "/v1/catalog/connect/" + service + } + r := c.c.newRequest("GET", path) + r.setQueryOptions(q) + if len(tags) > 0 { + for _, tag := range tags { + r.params.Add("tag", tag) + } + } + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CatalogService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Node is used to query for service information about a single node +func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *CatalogNode + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go new file mode 100644 index 000000000..0c18963fd --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -0,0 +1,255 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + + "github.com/mitchellh/mapstructure" +) + +const ( + ServiceDefaults string = "service-defaults" + ProxyDefaults string = "proxy-defaults" + ProxyConfigGlobal string = "global" +) + +type ConfigEntry interface { + GetKind() string + GetName() string + GetCreateIndex() uint64 + GetModifyIndex() uint64 +} + +type ServiceConfigEntry struct { + Kind string + Name string + Protocol string + CreateIndex uint64 + ModifyIndex uint64 +} + +func (s *ServiceConfigEntry) GetKind() string { + return s.Kind +} + +func (s *ServiceConfigEntry) GetName() string { + return s.Name +} + +func (s *ServiceConfigEntry) GetCreateIndex() uint64 { + return s.CreateIndex +} + +func (s *ServiceConfigEntry) GetModifyIndex() uint64 { + return s.ModifyIndex +} + +type ProxyConfigEntry struct { + Kind string + Name string + Config map[string]interface{} + CreateIndex uint64 + ModifyIndex uint64 +} + +func (p *ProxyConfigEntry) GetKind() string { + return p.Kind +} + +func (p *ProxyConfigEntry) GetName() string { + return p.Name +} + +func (p *ProxyConfigEntry) GetCreateIndex() uint64 { + return p.CreateIndex +} + +func (p *ProxyConfigEntry) GetModifyIndex() uint64 { + return p.ModifyIndex +} + +type rawEntryListResponse struct { + kind string + Entries []map[string]interface{} +} + +func makeConfigEntry(kind, name string) (ConfigEntry, error) { + switch kind { + case ServiceDefaults: + return &ServiceConfigEntry{Name: name}, nil + case ProxyDefaults: + return &ProxyConfigEntry{Name: name}, nil + default: + return nil, fmt.Errorf("invalid config entry kind: %s", kind) + } +} + +func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) { + var entry ConfigEntry + + kindVal, ok := raw["Kind"] + if !ok { + kindVal, ok = raw["kind"] + } + if !ok { + return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level") + } + + if kindStr, ok := kindVal.(string); ok { + newEntry, err := makeConfigEntry(kindStr, "") + if err != nil { + return nil, err + } + entry = newEntry + } else { + 
return nil, fmt.Errorf("Kind value in payload is not a string") + } + + decodeConf := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeDurationHookFunc(), + Result: &entry, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + return entry, decoder.Decode(raw) +} + +func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) { + var raw map[string]interface{} + if err := json.Unmarshal(data, &raw); err != nil { + return nil, err + } + + return DecodeConfigEntry(raw) +} + +// Config can be used to query the Config endpoints +type ConfigEntries struct { + c *Client +} + +// Config returns a handle to the Config endpoints +func (c *Client) ConfigEntries() *ConfigEntries { + return &ConfigEntries{c} +} + +func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) { + if kind == "" || name == "" { + return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty") + } + + entry, err := makeConfigEntry(kind, name) + if err != nil { + return nil, nil, err + } + + r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name)) + r.setQueryOptions(q) + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, entry); err != nil { + return nil, nil, err + } + + return entry, qm, nil +} + +func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) { + if kind == "" { + return nil, nil, fmt.Errorf("The kind parameter must not be empty") + } + + r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind)) + r.setQueryOptions(q) + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var raw []map[string]interface{} + if err := decodeBody(resp, &raw); err != nil { + return nil, nil, err + } + + var entries []ConfigEntry + for _, rawEntry := range raw { + entry, err := DecodeConfigEntry(rawEntry) + if err != nil { + return nil, nil, err + } + entries = append(entries, entry) + } + + return entries, qm, nil +} + +func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) { + return conf.set(entry, nil, w) +} + +func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) { + return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w) +} + +func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) { + r := conf.c.newRequest("PUT", "/v1/config") + r.setWriteOptions(w) + for param, value := range params { + r.params.Set(param, value) + } + r.obj = entry + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + + wm := &WriteMeta{RequestTime: rtt} + return res, wm, nil +} + +func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) { + if kind == "" || name == "" { + return nil, fmt.Errorf("Both kind and name parameters must not 
be empty") + } + + r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name)) + r.setWriteOptions(w) + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/connect.go b/vendor/github.com/hashicorp/consul/api/connect.go new file mode 100644 index 000000000..a40d1e232 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/connect.go @@ -0,0 +1,12 @@ +package api + +// Connect can be used to work with endpoints related to Connect, the +// feature for securely connecting services within Consul. +type Connect struct { + c *Client +} + +// Connect returns a handle to the connect-related endpoints +func (c *Client) Connect() *Connect { + return &Connect{c} +} diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go new file mode 100644 index 000000000..600a3e0db --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/connect_ca.go @@ -0,0 +1,174 @@ +package api + +import ( + "fmt" + "time" + + "github.com/mitchellh/mapstructure" +) + +// CAConfig is the structure for the Connect CA configuration. +type CAConfig struct { + // Provider is the CA provider implementation to use. + Provider string + + // Configuration is arbitrary configuration for the provider. This + // should only contain primitive values and containers (such as lists + // and maps). + Config map[string]interface{} + + CreateIndex uint64 + ModifyIndex uint64 +} + +// CommonCAProviderConfig is the common options available to all CA providers. +type CommonCAProviderConfig struct { + LeafCertTTL time.Duration + SkipValidate bool + CSRMaxPerSecond float32 + CSRMaxConcurrent int +} + +// ConsulCAProviderConfig is the config for the built-in Consul CA provider. +type ConsulCAProviderConfig struct { + CommonCAProviderConfig `mapstructure:",squash"` + + PrivateKey string + RootCert string + RotationPeriod time.Duration +} + +// ParseConsulCAConfig takes a raw config map and returns a parsed +// ConsulCAProviderConfig. +func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { + var config ConsulCAProviderConfig + decodeConf := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeDurationHookFunc(), + Result: &config, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + if err := decoder.Decode(raw); err != nil { + return nil, fmt.Errorf("error decoding config: %s", err) + } + + return &config, nil +} + +// CARootList is the structure for the results of listing roots. +type CARootList struct { + ActiveRootID string + TrustDomain string + Roots []*CARoot +} + +// CARoot represents a root CA certificate that is trusted. +type CARoot struct { + // ID is a globally unique ID (UUID) representing this CA root. + ID string + + // Name is a human-friendly name for this CA root. This value is + // opaque to Consul and is not used for anything internally. + Name string + + // RootCertPEM is the PEM-encoded public certificate. + RootCertPEM string `json:"RootCert"` + + // Active is true if this is the current active CA. This must only + // be true for exactly one CA. For any method that modifies roots in the + // state store, tests should be written to verify that multiple roots + // cannot be active. 
+ Active bool + + CreateIndex uint64 + ModifyIndex uint64 +} + +// LeafCert is a certificate that has been issued by a Connect CA. +type LeafCert struct { + // SerialNumber is the unique serial number for this certificate. + // This is encoded in standard hex separated by :. + SerialNumber string + + // CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private + // key for that cert, respectively. This should not be stored in the + // state store, but is present in the sign API response. + CertPEM string `json:",omitempty"` + PrivateKeyPEM string `json:",omitempty"` + + // Service is the name of the service for which the cert was issued. + // ServiceURI is the cert URI value. + Service string + ServiceURI string + + // ValidAfter and ValidBefore are the validity periods for the + // certificate. + ValidAfter time.Time + ValidBefore time.Time + + CreateIndex uint64 + ModifyIndex uint64 +} + +// CARoots queries the list of available roots. +func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/ca/roots") + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CARootList + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// CAGetConfig returns the current CA configuration. +func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/ca/configuration") + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CAConfig + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// CASetConfig sets the current CA configuration. +func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, error) { + r := h.c.newRequest("PUT", "/v1/connect/ca/configuration") + r.setWriteOptions(q) + r.obj = conf + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go new file mode 100644 index 000000000..a996c03e5 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go @@ -0,0 +1,302 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "time" +) + +// Intention defines an intention for the Connect Service Graph. This defines +// the allowed or denied behavior of a connection between two services using +// Connect. +type Intention struct { + // ID is the UUID-based ID for the intention, always generated by Consul. + ID string + + // Description is a human-friendly description of this intention. + // It is opaque to Consul and is only stored and transferred in API + // requests. + Description string + + // SourceNS, SourceName are the namespace and name, respectively, of + // the source service. Either of these may be the wildcard "*", but only + // the full value can be a wildcard. Partial wildcards are not allowed. + // The source may also be a non-Consul service, as specified by SourceType. 
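
The CA endpoints above (CARoots, CAGetConfig) are read-mostly and straightforward to combine. A sketch, assuming a local agent with Connect enabled:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	connect := client.Connect()

	roots, _, err := connect.CARoots(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, root := range roots.Roots {
		// Exactly one root should report Active=true.
		fmt.Printf("root %s active=%v\n", root.ID, root.Active)
	}

	conf, _, err := connect.CAGetConfig(nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CA provider:", conf.Provider)
}
```
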
+	//
+	// DestinationNS, DestinationName is the same, but for the destination
+	// service. The same rules apply. The destination is always a Consul
+	// service.
+	SourceNS, SourceName           string
+	DestinationNS, DestinationName string
+
+	// SourceType is the type of the value for the source.
+	SourceType IntentionSourceType
+
+	// Action is whether this is a whitelist or blacklist intention.
+	Action IntentionAction
+
+	// DefaultAddr, DefaultPort of the local listening proxy (if any) to
+	// make this connection.
+	DefaultAddr string
+	DefaultPort int
+
+	// Meta is arbitrary metadata associated with the intention. This is
+	// opaque to Consul but is served in API responses.
+	Meta map[string]string
+
+	// Precedence is the order that the intention will be applied, with
+	// larger numbers being applied first. This is a read-only field, on
+	// any intention update it is updated.
+	Precedence int
+
+	// CreatedAt and UpdatedAt keep track of when this record was created
+	// or modified.
+	CreatedAt, UpdatedAt time.Time
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// String returns human-friendly output describing this intention.
+func (i *Intention) String() string {
+	return fmt.Sprintf("%s => %s (%s)",
+		i.SourceString(),
+		i.DestinationString(),
+		i.Action)
+}
+
+// SourceString returns the namespace/name format for the source, or
+// just "name" if the namespace is the default namespace.
+func (i *Intention) SourceString() string {
+	return i.partString(i.SourceNS, i.SourceName)
+}
+
+// DestinationString returns the namespace/name format for the destination, or
+// just "name" if the namespace is the default namespace.
+func (i *Intention) DestinationString() string {
+	return i.partString(i.DestinationNS, i.DestinationName)
+}
+
+func (i *Intention) partString(ns, n string) string {
+	// For now we omit the default namespace from the output. In the future
+	// we might want to look at this and show this in a multi-namespace world.
+	if ns != "" && ns != IntentionDefaultNamespace {
+		n = ns + "/" + n
+	}
+
+	return n
+}
+
+// IntentionDefaultNamespace is the default namespace value.
+const IntentionDefaultNamespace = "default"
+
+// IntentionAction is the action that the intention represents. This
+// can be "allow" or "deny" to whitelist or blacklist intentions.
+type IntentionAction string
+
+const (
+	IntentionActionAllow IntentionAction = "allow"
+	IntentionActionDeny  IntentionAction = "deny"
+)
+
+// IntentionSourceType is the type of the source within an intention.
+type IntentionSourceType string
+
+const (
+	// IntentionSourceConsul is a service within the Consul catalog.
+	IntentionSourceConsul IntentionSourceType = "consul"
+)
+
+// IntentionMatch are the arguments for the intention match API.
+type IntentionMatch struct {
+	By    IntentionMatchType
+	Names []string
+}
+
+// IntentionMatchType is the target for a match request. For example,
+// matching by source will look for all intentions that match the given
+// source value.
+type IntentionMatchType string
+
+const (
+	IntentionMatchSource      IntentionMatchType = "source"
+	IntentionMatchDestination IntentionMatchType = "destination"
+)
+
+// IntentionCheck are the arguments for the intention check API. For
+// more documentation see the IntentionCheck function.
+type IntentionCheck struct {
+	// Source and Destination are the source and destination values to
+	// check. The destination is always a Consul service, but the source
+	// may be other values as defined by the SourceType.
+ Source, Destination string + + // SourceType is the type of the value for the source. + SourceType IntentionSourceType +} + +// Intentions returns the list of intentions. +func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions") + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// IntentionGet retrieves a single intention. +func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions/"+id) + r.setQueryOptions(q) + rtt, resp, err := h.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == 404 { + return nil, qm, nil + } else if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + return nil, nil, fmt.Errorf( + "Unexpected response %d: %s", resp.StatusCode, buf.String()) + } + + var out Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// IntentionDelete deletes a single intention. +func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) { + r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + return qm, nil +} + +// IntentionMatch returns the list of intentions that match a given source +// or destination. The returned intentions are ordered by precedence where +// result[0] is the highest precedence (if that matches, then that rule overrides +// all other rules). +// +// Matching can be done for multiple names at the same time. The resulting +// map is keyed by the given names. Casing is preserved. +func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[string][]*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions/match") + r.setQueryOptions(q) + r.params.Set("by", string(args.By)) + for _, name := range args.Names { + r.params.Add("name", name) + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]*Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// IntentionCheck returns whether a given source/destination would be allowed +// or not given the current set of intentions and the configuration of Consul. 
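
The match API above answers "which intentions govern traffic to this service?", highest precedence first. A sketch, assuming a local agent; "db" is a placeholder destination:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	matches, _, err := client.Connect().IntentionMatch(&api.IntentionMatch{
		By:    api.IntentionMatchDestination,
		Names: []string{"db"},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Results are keyed by the requested name, ordered by precedence.
	for _, ixn := range matches["db"] {
		fmt.Printf("%s (action=%s, precedence=%d)\n", ixn, ixn.Action, ixn.Precedence)
	}
}
```
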
+func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/connect/intentions/check")
+	r.setQueryOptions(q)
+	r.params.Set("source", args.Source)
+	r.params.Set("destination", args.Destination)
+	if args.SourceType != "" {
+		r.params.Set("source-type", string(args.SourceType))
+	}
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out struct{ Allowed bool }
+	if err := decodeBody(resp, &out); err != nil {
+		return false, nil, err
+	}
+	return out.Allowed, qm, nil
+}
+
+// IntentionCreate will create a new intention. The ID in the given
+// structure must be empty and a generated ID will be returned on
+// success.
+func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) {
+	r := c.c.newRequest("POST", "/v1/connect/intentions")
+	r.setWriteOptions(q)
+	r.obj = ixn
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// IntentionUpdate will update an existing intention. The ID in the given
+// structure must be non-empty.
+func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID)
+	r.setWriteOptions(q)
+	r.obj = ixn
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+	return wm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go
new file mode 100644
index 000000000..53318f11d
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/coordinate.go
@@ -0,0 +1,106 @@
+package api
+
+import (
+	"github.com/hashicorp/serf/coordinate"
+)
+
+// CoordinateEntry represents a node and its associated network coordinate.
+type CoordinateEntry struct {
+	Node    string
+	Segment string
+	Coord   *coordinate.Coordinate
+}
+
+// CoordinateDatacenterMap has the coordinates for servers in a given datacenter
+// and area. Network coordinates are only compatible within the same area.
+type CoordinateDatacenterMap struct {
+	Datacenter  string
+	AreaID      string
+	Coordinates []CoordinateEntry
+}
+
+// Coordinate can be used to query the coordinate endpoints
+type Coordinate struct {
+	c *Client
+}
+
+// Coordinate returns a handle to the coordinate endpoints
+func (c *Client) Coordinate() *Coordinate {
+	return &Coordinate{c}
+}
+
+// Datacenters is used to return the coordinates of all the servers in the WAN
+// pool.
+func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
+	_, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*CoordinateDatacenterMap
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
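Since coordinates within the same area are directly comparable, two entries from the Nodes endpoint just below can be turned into a latency estimate; DistanceTo is assumed from the vendored serf coordinate package that CoordinateEntry embeds. A sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	entries, _, err := client.Coordinate().Nodes(nil)
	if err != nil {
		log.Fatal(err)
	}
	if len(entries) < 2 {
		log.Fatal("need at least two nodes with coordinates")
	}
	a, b := entries[0], entries[1]
	// DistanceTo returns a time.Duration estimating round-trip time.
	fmt.Printf("estimated RTT %s -> %s: %s\n", a.Node, b.Node, a.Coord.DistanceTo(b.Coord))
}
```
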
+// Nodes is used to return the coordinates of all the nodes in the LAN pool.
+func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/nodes")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CoordinateEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Update inserts or updates the LAN coordinate of a node.
+func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/coordinate/update")
+	r.setWriteOptions(q)
+	r.obj = coord
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+// Node is used to return the coordinates of a single node in the LAN pool.
+func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CoordinateEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/debug.go b/vendor/github.com/hashicorp/consul/api/debug.go
new file mode 100644
index 000000000..238046853
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/debug.go
@@ -0,0 +1,106 @@
+package api
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+)
+
+// Debug can be used to query the /debug/pprof endpoints to gather
+// profiling information about the target agent.
+//
+// The agent must have enable_debug set to true for profiling to be enabled
+// and for these endpoints to function.
+type Debug struct {
+	c *Client
+}
+
+// Debug returns a handle that exposes the internal debug endpoints.
+func (c *Client) Debug() *Debug { + return &Debug{c} +} + +// Heap returns a pprof heap dump +func (d *Debug) Heap() ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/heap") + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer resp.Body.Close() + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} + +// Profile returns a pprof CPU profile for the specified number of seconds +func (d *Debug) Profile(seconds int) ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/profile") + + // Capture a profile for the specified number of seconds + r.params.Set("seconds", strconv.Itoa(seconds)) + + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer resp.Body.Close() + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} + +// Trace returns an execution trace +func (d *Debug) Trace(seconds int) ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/trace") + + // Capture a trace for the specified number of seconds + r.params.Set("seconds", strconv.Itoa(seconds)) + + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer resp.Body.Close() + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} + +// Goroutine returns a pprof goroutine profile +func (d *Debug) Goroutine() ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/goroutine") + + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer resp.Body.Close() + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go new file mode 100644 index 000000000..85b5b069b --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/event.go @@ -0,0 +1,104 @@ +package api + +import ( + "bytes" + "strconv" +) + +// Event can be used to query the Event endpoints +type Event struct { + c *Client +} + +// UserEvent represents an event that was fired by the user +type UserEvent struct { + ID string + Name string + Payload []byte + NodeFilter string + ServiceFilter string + TagFilter string + Version int + LTime uint64 +} + +// Event returns a handle to the event endpoints +func (c *Client) Event() *Event { + return &Event{c} +} + +// Fire is used to fire a new user event. Only the Name, Payload and Filters +// are respected. This returns the ID or an associated error. Cross DC requests +// are supported. 
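
The Profile/Trace/Heap helpers above just relay raw pprof output, so the natural pattern is to write the bytes to disk for `go tool pprof`. A sketch, assuming an agent started with enable_debug set to true:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Capture a 10-second CPU profile from the agent.
	profile, err := client.Debug().Profile(10)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("consul.prof", profile, 0644); err != nil {
		log.Fatal(err)
	}
	log.Println("wrote consul.prof; inspect with: go tool pprof consul.prof")
}
```
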
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+	r.setWriteOptions(q)
+	if params.NodeFilter != "" {
+		r.params.Set("node", params.NodeFilter)
+	}
+	if params.ServiceFilter != "" {
+		r.params.Set("service", params.ServiceFilter)
+	}
+	if params.TagFilter != "" {
+		r.params.Set("tag", params.TagFilter)
+	}
+	if params.Payload != nil {
+		r.body = bytes.NewReader(params.Payload)
+	}
+
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out UserEvent
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+	r := e.c.newRequest("GET", "/v1/event/list")
+	r.setQueryOptions(q)
+	if name != "" {
+		r.params.Set("name", name)
+	}
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*UserEvent
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+	upper := uuid[19:23] + uuid[24:36]
+	lowVal, err := strconv.ParseUint(lower, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + lower)
+	}
+	highVal, err := strconv.ParseUint(upper, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + upper)
+	}
+	return lowVal ^ highVal
+}
diff --git a/vendor/github.com/hashicorp/consul/api/go.mod b/vendor/github.com/hashicorp/consul/api/go.mod
new file mode 100644
index 000000000..e19821891
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/go.mod
@@ -0,0 +1,16 @@
+module github.com/hashicorp/consul/api
+
+go 1.12
+
+replace github.com/hashicorp/consul/sdk => ../sdk
+
+require (
+	github.com/hashicorp/consul/sdk v0.1.1
+	github.com/hashicorp/go-cleanhttp v0.5.1
+	github.com/hashicorp/go-rootcerts v1.0.0
+	github.com/hashicorp/go-uuid v1.0.1
+	github.com/hashicorp/serf v0.8.2
+	github.com/mitchellh/mapstructure v1.1.2
+	github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c
+	github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/hashicorp/consul/api/go.sum b/vendor/github.com/hashicorp/consul/api/go.sum
new file mode 100644
index 000000000..372ebc141
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/go.sum
@@ -0,0 +1,76 @@
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= 
+github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go new file mode 100644 index 000000000..9faf6b665 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -0,0 +1,330 @@ +package 
api + +import ( + "encoding/json" + "fmt" + "strings" + "time" +) + +const ( + // HealthAny is special, and is used as a wild card, + // not as a specific state. + HealthAny = "any" + HealthPassing = "passing" + HealthWarning = "warning" + HealthCritical = "critical" + HealthMaint = "maintenance" +) + +const ( + // NodeMaint is the special key set by a node in maintenance mode. + NodeMaint = "_node_maintenance" + + // ServiceMaintPrefix is the prefix for a service in maintenance mode. + ServiceMaintPrefix = "_service_maintenance:" +) + +// HealthCheck is used to represent a single check +type HealthCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string + ServiceTags []string + + Definition HealthCheckDefinition + + CreateIndex uint64 + ModifyIndex uint64 +} + +// HealthCheckDefinition is used to store the details about +// a health check's execution. +type HealthCheckDefinition struct { + HTTP string + Header map[string][]string + Method string + TLSSkipVerify bool + TCP string + IntervalDuration time.Duration `json:"-"` + TimeoutDuration time.Duration `json:"-"` + DeregisterCriticalServiceAfterDuration time.Duration `json:"-"` + + // DEPRECATED in Consul 1.4.1. Use the above time.Duration fields instead. + Interval ReadableDuration + Timeout ReadableDuration + DeregisterCriticalServiceAfter ReadableDuration +} + +func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) { + type Alias HealthCheckDefinition + out := &struct { + Interval string + Timeout string + DeregisterCriticalServiceAfter string + *Alias + }{ + Interval: d.Interval.String(), + Timeout: d.Timeout.String(), + DeregisterCriticalServiceAfter: d.DeregisterCriticalServiceAfter.String(), + Alias: (*Alias)(d), + } + + if d.IntervalDuration != 0 { + out.Interval = d.IntervalDuration.String() + } else if d.Interval != 0 { + out.Interval = d.Interval.String() + } + if d.TimeoutDuration != 0 { + out.Timeout = d.TimeoutDuration.String() + } else if d.Timeout != 0 { + out.Timeout = d.Timeout.String() + } + if d.DeregisterCriticalServiceAfterDuration != 0 { + out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfterDuration.String() + } else if d.DeregisterCriticalServiceAfter != 0 { + out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfter.String() + } + + return json.Marshal(out) +} + +func (d *HealthCheckDefinition) UnmarshalJSON(data []byte) error { + type Alias HealthCheckDefinition + aux := &struct { + Interval string + Timeout string + DeregisterCriticalServiceAfter string + *Alias + }{ + Alias: (*Alias)(d), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + // Parse the values into both the time.Duration and old ReadableDuration fields. + var err error + if aux.Interval != "" { + if d.IntervalDuration, err = time.ParseDuration(aux.Interval); err != nil { + return err + } + d.Interval = ReadableDuration(d.IntervalDuration) + } + if aux.Timeout != "" { + if d.TimeoutDuration, err = time.ParseDuration(aux.Timeout); err != nil { + return err + } + d.Timeout = ReadableDuration(d.TimeoutDuration) + } + if aux.DeregisterCriticalServiceAfter != "" { + if d.DeregisterCriticalServiceAfterDuration, err = time.ParseDuration(aux.DeregisterCriticalServiceAfter); err != nil { + return err + } + d.DeregisterCriticalServiceAfter = ReadableDuration(d.DeregisterCriticalServiceAfterDuration) + } + return nil +} + +// HealthChecks is a collection of HealthCheck structs. 
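+// The query methods on Health below return this type; its AggregatedStatus
+// method (defined next) collapses a set of checks into one status. A hedged
+// sketch, assuming a configured api.Client and a hypothetical node "node1":
+//
+//	checks, _, err := client.Health().Node("node1", nil)
+//	if err == nil {
+//		fmt.Println(checks.AggregatedStatus()) // e.g. "passing"
+//	}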
+type HealthChecks []*HealthCheck
+
+// AggregatedStatus returns the "best" status for the list of health checks.
+// Because a given entry may have many service and node-level health checks
+// attached, this function determines the best representative of the status
+// as a single string using the following heuristic:
+//
+//	maintenance > critical > warning > passing
+//
+func (c HealthChecks) AggregatedStatus() string {
+	var passing, warning, critical, maintenance bool
+	for _, check := range c {
+		id := string(check.CheckID)
+		if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
+			maintenance = true
+			continue
+		}
+
+		switch check.Status {
+		case HealthPassing:
+			passing = true
+		case HealthWarning:
+			warning = true
+		case HealthCritical:
+			critical = true
+		default:
+			return ""
+		}
+	}
+
+	switch {
+	case maintenance:
+		return HealthMaint
+	case critical:
+		return HealthCritical
+	case warning:
+		return HealthWarning
+	case passing:
+		return HealthPassing
+	default:
+		return HealthPassing
+	}
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+	Node    *Node
+	Service *AgentService
+	Checks  HealthChecks
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+	c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+	return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag
+// or nodes with passing health checks only.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return h.service(service, tags, passingOnly, q, false)
+}
+
+func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	return h.service(service, tags, passingOnly, q, false)
+}
+
+// Connect is equivalent to Service except that it will only return services
+// which are Connect-enabled and will return the connection address for Connect
+// clients to use, which may be a proxy in front of the named service. If
+// passingOnly is true only instances where both the service and any proxy are
+// healthy will be returned.
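+//
+// A hedged sketch (assumes a configured api.Client; "web" is a hypothetical
+// Connect-enabled service):
+//
+//	entries, _, err := client.Health().Connect("web", "", true, nil)
+//	if err == nil {
+//		for _, entry := range entries {
+//			fmt.Println(entry.Service.Address, entry.Service.Port)
+//		}
+//	}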
+func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + var tags []string + if tag != "" { + tags = []string{tag} + } + return h.service(service, tags, passingOnly, q, true) +} + +func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + return h.service(service, tags, passingOnly, q, true) +} + +func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) { + path := "/v1/health/service/" + service + if connect { + path = "/v1/health/connect/" + service + } + r := h.c.newRequest("GET", path) + r.setQueryOptions(q) + if len(tags) > 0 { + for _, tag := range tags { + r.params.Add("tag", tag) + } + } + if passingOnly { + r.params.Set(HealthPassing, "1") + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*ServiceEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// State is used to retrieve all the checks in a given state. +// The wildcard "any" state can also be used for all checks. +func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { + switch state { + case HealthAny: + case HealthWarning: + case HealthCritical: + case HealthPassing: + default: + return nil, nil, fmt.Errorf("Unsupported state: %v", state) + } + r := h.c.newRequest("GET", "/v1/health/state/"+state) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out HealthChecks + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go new file mode 100644 index 000000000..bd45a067c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -0,0 +1,286 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +// KVPair is used to represent a single K/V entry +type KVPair struct { + // Key is the name of the key. It is also part of the URL path when accessed + // via the API. + Key string + + // CreateIndex holds the index corresponding the creation of this KVPair. This + // is a read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 + + // LockIndex holds the index corresponding to a lock on this key, if any. This + // is a read-only field. + LockIndex uint64 + + // Flags are any user-defined flags on the key. It is up to the implementer + // to check these values, since Consul does not treat them specially. + Flags uint64 + + // Value is the value for the key. This can be any value, but it will be + // base64 encoded upon transport. + Value []byte + + // Session is a string representing the ID of the session. Any other + // interactions with this key over the same session must specify the same + // session ID. 
+ Session string +} + +// KVPairs is a list of KVPair objects +type KVPairs []*KVPair + +// KV is used to manipulate the K/V API +type KV struct { + c *Client +} + +// KV is used to return a handle to the K/V apis +func (c *Client) KV() *KV { + return &KV{c} +} + +// Get is used to lookup a single key. The returned pointer +// to the KVPair will be nil if the key does not exist. +func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { + resp, qm, err := k.getInternal(key, nil, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to lookup all keys under a prefix +func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { + resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Keys is used to list all the keys under a prefix. Optionally, +// a separator can be used to limit the responses. +func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { + params := map[string]string{"keys": ""} + if separator != "" { + params["separator"] = separator + } + resp, qm, err := k.getInternal(prefix, params, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []string + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { + r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/")) + r.setQueryOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == 404 { + resp.Body.Close() + return nil, qm, nil + } else if resp.StatusCode != 200 { + resp.Body.Close() + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + return resp, qm, nil +} + +// Put is used to write a new value. Only the +// Key, Flags and Value is respected. +func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { + params := make(map[string]string, 1) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + _, wm, err := k.put(p.Key, params, p.Value, q) + return wm, err +} + +// CAS is used for a Check-And-Set operation. The Key, +// ModifyIndex, Flags and Value are respected. Returns true +// on success or false on failures. +func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) + return k.put(p.Key, params, p.Value, q) +} + +// Acquire is used for a lock acquisition operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. 
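+//
+// A hedged sketch (assumes a configured api.Client and a session ID created
+// via the Session endpoints; "locks/db" is a hypothetical key):
+//
+//	acquired, _, err := client.KV().Acquire(&api.KVPair{
+//		Key:     "locks/db",
+//		Value:   []byte("owner-1"),
+//		Session: sessionID,
+//	}, nil)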
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["acquire"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +// Release is used for a lock release operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. +func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["release"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { + if len(key) > 0 && key[0] == '/' { + return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key) + } + + r := k.c.newRequest("PUT", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + r.body = bytes.NewReader(body) + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + return res, qm, nil +} + +// Delete is used to delete a single key +func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(key, nil, w) + return qm, err +} + +// DeleteCAS is used for a Delete Check-And-Set operation. The Key +// and ModifyIndex are respected. Returns true on success or false on failures. +func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := map[string]string{ + "cas": strconv.FormatUint(p.ModifyIndex, 10), + } + return k.deleteInternal(p.Key, params, q) +} + +// DeleteTree is used to delete all keys under a prefix +func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) + return qm, err +} + +func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + return res, qm, nil +} + +// The Txn function has been deprecated from the KV object; please see the Txn +// object for more information about Transactions. +func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) { + var ops TxnOps + for _, op := range txn { + ops = append(ops, &TxnOp{KV: op}) + } + + respOk, txnResp, qm, err := k.c.txn(ops, q) + if err != nil { + return false, nil, nil, err + } + + // Convert from the internal format. 
+	kvResp := KVTxnResponse{
+		Errors: txnResp.Errors,
+	}
+	for _, result := range txnResp.Results {
+		kvResp.Results = append(kvResp.Results, result.KV)
+	}
+	return respOk, &kvResp, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 000000000..82339cb74
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,386 @@
+package api
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultLockSessionName is the Session Name we assign if none is provided
+	DefaultLockSessionName = "Consul API Lock"
+
+	// DefaultLockSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Lock. This is used because we do not have any
+	// other check to depend upon.
+	DefaultLockSessionTTL = "15s"
+
+	// DefaultLockWaitTime is how long we block for at a time to check if lock
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Lock acquisition.
+	DefaultLockWaitTime = 15 * time.Second
+
+	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
+	// before attempting to do the lock again. This is so that once a lock-delay
+	// is in effect, we do not hot loop retrying the acquisition.
+	DefaultLockRetryTime = 5 * time.Second
+
+	// DefaultMonitorRetryTime is how long we wait after a failed monitor check
+	// of a lock (500 response code). This allows the monitor to ride out brief
+	// periods of unavailability, subject to the MonitorRetries setting in the
+	// lock options which is by default set to 0, disabling this feature. This
+	// affects locks and semaphores.
+	DefaultMonitorRetryTime = 2 * time.Second
+
+	// LockFlagValue is a magic flag we set to indicate a key
+	// is being used for a lock. It is used to detect a potential
+	// conflict with a semaphore.
+	LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+	// ErrLockHeld is returned if we attempt to double lock
+	ErrLockHeld = fmt.Errorf("Lock already held")
+
+	// ErrLockNotHeld is returned if we attempt to unlock a lock
+	// that we do not hold.
+	ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+	// ErrLockInUse is returned if we attempt to destroy a lock
+	// that is in use.
+	ErrLockInUse = fmt.Errorf("Lock in use")
+
+	// ErrLockConflict is returned if the flags on a key
+	// used for a lock do not match expectation
+	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm described here: https://www.consul.io/docs/guides/leader-election.html.
+type Lock struct {
+	c    *Client
+	opts *LockOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
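+//
+// For illustration, a hedged sketch of acquiring a lock with custom options
+// (assumes a configured api.Client; "service/locks/leader" is a hypothetical
+// key):
+//
+//	lock, _ := client.LockOpts(&api.LockOptions{
+//		Key:        "service/locks/leader",
+//		SessionTTL: "10s",
+//	})
+//	lostCh, err := lock.Lock(nil) // blocks until held or an error occurs
+//	// ... do work, watching lostCh for lost leadership ...
+//	_ = lock.Unlock()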
+type LockOptions struct { + Key string // Must be set and have write permissions + Value []byte // Optional, value to associate with the lock + Session string // Optional, created if not specified + SessionOpts *SessionEntry // Optional, options to use when creating a session + SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) + SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime + LockTryOnce bool // Optional, defaults to false which means try forever +} + +// LockKey returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockKey(key string) (*Lock, error) { + opts := &LockOptions{ + Key: key, + } + return c.LockOpts(opts) +} + +// LockOpts returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { + if opts.Key == "" { + return nil, fmt.Errorf("missing key") + } + if opts.SessionName == "" { + opts.SessionName = DefaultLockSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultLockSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.LockWaitTime == 0 { + opts.LockWaitTime = DefaultLockWaitTime + } + l := &Lock{ + c: c, + opts: opts, + } + return l, nil +} + +// Lock attempts to acquire the lock and blocks while doing so. +// Providing a non-nil stopCh can be used to abort the lock attempt. +// Returns a channel that is closed if our lock is lost or an error. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the lock is held until Unlock() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the lock being lost. +func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return nil, ErrLockHeld + } + + // Check if we need to create a session first + l.lockSession = l.opts.Session + if l.lockSession == "" { + s, err := l.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + l.sessionRenew = make(chan struct{}) + l.lockSession = s + session := l.c.Session() + go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !l.isHeld { + close(l.sessionRenew) + l.sessionRenew = nil + } + }() + } + + // Setup the query options + kv := l.c.KV() + qOpts := &QueryOptions{ + WaitTime: l.opts.LockWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. 
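+	// (With LockTryOnce set we give up once the total elapsed time exceeds
+	// LockWaitTime; otherwise we keep retrying indefinitely, blocking in
+	// LockWaitTime-sized query windows.)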
+ if l.opts.LockTryOnce && attempts > 0 { + elapsed := time.Since(start) + if elapsed > l.opts.LockWaitTime { + return nil, nil + } + + // Query wait time should not exceed the lock wait time + qOpts.WaitTime = l.opts.LockWaitTime - elapsed + } + attempts++ + + // Look for an existing lock, blocking until not taken + pair, meta, err := kv.Get(l.opts.Key, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read lock: %v", err) + } + if pair != nil && pair.Flags != LockFlagValue { + return nil, ErrLockConflict + } + locked := false + if pair != nil && pair.Session == l.lockSession { + goto HELD + } + if pair != nil && pair.Session != "" { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Try to acquire the lock + pair = l.lockEntry(l.lockSession) + locked, _, err = kv.Acquire(pair, nil) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock: %v", err) + } + + // Handle the case of not getting the lock + if !locked { + // Determine why the lock failed + qOpts.WaitIndex = 0 + pair, meta, err = kv.Get(l.opts.Key, qOpts) + if pair != nil && pair.Session != "" { + //If the session is not null, this means that a wait can safely happen + //using a long poll + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } else { + // If the session is empty and the lock failed to acquire, then it means + // a lock-delay is in effect and a timed wait must be used + select { + case <-time.After(DefaultLockRetryTime): + goto WAIT + case <-stopCh: + return nil, nil + } + } + } + +HELD: + // Watch to ensure we maintain leadership + leaderCh := make(chan struct{}) + go l.monitorLock(l.lockSession, leaderCh) + + // Set that we own the lock + l.isHeld = true + + // Locked! All done + return leaderCh, nil +} + +// Unlock released the lock. It is an error to call this +// if the lock is not currently held. +func (l *Lock) Unlock() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Ensure the lock is actually held + if !l.isHeld { + return ErrLockNotHeld + } + + // Set that we no longer own the lock + l.isHeld = false + + // Stop the session renew + if l.sessionRenew != nil { + defer func() { + close(l.sessionRenew) + l.sessionRenew = nil + }() + } + + // Get the lock entry, and clear the lock session + lockEnt := l.lockEntry(l.lockSession) + l.lockSession = "" + + // Release the lock explicitly + kv := l.c.KV() + _, _, err := kv.Release(lockEnt, nil) + if err != nil { + return fmt.Errorf("failed to release lock: %v", err) + } + return nil +} + +// Destroy is used to cleanup the lock entry. It is not necessary +// to invoke. It will fail if the lock is in use. 
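+//
+// Typical lifecycle, sketched (lock obtained from LockKey or LockOpts):
+//
+//	lostCh, _ := lock.Lock(nil)
+//	// ... hold leadership until lostCh is closed ...
+//	_ = lock.Unlock()
+//	_ = lock.Destroy() // best-effort cleanup; returns ErrLockInUse if contended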
+func (l *Lock) Destroy() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return ErrLockHeld + } + + // Look for an existing lock + kv := l.c.KV() + pair, _, err := kv.Get(l.opts.Key, nil) + if err != nil { + return fmt.Errorf("failed to read lock: %v", err) + } + + // Nothing to do if the lock does not exist + if pair == nil { + return nil + } + + // Check for possible flag conflict + if pair.Flags != LockFlagValue { + return ErrLockConflict + } + + // Check if it is in use + if pair.Session != "" { + return ErrLockInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(pair, nil) + if err != nil { + return fmt.Errorf("failed to remove lock: %v", err) + } + if !didRemove { + return ErrLockInUse + } + return nil +} + +// createSession is used to create a new managed session +func (l *Lock) createSession() (string, error) { + session := l.c.Session() + se := l.opts.SessionOpts + if se == nil { + se = &SessionEntry{ + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + } + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// lockEntry returns a formatted KVPair for the lock +func (l *Lock) lockEntry(session string) *KVPair { + return &KVPair{ + Key: l.opts.Key, + Value: l.opts.Value, + Session: session, + Flags: LockFlagValue, + } +} + +// monitorLock is a long running routine to monitor a lock ownership +// It closes the stopCh if we lose our leadership. +func (l *Lock) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := l.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := l.opts.MonitorRetries +RETRY: + pair, meta, err := kv.Get(l.opts.Key, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsRetryableError(err) { + time.Sleep(l.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + if pair != nil && pair.Session == session { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go new file mode 100644 index 000000000..079e22486 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator.go @@ -0,0 +1,11 @@ +package api + +// Operator can be used to perform low-level operator tasks for Consul. +type Operator struct { + c *Client +} + +// Operator returns a handle to the operator endpoints. +func (c *Client) Operator() *Operator { + return &Operator{c} +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go new file mode 100644 index 000000000..5cf7e4973 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_area.go @@ -0,0 +1,194 @@ +package api + +// The /v1/operator/area endpoints are available only in Consul Enterprise and +// interact with its network area subsystem. Network areas are used to link +// together Consul servers in different Consul datacenters. With network areas, +// Consul datacenters can be linked together in ways other than a fully-connected +// mesh, as is required for Consul's WAN. + +import ( + "net" + "time" +) + +// Area defines a network area. 
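+//
+// For illustration (network areas are Consul Enterprise functionality; the
+// peer datacenter "dc2" and address below are hypothetical):
+//
+//	id, _, err := client.Operator().AreaCreate(&api.Area{
+//		PeerDatacenter: "dc2",
+//		RetryJoin:      []string{"10.0.0.10"},
+//	}, nil)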
+type Area struct {
+	// ID is the identifier for an area (a UUID). This must be left empty
+	// when creating a new area.
+	ID string
+
+	// PeerDatacenter is the peer Consul datacenter that will make up the
+	// other side of this network area. Network areas always involve a pair
+	// of datacenters: the datacenter where the area was created, and the
+	// peer datacenter. This is required.
+	PeerDatacenter string
+
+	// RetryJoin specifies the addresses of Consul servers to join, such as
+	// IPs or hostnames with an optional port number. This is optional.
+	RetryJoin []string
+
+	// UseTLS specifies whether gossip over this area should be encrypted with TLS
+	// if possible.
+	UseTLS bool
+}
+
+// AreaJoinResponse is returned when a join occurs and gives the result for each
+// address.
+type AreaJoinResponse struct {
+	// The address that was joined.
+	Address string
+
+	// Whether or not the join was a success.
+	Joined bool
+
+	// If we couldn't join, this is the message with information.
+	Error string
+}
+
+// SerfMember is a generic structure for reporting information about members in
+// a Serf cluster. This is only used by the area endpoints right now, but this
+// could be expanded to other endpoints in the future.
+type SerfMember struct {
+	// ID is the node identifier (a UUID).
+	ID string
+
+	// Name is the node name.
+	Name string
+
+	// Addr has the IP address.
+	Addr net.IP
+
+	// Port is the RPC port.
+	Port uint16
+
+	// Datacenter is the DC name.
+	Datacenter string
+
+	// Role is "client", "server", or "unknown".
+	Role string
+
+	// Build has the version of the Consul agent.
+	Build string
+
+	// Protocol is the protocol of the Consul agent.
+	Protocol int
+
+	// Status is the Serf health status "none", "alive", "leaving", "left",
+	// or "failed".
+	Status string
+
+	// RTT is the estimated round trip time from the server handling the
+	// request to this member. This will be negative if no RTT estimate
+	// is available.
+	RTT time.Duration
+}
+
+// AreaCreate will create a new network area. The ID in the given structure must
+// be empty and a generated ID will be returned on success.
+func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) {
+	r := op.c.newRequest("POST", "/v1/operator/area")
+	r.setWriteOptions(q)
+	r.obj = area
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// AreaUpdate will update the configuration of the network area with the given ID.
+func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID)
+	r.setWriteOptions(q)
+	r.obj = area
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// AreaGet returns a single network area.
+func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaList returns all the available network areas. +func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaDelete deletes the given network area. +func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { + r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// AreaJoin attempts to join the given set of join addresses to the given +// network area. See the Area structure for details about join addresses. +func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { + r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") + r.setWriteOptions(q) + r.obj = addresses + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out []*AreaJoinResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, wm, nil +} + +// AreaMembers lists the Serf information about the members in the given area. +func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) { + var out []*SerfMember + qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go new file mode 100644 index 000000000..b179406dc --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go @@ -0,0 +1,219 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// AutopilotConfiguration is used for querying/setting the Autopilot configuration. +// Autopilot helps manage operator tasks related to Consul servers like removing +// failed servers from the Raft quorum. +type AutopilotConfiguration struct { + // CleanupDeadServers controls whether to remove dead servers from the Raft + // peer list when a new server joins + CleanupDeadServers bool + + // LastContactThreshold is the limit on the amount of time a server can go + // without leader contact before being considered unhealthy. + LastContactThreshold *ReadableDuration + + // MaxTrailingLogs is the amount of entries in the Raft Log that a server can + // be behind before being considered unhealthy. + MaxTrailingLogs uint64 + + // ServerStabilizationTime is the minimum amount of time a server must be + // in a stable, healthy state before it can be added to the cluster. Only + // applicable with Raft protocol version 3 or higher. + ServerStabilizationTime *ReadableDuration + + // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating + // servers into zones for redundancy. If left blank, this feature will be disabled. 
+ RedundancyZoneTag string + + // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration + // strategy of waiting until enough newer-versioned servers have been added to the + // cluster before promoting them to voters. + DisableUpgradeMigration bool + + // (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when + // performing upgrade migrations. If left blank, the Consul version will be used. + UpgradeVersionTag string + + // CreateIndex holds the index corresponding the creation of this configuration. + // This is a read-only field. + CreateIndex uint64 + + // ModifyIndex will be set to the index of the last update when retrieving the + // Autopilot configuration. Resubmitting a configuration with + // AutopilotCASConfiguration will perform a check-and-set operation which ensures + // there hasn't been a subsequent update since the configuration was retrieved. + ModifyIndex uint64 +} + +// ServerHealth is the health (from the leader's point of view) of a server. +type ServerHealth struct { + // ID is the raft ID of the server. + ID string + + // Name is the node name of the server. + Name string + + // Address is the address of the server. + Address string + + // The status of the SerfHealth check for the server. + SerfStatus string + + // Version is the Consul version of the server. + Version string + + // Leader is whether this server is currently the leader. + Leader bool + + // LastContact is the time since this node's last contact with the leader. + LastContact *ReadableDuration + + // LastTerm is the highest leader term this server has a record of in its Raft log. + LastTerm uint64 + + // LastIndex is the last log index this server has a record of in its Raft log. + LastIndex uint64 + + // Healthy is whether or not the server is healthy according to the current + // Autopilot config. + Healthy bool + + // Voter is whether this is a voting server. + Voter bool + + // StableSince is the last time this server's Healthy value changed. + StableSince time.Time +} + +// OperatorHealthReply is a representation of the overall health of the cluster +type OperatorHealthReply struct { + // Healthy is true if all the servers in the cluster are healthy. + Healthy bool + + // FailureTolerance is the number of healthy servers that could be lost without + // an outage occurring. + FailureTolerance int + + // Servers holds the health of each server. + Servers []ServerHealth +} + +// ReadableDuration is a duration type that is serialized to JSON in human readable format. 
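+//
+// For example (a small sketch):
+//
+//	d := api.NewReadableDuration(90 * time.Second)
+//	b, _ := d.MarshalJSON() // yields the JSON string "1m30s"
+//	_ = b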
+type ReadableDuration time.Duration + +func NewReadableDuration(dur time.Duration) *ReadableDuration { + d := ReadableDuration(dur) + return &d +} + +func (d *ReadableDuration) String() string { + return d.Duration().String() +} + +func (d *ReadableDuration) Duration() time.Duration { + if d == nil { + return time.Duration(0) + } + return time.Duration(*d) +} + +func (d *ReadableDuration) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil +} + +func (d *ReadableDuration) UnmarshalJSON(raw []byte) error { + if d == nil { + return fmt.Errorf("cannot unmarshal to nil pointer") + } + + str := string(raw) + if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' { + return fmt.Errorf("must be enclosed with quotes: %s", str) + } + dur, err := time.ParseDuration(str[1 : len(str)-1]) + if err != nil { + return err + } + *d = ReadableDuration(dur) + return nil +} + +// AutopilotGetConfiguration is used to query the current Autopilot configuration. +func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) { + r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out AutopilotConfiguration + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + + return &out, nil +} + +// AutopilotSetConfiguration is used to set the current Autopilot configuration. +func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error { + r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") + r.setWriteOptions(q) + r.obj = conf + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// AutopilotCASConfiguration is used to perform a Check-And-Set update on the +// Autopilot configuration. The ModifyIndex value will be respected. Returns +// true on success or false on failures. 
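+//
+// A hedged read-modify-write sketch (assumes a configured api.Client):
+//
+//	op := client.Operator()
+//	conf, err := op.AutopilotGetConfiguration(nil)
+//	if err == nil {
+//		conf.CleanupDeadServers = false
+//		if ok, err := op.AutopilotCASConfiguration(conf, nil); err == nil && !ok {
+//			// ModifyIndex changed underneath us; re-read and retry.
+//		}
+//	}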
+func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+	r.setWriteOptions(q)
+	r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
+	r.obj = conf
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return false, err
+	}
+	defer resp.Body.Close()
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+
+	return res, nil
+}
+
+// AutopilotServerHealth is used to query the health of the servers in the
+// cluster.
+func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
+	r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out OperatorHealthReply
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
new file mode 100644
index 000000000..038d5d5b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
@@ -0,0 +1,89 @@
+package api
+
+// keyringRequest is used for performing Keyring operations
+type keyringRequest struct {
+	Key string
+}
+
+// KeyringResponse is returned when listing the gossip encryption keys
+type KeyringResponse struct {
+	// Whether this response is for a WAN ring
+	WAN bool
+
+	// The datacenter name this request corresponds to
+	Datacenter string
+
+	// Segment has the network segment this request corresponds to.
+	Segment string
+
+	// Messages has information or errors from serf
+	Messages map[string]string `json:",omitempty"`
+
+	// A map of the encryption keys to the number of nodes they're installed on
+	Keys map[string]int
+
+	// The total number of nodes in this ring
+	NumNodes int
+}
+
+// KeyringInstall is used to install a new gossip encryption key into the cluster
+func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
+	r := op.c.newRequest("POST", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringList is used to list the gossip keys installed in the cluster
+func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
+	r := op.c.newRequest("GET", "/v1/operator/keyring")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*KeyringResponse
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// KeyringRemove is used to remove a gossip encryption key from the cluster
+func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringUse is used to change the active gossip encryption key
+func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
+	r := op.c.newRequest("PUT", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go
new file mode 100644
index 000000000..a9844df2d
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go
@@ -0,0 +1,89 @@
+package api
+
+// RaftServer has information about a server in the Raft configuration.
+type RaftServer struct {
+	// ID is the unique ID for the server. These are currently the same
+	// as the address, but they will be changed to a real GUID in a future
+	// release of Consul.
+	ID string
+
+	// Node is the node name of the server, as known by Consul, or this
+	// will be set to "(unknown)" otherwise.
+	Node string
+
+	// Address is the IP:port of the server, used for Raft communications.
+	Address string
+
+	// Leader is true if this server is the current cluster leader.
+	Leader bool
+
+	// ProtocolVersion is the raft protocol version used by the server.
+	ProtocolVersion string
+
+	// Voter is true if this server has a vote in the cluster. This might
+	// be false if the server is staging and still coming online, or if
+	// it's a non-voting server, which will be added in a future release of
+	// Consul.
+	Voter bool
+}
+
+// RaftConfiguration is returned when querying for the current Raft configuration.
+type RaftConfiguration struct {
+	// Servers has the list of servers in the Raft configuration.
+	Servers []*RaftServer
+
+	// Index has the Raft index of this configuration.
+	Index uint64
+}
+
+// RaftGetConfiguration is used to query the current Raft peer set.
+func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
+	r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out RaftConfiguration
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by address in the form of
+// "IP:port".
+func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("address", string(address))
+
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
+
+// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by ID.
+func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("id", string(id))
+
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go
new file mode 100644
index 000000000..92b05d3c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_segment.go
@@ -0,0 +1,11 @@
+package api
+
+// SegmentList returns all the available LAN segments.
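+// Network segments are Consul Enterprise functionality. A usage sketch,
+// assuming a configured api.Client:
+//
+//	segments, _, err := client.Operator().SegmentList(nil)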
+func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) { + var out []string + qm, err := op.c.query("/v1/operator/segment", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go new file mode 100644 index 000000000..020458116 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -0,0 +1,217 @@ +package api + +// QueryDatacenterOptions sets options about how we fail over if there are no +// healthy nodes in the local datacenter. +type QueryDatacenterOptions struct { + // NearestN is set to the number of remote datacenters to try, based on + // network coordinates. + NearestN int + + // Datacenters is a fixed list of datacenters to try after NearestN. We + // never try a datacenter multiple times, so those are subtracted from + // this list before proceeding. + Datacenters []string +} + +// QueryDNSOptions controls settings when query results are served over DNS. +type QueryDNSOptions struct { + // TTL is the time to live for the served DNS results. + TTL string +} + +// ServiceQuery is used to query for a set of healthy nodes offering a specific +// service. +type ServiceQuery struct { + // Service is the service to query. + Service string + + // Near allows baking in the name of a node to automatically distance- + // sort from. The magic "_agent" value is supported, which sorts near + // the agent which initiated the request by default. + Near string + + // Failover controls what we do if there are no healthy nodes in the + // local datacenter. + Failover QueryDatacenterOptions + + // IgnoreCheckIDs is an optional list of health check IDs to ignore when + // considering which nodes are healthy. It is useful as an emergency measure + // to temporarily override some health check that is producing false negatives + // for example. + IgnoreCheckIDs []string + + // If OnlyPassing is true then we will only include nodes with passing + // health checks (critical AND warning checks will cause a node to be + // discarded) + OnlyPassing bool + + // Tags are a set of required and/or disallowed tags. If a tag is in + // this list it must be present. If the tag is preceded with "!" then + // it is disallowed. + Tags []string + + // NodeMeta is a map of required node metadata fields. If a key/value + // pair is in this map it must be present on the node in order for the + // service entry to be returned. + NodeMeta map[string]string + + // ServiceMeta is a map of required service metadata fields. If a key/value + // pair is in this map it must be present on the node in order for the + // service entry to be returned. + ServiceMeta map[string]string + + // Connect if true will filter the prepared query results to only + // include Connect-capable services. These include both native services + // and proxies for matching services. Note that if a proxy matches, + // the constraints in the query above (Near, OnlyPassing, etc.) apply + // to the _proxy_ and not the service being proxied. In practice, proxies + // should be directly next to their services so this isn't an issue. + Connect bool +} + +// QueryTemplate carries the arguments for creating a templated query. +type QueryTemplate struct { + // Type specifies the type of the query template. Currently only + // "name_prefix_match" is supported. This field is required. 
+ Type string + + // Regexp allows specifying a regex pattern to match against the name + // of the query being executed. + Regexp string +} + +// PreparedQueryDefinition defines a complete prepared query. +type PreparedQueryDefinition struct { + // ID is the UUID-based ID for the query, always generated by Consul. + ID string + + // Name is an optional friendly name for the query supplied by the + // user. NOTE - if this feature is used then it will reduce the security + // of any read ACL associated with this query/service since this name + // can be used to locate nodes without supplying any ACL. + Name string + + // Session is an optional session to tie this query's lifetime to. If + // this is omitted then the query will not expire. + Session string + + // Token is the ACL token used when the query was created, and it is + // used when a query is subsequently executed. This token, or a token + // with management privileges, must be used to change the query later. + Token string + + // Service defines a service query (leaving things open for other types + // later). + Service ServiceQuery + + // DNS has options that control how the results of this query are + // served over DNS. + DNS QueryDNSOptions + + // Template is used to pass through the arguments for creating a + // prepared query with an attached template. If a template is given, + // interpolations are possible in other struct fields. + Template QueryTemplate +} + +// PreparedQueryExecuteResponse has the results of executing a query. +type PreparedQueryExecuteResponse struct { + // Service is the service that was queried. + Service string + + // Nodes has the nodes that were output by the query. + Nodes []ServiceEntry + + // DNS has the options for serving these results over DNS. + DNS QueryDNSOptions + + // Datacenter is the datacenter that these results came from. + Datacenter string + + // Failovers is a count of how many times we had to query a remote + // datacenter. + Failovers int +} + +// PreparedQuery can be used to query the prepared query endpoints. +type PreparedQuery struct { + c *Client +} + +// PreparedQuery returns a handle to the prepared query endpoints. +func (c *Client) PreparedQuery() *PreparedQuery { + return &PreparedQuery{c} +} + +// Create makes a new prepared query. The ID of the new query is returned. +func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { + r := c.c.newRequest("POST", "/v1/query") + r.setWriteOptions(q) + r.obj = query + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update makes updates to an existing prepared query. +func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { + return c.c.write("/v1/query/"+query.ID, query, nil, q) +} + +// List is used to fetch all the prepared queries (always requires a management +// token). +func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Get is used to fetch a specific prepared query.
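+// For example (hypothetical, assuming an ID returned from Create): +// +// defs, _, err := client.PreparedQuery().Get(id, nil)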
+func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query/"+queryID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Delete is used to delete a specific prepared query. +func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("DELETE", "/v1/query/"+queryID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// Execute is used to execute a specific prepared query. You can execute using +// a query ID or name. +func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) { + var out *PreparedQueryExecuteResponse + qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go new file mode 100644 index 000000000..745a208c9 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/raw.go @@ -0,0 +1,24 @@ +package api + +// Raw can be used to do raw queries against custom endpoints +type Raw struct { + c *Client +} + +// Raw returns a handle to query endpoints +func (c *Client) Raw() *Raw { + return &Raw{c} +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. +func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + return raw.c.query(endpoint, out, q) +} + +// Write is used to do a PUT request against an endpoint +// and serialize/deserialize using the standard Consul conventions. +func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + return raw.c.write(endpoint, in, out, q) +} diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go new file mode 100644 index 000000000..bc4f885fe --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/semaphore.go @@ -0,0 +1,514 @@ +package api + +import ( + "encoding/json" + "fmt" + "path" + "sync" + "time" +) + +const ( + // DefaultSemaphoreSessionName is the Session Name we assign if none is provided + DefaultSemaphoreSessionName = "Consul API Semaphore" + + // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided + // when creating a new Semaphore. This is used because we do not have any + // other check to depend upon. + DefaultSemaphoreSessionTTL = "15s" + + // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore + // acquisition is possible. This affects the minimum time it takes to cancel + // a Semaphore acquisition. + DefaultSemaphoreWaitTime = 15 * time.Second + + // DefaultSemaphoreKey is the key used within the prefix + // for coordination between all the contenders. + DefaultSemaphoreKey = ".lock" + + // SemaphoreFlagValue is a magic flag we set to indicate a key + // is being used for a semaphore. It is used to detect a potential + // conflict with a lock.
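+// (The Lock helper in this package uses a different magic flag value, so a +// prefix already used for a lock cannot silently be reused for a semaphore, +// and vice versa.)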
+ SemaphoreFlagValue = 0xe0f69a2baa414de0 +) + +var ( + // ErrSemaphoreHeld is returned if we attempt to acquire a semaphore we already hold + ErrSemaphoreHeld = fmt.Errorf("Semaphore already held") + + // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore + // that we do not hold. + ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held") + + // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore + // that is in use. + ErrSemaphoreInUse = fmt.Errorf("Semaphore in use") + + // ErrSemaphoreConflict is returned if the flags on a key + // used for a semaphore do not match expectation + ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use") +) + +// Semaphore is used to implement a distributed semaphore +// using the Consul KV primitives. +type Semaphore struct { + c *Client + opts *SemaphoreOptions + + isHeld bool + sessionRenew chan struct{} + lockSession string + l sync.Mutex +} + +// SemaphoreOptions is used to parameterize the Semaphore +type SemaphoreOptions struct { + Prefix string // Must be set and have write permissions + Limit int // Must be set, and be positive + Value []byte // Optional, value to associate with the contender entry + Session string // Optional, created if not specified + SessionName string // Optional, defaults to DefaultSemaphoreSessionName + SessionTTL string // Optional, defaults to DefaultSemaphoreSessionTTL + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime + SemaphoreTryOnce bool // Optional, defaults to false which means try forever +} + +// semaphoreLock is written under the DefaultSemaphoreKey and +// is used to coordinate between all the contenders. +type semaphoreLock struct { + // Limit is the integer limit of holders. This is used to + // verify that all the holders agree on the value. + Limit int + + // Holders is a list of all the semaphore holders. + // It maps session IDs to true and is effectively used as a set. + Holders map[string]bool +} + +// SemaphorePrefix is used to create a Semaphore which will operate +// at the given KV prefix and uses the given limit for the semaphore. +// The prefix must have write privileges, and the limit must be agreed +// upon by all contenders. +func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) { + opts := &SemaphoreOptions{ + Prefix: prefix, + Limit: limit, + } + return c.SemaphoreOpts(opts) +} + +// SemaphoreOpts is used to create a Semaphore with the given options. +// The prefix must have write privileges, and the limit must be agreed +// upon by all contenders. If a Session is not provided, one will be created.
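+// A hypothetical acquisition, assuming a configured *Client named client: +// +// sema, err := client.SemaphoreOpts(&SemaphoreOptions{Prefix: "service/leader", Limit: 2}) +// slotCh, err := sema.Acquire(nil) +// // ... hold the slot ... +// err = sema.Release()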
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) { + if opts.Prefix == "" { + return nil, fmt.Errorf("missing prefix") + } + if opts.Limit <= 0 { + return nil, fmt.Errorf("semaphore limit must be positive") + } + if opts.SessionName == "" { + opts.SessionName = DefaultSemaphoreSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultSemaphoreSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.SemaphoreWaitTime == 0 { + opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime + } + s := &Semaphore{ + c: c, + opts: opts, + } + return s, nil +} + +// Acquire attempts to reserve a slot in the semaphore, blocking until +// success, interrupted via the stopCh or an error is encountered. +// Providing a non-nil stopCh can be used to abort the attempt. +// On success, a channel is returned that represents our slot. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the slot is held until Release() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the session being lost. +func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return nil, ErrSemaphoreHeld + } + + // Check if we need to create a session first + s.lockSession = s.opts.Session + if s.lockSession == "" { + sess, err := s.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + s.sessionRenew = make(chan struct{}) + s.lockSession = sess + session := s.c.Session() + go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !s.isHeld { + close(s.sessionRenew) + s.sessionRenew = nil + } + }() + } + + // Create the contender entry + kv := s.c.KV() + made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) + if err != nil || !made { + return nil, fmt.Errorf("failed to make contender entry: %v", err) + } + + // Setup the query options + qOpts := &QueryOptions{ + WaitTime: s.opts.SemaphoreWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. 
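+ // When SemaphoreTryOnce is set, each retry shortens the blocking wait so + // the total time spent here never exceeds SemaphoreWaitTime; once that + // budget is exhausted we give up and return a nil channel instead of + // retrying forever.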
+ if s.opts.SemaphoreTryOnce && attempts > 0 { + elapsed := time.Since(start) + if elapsed > s.opts.SemaphoreWaitTime { + return nil, nil + } + + // Query wait time should not exceed the semaphore wait time + qOpts.WaitTime = s.opts.SemaphoreWaitTime - elapsed + } + attempts++ + + // Read the prefix + pairs, meta, err := kv.List(s.opts.Prefix, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read prefix: %v", err) + } + + // Decode the lock + lockPair := s.findLock(pairs) + if lockPair.Flags != SemaphoreFlagValue { + return nil, ErrSemaphoreConflict + } + lock, err := s.decodeLock(lockPair) + if err != nil { + return nil, err + } + + // Verify we agree with the limit + if lock.Limit != s.opts.Limit { + return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", + lock.Limit, s.opts.Limit) + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if the lock is held + if len(lock.Holders) >= lock.Limit { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Create a new lock with us as a holder + lock.Holders[s.lockSession] = true + newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) + if err != nil { + return nil, err + } + + // Attempt the acquisition + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return nil, fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + // Update failed, could have been a race with another contender, + // retry the operation + goto WAIT + } + + // Watch to ensure we maintain ownership of the slot + lockCh := make(chan struct{}) + go s.monitorLock(s.lockSession, lockCh) + + // Set that we own the lock + s.isHeld = true + + // Acquired! All done + return lockCh, nil +} + +// Release is used to voluntarily give up our semaphore slot. It is +// an error to call this if the semaphore has not been acquired. +func (s *Semaphore) Release() error { + // Hold the lock as we try to release + s.l.Lock() + defer s.l.Unlock() + + // Ensure the lock is actually held + if !s.isHeld { + return ErrSemaphoreNotHeld + } + + // Set that we no longer own the lock + s.isHeld = false + + // Stop the session renew + if s.sessionRenew != nil { + defer func() { + close(s.sessionRenew) + s.sessionRenew = nil + }() + } + + // Get and clear the lock session + lockSession := s.lockSession + s.lockSession = "" + + // Remove ourselves as a lock holder + kv := s.c.KV() + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) +READ: + pair, _, err := kv.Get(key, nil) + if err != nil { + return err + } + if pair == nil { + pair = &KVPair{} + } + lock, err := s.decodeLock(pair) + if err != nil { + return err + } + + // Create a new lock without us as a holder + if _, ok := lock.Holders[lockSession]; ok { + delete(lock.Holders, lockSession) + newLock, err := s.encodeLock(lock, pair.ModifyIndex) + if err != nil { + return err + } + + // Swap the locks + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + goto READ + } + } + + // Destroy the contender entry + contenderKey := path.Join(s.opts.Prefix, lockSession) + if _, err := kv.Delete(contenderKey, nil); err != nil { + return err + } + return nil +} + +// Destroy is used to cleanup the semaphore entry. It is not necessary +// to invoke. It will fail if the semaphore is in use. 
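+// Note that Destroy only removes the shared coordination key; contender +// entries are removed by Release or expire with their sessions.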
+func (s *Semaphore) Destroy() error { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return ErrSemaphoreHeld + } + + // List for the semaphore + kv := s.c.KV() + pairs, _, err := kv.List(s.opts.Prefix, nil) + if err != nil { + return fmt.Errorf("failed to read prefix: %v", err) + } + + // Find the lock pair, bail if it doesn't exist + lockPair := s.findLock(pairs) + if lockPair.ModifyIndex == 0 { + return nil + } + if lockPair.Flags != SemaphoreFlagValue { + return ErrSemaphoreConflict + } + + // Decode the lock + lock, err := s.decodeLock(lockPair) + if err != nil { + return err + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if there are any holders + if len(lock.Holders) > 0 { + return ErrSemaphoreInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(lockPair, nil) + if err != nil { + return fmt.Errorf("failed to remove semaphore: %v", err) + } + if !didRemove { + return ErrSemaphoreInUse + } + return nil +} + +// createSession is used to create a new managed session +func (s *Semaphore) createSession() (string, error) { + session := s.c.Session() + se := &SessionEntry{ + Name: s.opts.SessionName, + TTL: s.opts.SessionTTL, + Behavior: SessionBehaviorDelete, + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// contenderEntry returns a formatted KVPair for the contender +func (s *Semaphore) contenderEntry(session string) *KVPair { + return &KVPair{ + Key: path.Join(s.opts.Prefix, session), + Value: s.opts.Value, + Session: session, + Flags: SemaphoreFlagValue, + } +} + +// findLock is used to find the KV Pair which is used for coordination +func (s *Semaphore) findLock(pairs KVPairs) *KVPair { + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) + for _, pair := range pairs { + if pair.Key == key { + return pair + } + } + return &KVPair{Flags: SemaphoreFlagValue} +} + +// decodeLock is used to decode a semaphoreLock from an +// entry in Consul +func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { + // Handle if there is no lock + if pair == nil || pair.Value == nil { + return &semaphoreLock{ + Limit: s.opts.Limit, + Holders: make(map[string]bool), + }, nil + } + + l := &semaphoreLock{} + if err := json.Unmarshal(pair.Value, l); err != nil { + return nil, fmt.Errorf("lock decoding failed: %v", err) + } + return l, nil +} + +// encodeLock is used to encode a semaphoreLock into a KVPair +// that can be PUT +func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { + enc, err := json.Marshal(l) + if err != nil { + return nil, fmt.Errorf("lock encoding failed: %v", err) + } + pair := &KVPair{ + Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), + Value: enc, + Flags: SemaphoreFlagValue, + ModifyIndex: oldIndex, + } + return pair, nil +} + +// pruneDeadHolders is used to remove all the dead lock holders +func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { + // Gather all the live holders + alive := make(map[string]struct{}, len(pairs)) + for _, pair := range pairs { + if pair.Session != "" { + alive[pair.Session] = struct{}{} + } + } + + // Remove any holders that are dead + for holder := range lock.Holders { + if _, ok := alive[holder]; !ok { + delete(lock.Holders, holder) + } + } +} + +// monitorLock is a long running routine to monitor a semaphore ownership +// It closes the stopCh if we lose our slot. 
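+// The monitor issues consistent blocking List queries against the prefix and +// re-derives the holder set on every change, closing the channel as soon as +// our session is no longer among the holders.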
+func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := s.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := s.opts.MonitorRetries +RETRY: + pairs, meta, err := kv.List(s.opts.Prefix, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsRetryableError(err) { + time.Sleep(s.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + lockPair := s.findLock(pairs) + lock, err := s.decodeLock(lockPair) + if err != nil { + return + } + s.pruneDeadHolders(lock, pairs) + if _, ok := lock.Holders[session]; ok { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go new file mode 100644 index 000000000..1613f11a6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/session.go @@ -0,0 +1,224 @@ +package api + +import ( + "errors" + "fmt" + "time" +) + +const ( + // SessionBehaviorRelease is the default behavior and causes + // all associated locks to be released on session invalidation. + SessionBehaviorRelease = "release" + + // SessionBehaviorDelete is new in Consul 0.5 and changes the + // behavior to delete all associated locks on session invalidation. + // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. + SessionBehaviorDelete = "delete" +) + +var ErrSessionExpired = errors.New("session expired") + +// SessionEntry represents a session in consul +type SessionEntry struct { + CreateIndex uint64 + ID string + Name string + Node string + Checks []string + LockDelay time.Duration + Behavior string + TTL string +} + +// Session can be used to query the Session endpoints +type Session struct { + c *Client +} + +// Session returns a handle to the session endpoints +func (c *Client) Session() *Session { + return &Session{c} +} + +// CreateNoChecks is like Create but is used specifically to create +// a session with no associated health checks. +func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + body := make(map[string]interface{}) + body["Checks"] = []string{} + if se != nil { + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(body, q) + +} + +// Create makes a new session. Providing a session entry can +// customize the session. It can also be nil to use defaults. 
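+// A hypothetical TTL-based session, assuming a configured *Client named client: +// +// id, _, err := client.Session().Create(&SessionEntry{ +// Name: "my-service-lock", +// TTL: "15s", +// Behavior: SessionBehaviorDelete, +// }, nil)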
+func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + var obj interface{} + if se != nil { + body := make(map[string]interface{}) + obj = body + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if len(se.Checks) > 0 { + body["Checks"] = se.Checks + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(obj, q) +} + +func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { + var out struct{ ID string } + wm, err := s.c.write("/v1/session/create", obj, &out, q) + if err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Destroy invalidates a given session +func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Renew renews the TTL on a given session +func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/renew/"+id) + r.setWriteOptions(q) + rtt, resp, err := s.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + + if resp.StatusCode == 404 { + return nil, wm, nil + } else if resp.StatusCode != 200 { + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + if len(entries) > 0 { + return entries[0], wm, nil + } + return nil, wm, nil +} + +// RenewPeriodic is used to periodically invoke Session.Renew on a +// session until a doneCh is closed. This is meant to be used in a long running +// goroutine to ensure a session stays valid. +func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error { + ctx := q.Context() + + ttl, err := time.ParseDuration(initialTTL) + if err != nil { + return err + } + + waitDur := ttl / 2 + lastRenewTime := time.Now() + var lastErr error + for { + if time.Since(lastRenewTime) > ttl { + return lastErr + } + select { + case <-time.After(waitDur): + entry, _, err := s.Renew(id, q) + if err != nil { + waitDur = time.Second + lastErr = err + continue + } + if entry == nil { + return ErrSessionExpired + } + + // Handle the server updating the TTL + ttl, _ = time.ParseDuration(entry.TTL) + waitDur = ttl / 2 + lastRenewTime = time.Now() + + case <-doneCh: + // Attempt a session destroy + s.Destroy(id, q) + return nil + + case <-ctx.Done(): + // Bail immediately since attempting the destroy would + // use the canceled context in q, which would just bail. 
+ return ctx.Err() + } + } +} + +// Info looks up a single session +func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/info/"+id, &entries, q) + if err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// Node gets sessions for a node +func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/node/"+node, &entries, q) + if err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// List gets all active sessions +func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/list", &entries, q) + if err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go new file mode 100644 index 000000000..e902377dd --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/snapshot.go @@ -0,0 +1,47 @@ +package api + +import ( + "io" +) + +// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of +// Consul's internal state and restore snapshots for disaster recovery. +type Snapshot struct { + c *Client +} + +// Snapshot returns a handle that exposes the snapshot endpoints. +func (c *Client) Snapshot() *Snapshot { + return &Snapshot{c} +} + +// Save requests a new snapshot and provides an io.ReadCloser with the snapshot +// data to save. If this doesn't return an error, then it's the responsibility +// of the caller to close it. Only a subset of the QueryOptions are supported: +// Datacenter, AllowStale, and Token. +func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/snapshot") + r.setQueryOptions(q) + + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + return resp.Body, qm, nil +} + +// Restore streams in an existing snapshot and attempts to restore it.
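+// A hypothetical save/restore round trip, assuming a configured *Client named client: +// +// snap := client.Snapshot() +// rc, _, err := snap.Save(nil) +// defer rc.Close() +// err = snap.Restore(nil, rc)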
+func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { + r := s.c.newRequest("PUT", "/v1/snapshot") + r.body = in + r.setWriteOptions(q) + _, _, err := requireOK(s.c.doRequest(r)) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go new file mode 100644 index 000000000..74ef61a67 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/status.go @@ -0,0 +1,43 @@ +package api + +// Status can be used to query the Status endpoints +type Status struct { + c *Client +} + +// Status returns a handle to the status endpoints +func (c *Client) Status() *Status { + return &Status{c} +} + +// Leader is used to query for a known leader +func (s *Status) Leader() (string, error) { + r := s.c.newRequest("GET", "/v1/status/leader") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + var leader string + if err := decodeBody(resp, &leader); err != nil { + return "", err + } + return leader, nil +} + +// Peers is used to query for the known raft peers +func (s *Status) Peers() ([]string, error) { + r := s.c.newRequest("GET", "/v1/status/peers") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var peers []string + if err := decodeBody(resp, &peers); err != nil { + return nil, err + } + return peers, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/txn.go b/vendor/github.com/hashicorp/consul/api/txn.go new file mode 100644 index 000000000..65d7a16ea --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/txn.go @@ -0,0 +1,230 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "net/http" +) + +// Txn is used to manipulate the Txn API +type Txn struct { + c *Client +} + +// Txn is used to return a handle to the Txn endpoints +func (c *Client) Txn() *Txn { + return &Txn{c} +} + +// TxnOp is the internal format we send to Consul. Currently K/V, node, +// service, and check operations are supported. +type TxnOp struct { + KV *KVTxnOp + Node *NodeTxnOp + Service *ServiceTxnOp + Check *CheckTxnOp +} + +// TxnOps is a list of transaction operations. +type TxnOps []*TxnOp + +// TxnResult is the internal format we receive from Consul. +type TxnResult struct { + KV *KVPair + Node *Node + Service *CatalogService + Check *HealthCheck +} + +// TxnResults is a list of TxnResult objects. +type TxnResults []*TxnResult + +// TxnError is used to return information about an operation in a transaction. +type TxnError struct { + OpIndex int + What string +} + +// TxnErrors is a list of TxnError objects. +type TxnErrors []*TxnError + +// TxnResponse is the internal format we receive from Consul. +type TxnResponse struct { + Results TxnResults + Errors TxnErrors +} + +// KVOp constants give possible operations available in a transaction. +type KVOp string + +const ( + KVSet KVOp = "set" + KVDelete KVOp = "delete" + KVDeleteCAS KVOp = "delete-cas" + KVDeleteTree KVOp = "delete-tree" + KVCAS KVOp = "cas" + KVLock KVOp = "lock" + KVUnlock KVOp = "unlock" + KVGet KVOp = "get" + KVGetTree KVOp = "get-tree" + KVCheckSession KVOp = "check-session" + KVCheckIndex KVOp = "check-index" + KVCheckNotExists KVOp = "check-not-exists" +) + +// KVTxnOp defines a single operation inside a transaction.
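+// Which fields are relevant depends on Verb: for example, KVCAS and +// KVDeleteCAS compare against Index, while KVLock and KVUnlock require +// Session.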
+type KVTxnOp struct { + Verb KVOp + Key string + Value []byte + Flags uint64 + Index uint64 + Session string +} + +// KVTxnOps defines a set of operations to be performed inside a single +// transaction. +type KVTxnOps []*KVTxnOp + +// KVTxnResponse has the outcome of a transaction. +type KVTxnResponse struct { + Results []*KVPair + Errors TxnErrors +} + +// NodeOp constants give possible operations available in a transaction. +type NodeOp string + +const ( + NodeGet NodeOp = "get" + NodeSet NodeOp = "set" + NodeCAS NodeOp = "cas" + NodeDelete NodeOp = "delete" + NodeDeleteCAS NodeOp = "delete-cas" +) + +// NodeTxnOp defines a single operation inside a transaction. +type NodeTxnOp struct { + Verb NodeOp + Node Node +} + +// ServiceOp constants give possible operations available in a transaction. +type ServiceOp string + +const ( + ServiceGet ServiceOp = "get" + ServiceSet ServiceOp = "set" + ServiceCAS ServiceOp = "cas" + ServiceDelete ServiceOp = "delete" + ServiceDeleteCAS ServiceOp = "delete-cas" +) + +// ServiceTxnOp defines a single operation inside a transaction. +type ServiceTxnOp struct { + Verb ServiceOp + Node string + Service AgentService +} + +// CheckOp constants give possible operations available in a transaction. +type CheckOp string + +const ( + CheckGet CheckOp = "get" + CheckSet CheckOp = "set" + CheckCAS CheckOp = "cas" + CheckDelete CheckOp = "delete" + CheckDeleteCAS CheckOp = "delete-cas" +) + +// CheckTxnOp defines a single operation inside a transaction. +type CheckTxnOp struct { + Verb CheckOp + Check HealthCheck +} + +// Txn is used to apply multiple Consul operations in a single, atomic transaction. +// +// Note that Go will perform the required base64 encoding on the values +// automatically because the type is a byte slice. Transactions are defined as a +// list of operations to perform, using the different fields in the TxnOp structure +// to define operations. If any operation fails, none of the changes are applied +// to the state store. +// +// Even though this is generally a write operation, we take a QueryOptions input +// and return a QueryMeta output. If the transaction contains only read ops, then +// Consul will fast-path it to a different endpoint internally which supports +// consistency controls, but not blocking. If there are write operations then +// the request will always be routed through raft and any consistency settings +// will be ignored. +// +// Here's an example: +// +// ops := TxnOps{ +// &TxnOp{ +// KV: &KVTxnOp{ +// Verb: KVLock, +// Key: "test/lock", +// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e", +// Value: []byte("hello"), +// }, +// }, +// &TxnOp{ +// KV: &KVTxnOp{ +// Verb: KVGet, +// Key: "another/key", +// }, +// }, +// &TxnOp{ +// Check: &CheckTxnOp{ +// Verb: CheckSet, +// Check: HealthCheck{ +// Node: "foo", +// CheckID: "redis:a", +// Name: "Redis Health Check", +// Status: "passing", +// }, +// }, +// }, +// } +// ok, response, _, err := client.Txn().Txn(ops, nil) +// +// If there is a problem making the transaction request then an error will be +// returned. Otherwise, the ok value will be true if the transaction succeeded +// or false if it was rolled back. The response is a structured return value which +// will have the outcome of the transaction. Its Results member will have entries +// for each operation. For KV operations, deleted keys will have a nil entry in the +// results, and to save space, the Value of each key in the Results will be nil +// unless the operation is a KVGet.
If the transaction was rolled back, the Errors +// member will have entries referencing the index of the operation that failed +// along with an error message. +func (t *Txn) Txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) { + return t.c.txn(txn, q) +} + +func (c *Client) txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) { + r := c.newRequest("PUT", "/v1/txn") + r.setQueryOptions(q) + + r.obj = txn + rtt, resp, err := c.doRequest(r) + if err != nil { + return false, nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict { + var txnResp TxnResponse + if err := decodeBody(resp, &txnResp); err != nil { + return false, nil, nil, err + } + + return resp.StatusCode == http.StatusOK, &txnResp, qm, nil + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String()) +} diff --git a/vendor/github.com/hashicorp/consul/api/watch/funcs.go b/vendor/github.com/hashicorp/consul/api/watch/funcs.go new file mode 100644 index 000000000..e9b2d8115 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/watch/funcs.go @@ -0,0 +1,349 @@ +package watch + +import ( + "context" + "fmt" + + consulapi "github.com/hashicorp/consul/api" +) + +// watchFactory is a function that can create a new WatcherFunc +// from a parameter configuration +type watchFactory func(params map[string]interface{}) (WatcherFunc, error) + +// watchFuncFactory maps each type to a factory function +var watchFuncFactory map[string]watchFactory + +func init() { + watchFuncFactory = map[string]watchFactory{ + "key": keyWatch, + "keyprefix": keyPrefixWatch, + "services": servicesWatch, + "nodes": nodesWatch, + "service": serviceWatch, + "checks": checksWatch, + "event": eventWatch, + "connect_roots": connectRootsWatch, + "connect_leaf": connectLeafWatch, + "connect_proxy_config": connectProxyConfigWatch, + "agent_service": agentServiceWatch, + } +} + +// keyWatch is used to return a key watching function +func keyWatch(params map[string]interface{}) (WatcherFunc, error) { + stale := false + if err := assignValueBool(params, "stale", &stale); err != nil { + return nil, err + } + + var key string + if err := assignValue(params, "key", &key); err != nil { + return nil, err + } + if key == "" { + return nil, fmt.Errorf("Must specify a single key to watch") + } + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { + kv := p.client.KV() + opts := makeQueryOptionsWithContext(p, stale) + defer p.cancelFunc() + pair, meta, err := kv.Get(key, &opts) + if err != nil { + return nil, nil, err + } + if pair == nil { + return WaitIndexVal(meta.LastIndex), nil, err + } + return WaitIndexVal(meta.LastIndex), pair, err + } + return fn, nil +} + +// keyPrefixWatch is used to return a key prefix watching function +func keyPrefixWatch(params map[string]interface{}) (WatcherFunc, error) { + stale := false + if err := assignValueBool(params, "stale", &stale); err != nil { + return nil, err + } + + var prefix string + if err := assignValue(params, "prefix", &prefix); err != nil { + return nil, err + } + if prefix == "" { + return nil, fmt.Errorf("Must specify a single prefix to watch") + } + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { + kv := p.client.KV() + opts =
makeQueryOptionsWithContext(p, stale) + defer p.cancelFunc() + pairs, meta, err := kv.List(prefix, &opts) + if err != nil { + return nil, nil, err + } + return WaitIndexVal(meta.LastIndex), pairs, err + } + return fn, nil +} + +// servicesWatch is used to watch the list of available services +func servicesWatch(params map[string]interface{}) (WatcherFunc, error) { + stale := false + if err := assignValueBool(params, "stale", &stale); err != nil { + return nil, err + } + + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { + catalog := p.client.Catalog() + opts := makeQueryOptionsWithContext(p, stale) + defer p.cancelFunc() + services, meta, err := catalog.Services(&opts) + if err != nil { + return nil, nil, err + } + return WaitIndexVal(meta.LastIndex), services, err + } + return fn, nil +} + +// nodesWatch is used to watch the list of available nodes +func nodesWatch(params map[string]interface{}) (WatcherFunc, error) { + stale := false + if err := assignValueBool(params, "stale", &stale); err != nil { + return nil, err + } + + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { + catalog := p.client.Catalog() + opts := makeQueryOptionsWithContext(p, stale) + defer p.cancelFunc() + nodes, meta, err := catalog.Nodes(&opts) + if err != nil { + return nil, nil, err + } + return WaitIndexVal(meta.LastIndex), nodes, err + } + return fn, nil +} + +// serviceWatch is used to watch a specific service for changes +func serviceWatch(params map[string]interface{}) (WatcherFunc, error) { + stale := false + if err := assignValueBool(params, "stale", &stale); err != nil { + return nil, err + } + + var ( + service string + tags []string + ) + if err := assignValue(params, "service", &service); err != nil { + return nil, err + } + if service == "" { + return nil, fmt.Errorf("Must specify a single service to watch") + } + if err := assignValueStringSlice(params, "tag", &tags); err != nil { + return nil, err + } + + passingOnly := false + if err := assignValueBool(params, "passingonly", &passingOnly); err != nil { + return nil, err + } + + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { + health := p.client.Health() + opts := makeQueryOptionsWithContext(p, stale) + defer p.cancelFunc() + nodes, meta, err := health.ServiceMultipleTags(service, tags, passingOnly, &opts) + if err != nil { + return nil, nil, err + } + return WaitIndexVal(meta.LastIndex), nodes, err + } + return fn, nil +} + +// checksWatch is used to watch checks in a given state or for a given service +func checksWatch(params map[string]interface{}) (WatcherFunc, error) { + stale := false + if err := assignValueBool(params, "stale", &stale); err != nil { + return nil, err + } + + var service, state string + if err := assignValue(params, "service", &service); err != nil { + return nil, err + } + if err := assignValue(params, "state", &state); err != nil { + return nil, err + } + if service != "" && state != "" { + return nil, fmt.Errorf("Cannot specify service and state") + } + if service == "" && state == "" { + state = "any" + } + + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { + health := p.client.Health() + opts := makeQueryOptionsWithContext(p, stale) + defer p.cancelFunc() + var checks []*consulapi.HealthCheck + var meta *consulapi.QueryMeta + var err error + if state != "" { + checks, meta, err = health.State(state, &opts) + } else { + checks, meta, err = health.Checks(service, &opts) + } + if err != nil { + return nil, nil, err + } + return WaitIndexVal(meta.LastIndex), checks, err + } + return fn,
nil +} + +// eventWatch is used to watch for events, optionally filtering on name +func eventWatch(params map[string]interface{}) (WatcherFunc, error) { + // The stale setting doesn't apply to events. + + var name string + if err := assignValue(params, "name", &name); err != nil { + return nil, err + } + + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { + event := p.client.Event() + opts := makeQueryOptionsWithContext(p, false) + defer p.cancelFunc() + events, meta, err := event.List(name, &opts) + if err != nil { + return nil, nil, err + } + + // Prune to only the new events + for i := 0; i < len(events); i++ { + if WaitIndexVal(event.IDToIndex(events[i].ID)).Equal(p.lastParamVal) { + events = events[i+1:] + break + } + } + return WaitIndexVal(meta.LastIndex), events, err + } + return fn, nil +} + +// connectRootsWatch is used to watch for changes to Connect Root certificates. +func connectRootsWatch(params map[string]interface{}) (WatcherFunc, error) { + // We don't support stale since roots are cached locally in the agent. + + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { + agent := p.client.Agent() + opts := makeQueryOptionsWithContext(p, false) + defer p.cancelFunc() + + roots, meta, err := agent.ConnectCARoots(&opts) + if err != nil { + return nil, nil, err + } + + return WaitIndexVal(meta.LastIndex), roots, err + } + return fn, nil +} + +// connectLeafWatch is used to watch for changes to Connect Leaf certificates +// for given local service id. +func connectLeafWatch(params map[string]interface{}) (WatcherFunc, error) { + // We don't support stale since certs are cached locally in the agent. + + var serviceName string + if err := assignValue(params, "service", &serviceName); err != nil { + return nil, err + } + + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { + agent := p.client.Agent() + opts := makeQueryOptionsWithContext(p, false) + defer p.cancelFunc() + + leaf, meta, err := agent.ConnectCALeaf(serviceName, &opts) + if err != nil { + return nil, nil, err + } + + return WaitIndexVal(meta.LastIndex), leaf, err + } + return fn, nil +} + +// connectProxyConfigWatch is used to watch for changes to Connect managed proxy +// configuration. Note that this state is agent-local so the watch mechanism +// uses `hash` rather than `index` for deciding whether to block. +func connectProxyConfigWatch(params map[string]interface{}) (WatcherFunc, error) { + // We don't support consistency modes since it's agent local data + + var proxyServiceID string + if err := assignValue(params, "proxy_service_id", &proxyServiceID); err != nil { + return nil, err + } + + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { + agent := p.client.Agent() + opts := makeQueryOptionsWithContext(p, false) + defer p.cancelFunc() + + config, _, err := agent.ConnectProxyConfig(proxyServiceID, &opts) + if err != nil { + return nil, nil, err + } + + // Return string ContentHash since we don't have Raft indexes to block on. + return WaitHashVal(config.ContentHash), config, err + } + return fn, nil +} + +// agentServiceWatch is used to watch for changes to a single service instance +// on the local agent. Note that this state is agent-local so the watch +// mechanism uses `hash` rather than `index` for deciding whether to block. 
+func agentServiceWatch(params map[string]interface{}) (WatcherFunc, error) { + // We don't support consistency modes since it's agent local data + + var serviceID string + if err := assignValue(params, "service_id", &serviceID); err != nil { + return nil, err + } + + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { + agent := p.client.Agent() + opts := makeQueryOptionsWithContext(p, false) + defer p.cancelFunc() + + svc, _, err := agent.Service(serviceID, &opts) + if err != nil { + return nil, nil, err + } + + // Return string ContentHash since we don't have Raft indexes to block on. + return WaitHashVal(svc.ContentHash), svc, err + } + return fn, nil +} + +func makeQueryOptionsWithContext(p *Plan, stale bool) consulapi.QueryOptions { + ctx, cancel := context.WithCancel(context.Background()) + p.setCancelFunc(cancel) + opts := consulapi.QueryOptions{AllowStale: stale} + switch param := p.lastParamVal.(type) { + case WaitIndexVal: + opts.WaitIndex = uint64(param) + case WaitHashVal: + opts.WaitHash = string(param) + } + return *opts.WithContext(ctx) +} diff --git a/vendor/github.com/hashicorp/consul/api/watch/plan.go b/vendor/github.com/hashicorp/consul/api/watch/plan.go new file mode 100644 index 000000000..f0e8a6832 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/watch/plan.go @@ -0,0 +1,167 @@ +package watch + +import ( + "context" + "fmt" + "log" + "os" + "reflect" + "time" + + consulapi "github.com/hashicorp/consul/api" +) + +const ( + // retryInterval is the base retry value + retryInterval = 5 * time.Second + + // maxBackoffTime is the maximum backoff time, used to prevent + // exponential runaway + maxBackoffTime = 180 * time.Second +) + +// Run is used to run a watch plan +func (p *Plan) Run(address string) error { + return p.RunWithConfig(address, nil) +} + +// RunWithConfig is used to run a watch plan with a given Consul client +// configuration +func (p *Plan) RunWithConfig(address string, conf *consulapi.Config) error { + // Setup the client + p.address = address + if conf == nil { + conf = consulapi.DefaultConfig() + } + conf.Address = address + conf.Datacenter = p.Datacenter + conf.Token = p.Token + client, err := consulapi.NewClient(conf) + if err != nil { + return fmt.Errorf("Failed to connect to agent: %v", err) + } + + // Create the logger + output := p.LogOutput + if output == nil { + output = os.Stderr + } + logger := log.New(output, "", log.LstdFlags) + + return p.RunWithClientAndLogger(client, logger) +} + +// RunWithClientAndLogger runs a watch plan using an external client and +// log.Logger instance. Using this, the plan's Datacenter, Token and LogOutput +// fields are ignored and the passed client is expected to be configured as +// needed.
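+// For example (hypothetical, assuming an existing plan, client and logger): +// +// go plan.RunWithClientAndLogger(client, logger) +// defer plan.Stop()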
+func (p *Plan) RunWithClientAndLogger(client *consulapi.Client, + logger *log.Logger) error { + + p.client = client + + // Loop until we are canceled + failures := 0 +OUTER: + for !p.shouldStop() { + // Invoke the watcher + blockParamVal, result, err := p.Watcher(p) + + // Check if we should terminate since the function + // could have blocked for a while + if p.shouldStop() { + break + } + + // Handle an error in the watch function + if err != nil { + // Perform an exponential backoff + failures++ + if blockParamVal == nil { + p.lastParamVal = nil + } else { + p.lastParamVal = blockParamVal.Next(p.lastParamVal) + } + retry := retryInterval * time.Duration(failures*failures) + if retry > maxBackoffTime { + retry = maxBackoffTime + } + logger.Printf("[ERR] consul.watch: Watch (type: %s) errored: %v, retry in %v", + p.Type, err, retry) + select { + case <-time.After(retry): + continue OUTER + case <-p.stopCh: + return nil + } + } + + // Clear the failures + failures = 0 + + // If the index is unchanged do nothing + if p.lastParamVal != nil && p.lastParamVal.Equal(blockParamVal) { + continue + } + + // Update the index, look for change + oldParamVal := p.lastParamVal + p.lastParamVal = blockParamVal.Next(oldParamVal) + if oldParamVal != nil && reflect.DeepEqual(p.lastResult, result) { + continue + } + + // Handle the updated result + p.lastResult = result + // If a hybrid handler exists, use that + if p.HybridHandler != nil { + p.HybridHandler(blockParamVal, result) + } else if p.Handler != nil { + idx, ok := blockParamVal.(WaitIndexVal) + if !ok { + logger.Printf("[ERR] consul.watch: Handler only supports index-based " + + "watches, but a non-index watch was run. Skipping Handler.") + continue + } + p.Handler(uint64(idx), result) + } + } + return nil +} + +// Stop is used to stop running the watch plan +func (p *Plan) Stop() { + p.stopLock.Lock() + defer p.stopLock.Unlock() + if p.stop { + return + } + p.stop = true + if p.cancelFunc != nil { + p.cancelFunc() + } + close(p.stopCh) +} + +func (p *Plan) shouldStop() bool { + select { + case <-p.stopCh: + return true + default: + return false + } +} + +func (p *Plan) setCancelFunc(cancel context.CancelFunc) { + p.stopLock.Lock() + defer p.stopLock.Unlock() + if p.shouldStop() { + // The watch is already stopped; run the new cancel func right away to + // stop the in-flight watcher query. + cancel() + return + } + p.cancelFunc = cancel +} + +func (p *Plan) IsStopped() bool { + p.stopLock.Lock() + defer p.stopLock.Unlock() + return p.stop +} diff --git a/vendor/github.com/hashicorp/consul/api/watch/watch.go b/vendor/github.com/hashicorp/consul/api/watch/watch.go new file mode 100644 index 000000000..3690a20c9 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/watch/watch.go @@ -0,0 +1,289 @@ +package watch + +import ( + "context" + "fmt" + "io" + "sync" + "time" + + consulapi "github.com/hashicorp/consul/api" + "github.com/mitchellh/mapstructure" +) + +const DefaultTimeout = 10 * time.Second + +// Plan is the parsed version of a watch specification. A watch provides +// the details of a query, which generates a view into the Consul data store. +// This view is watched for changes and a handler is invoked to take any +// appropriate actions. +type Plan struct { + Datacenter string + Token string + Type string + HandlerType string + Exempt map[string]interface{} + + Watcher WatcherFunc + // Handler is kept for backward compatibility but only supports watches based + // on index param. To support hash based watches, set HybridHandler instead.
+ Handler HandlerFunc + HybridHandler HybridHandlerFunc + LogOutput io.Writer + + address string + client *consulapi.Client + lastParamVal BlockingParamVal + lastResult interface{} + + stop bool + stopCh chan struct{} + stopLock sync.Mutex + cancelFunc context.CancelFunc +} + +type HttpHandlerConfig struct { + Path string `mapstructure:"path"` + Method string `mapstructure:"method"` + Timeout time.Duration `mapstructure:"-"` + TimeoutRaw string `mapstructure:"timeout"` + Header map[string][]string `mapstructure:"header"` + TLSSkipVerify bool `mapstructure:"tls_skip_verify"` +} + +// BlockingParamVal is an interface representing the common operations needed for +// different styles of blocking. It's used to abstract the core watch plan from +// whether we are performing index-based or hash-based blocking. +type BlockingParamVal interface { + // Equal returns whether the other param value should be considered equal + // (i.e. representing no change in the watched resource). Equal must not panic + // if other is nil. + Equal(other BlockingParamVal) bool + + // Next is called when deciding which value to use on the next blocking call. + // It assumes the BlockingParamVal value it is called on is the most recent one + // returned and passes the previous one which may be nil as context. This + // allows types to customize logic around ordering without assuming there is + // an order. For example WaitIndexVal can check that the index didn't go + // backwards and if it did then reset to 0. Most other cases should just + // return themselves (the most recent value) to be used in the next request. + Next(previous BlockingParamVal) BlockingParamVal +} + +// WaitIndexVal is a type representing a Consul index that implements +// BlockingParamVal. +type WaitIndexVal uint64 + +// Equal implements BlockingParamVal +func (idx WaitIndexVal) Equal(other BlockingParamVal) bool { + if otherIdx, ok := other.(WaitIndexVal); ok { + return idx == otherIdx + } + return false +} + +// Next implements BlockingParamVal +func (idx WaitIndexVal) Next(previous BlockingParamVal) BlockingParamVal { + if previous == nil { + return idx + } + prevIdx, ok := previous.(WaitIndexVal) + if ok && prevIdx > idx { + // This value is smaller than the previous index, reset. + return WaitIndexVal(0) + } + return idx +} + +// WaitHashVal is a type representing a Consul content hash that implements +// BlockingParamVal. +type WaitHashVal string + +// Equal implements BlockingParamVal +func (h WaitHashVal) Equal(other BlockingParamVal) bool { + if otherHash, ok := other.(WaitHashVal); ok { + return h == otherHash + } + return false +} + +// Next implements BlockingParamVal +func (h WaitHashVal) Next(previous BlockingParamVal) BlockingParamVal { + return h +} + +// WatcherFunc is used to watch for a diff. +type WatcherFunc func(*Plan) (BlockingParamVal, interface{}, error) + +// HandlerFunc is used to handle new data. It only works for index-based watches +// (which is almost all end points currently) and is kept for backwards +// compatibility until more places can make use of hash-based watches too. +type HandlerFunc func(uint64, interface{}) + +// HybridHandlerFunc is used to handle new data. It can support either +// index-based or hash-based watches via the BlockingParamVal. 
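+// For example (hypothetical, from outside this package): +// +// plan.HybridHandler = func(val watch.BlockingParamVal, raw interface{}) { +// // raw's concrete type depends on the watch type, e.g. consulapi.KVPairs +// // for "keyprefix" watches. +// }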
+type HybridHandlerFunc func(BlockingParamVal, interface{})
+
+// Parse takes a watch query and compiles it into a WatchPlan or an error
+func Parse(params map[string]interface{}) (*Plan, error) {
+	return ParseExempt(params, nil)
+}
+
+// ParseExempt takes a watch query and compiles it into a WatchPlan or an error.
+// Any exempt parameters are stored in the Exempt map.
+func ParseExempt(params map[string]interface{}, exempt []string) (*Plan, error) {
+	plan := &Plan{
+		stopCh: make(chan struct{}),
+		Exempt: make(map[string]interface{}),
+	}
+
+	// Parse the generic parameters
+	if err := assignValue(params, "datacenter", &plan.Datacenter); err != nil {
+		return nil, err
+	}
+	if err := assignValue(params, "token", &plan.Token); err != nil {
+		return nil, err
+	}
+	if err := assignValue(params, "type", &plan.Type); err != nil {
+		return nil, err
+	}
+	// Ensure there is a watch type
+	if plan.Type == "" {
+		return nil, fmt.Errorf("Watch type must be specified")
+	}
+
+	// Get the specific handler
+	if err := assignValue(params, "handler_type", &plan.HandlerType); err != nil {
+		return nil, err
+	}
+	switch plan.HandlerType {
+	case "http":
+		if _, ok := params["http_handler_config"]; !ok {
+			return nil, fmt.Errorf("Handler type 'http' requires 'http_handler_config' to be set")
+		}
+		config, err := parseHttpHandlerConfig(params["http_handler_config"])
+		if err != nil {
+			return nil, fmt.Errorf("Failed to parse 'http_handler_config': %v", err)
+		}
+		plan.Exempt["http_handler_config"] = config
+		delete(params, "http_handler_config")
+
+	case "script":
+		// Let the caller check for configuration in exempt parameters
+	}
+
+	// Look for a factory function
+	factory := watchFuncFactory[plan.Type]
+	if factory == nil {
+		return nil, fmt.Errorf("Unsupported watch type: %s", plan.Type)
+	}
+
+	// Get the watch func
+	fn, err := factory(params)
+	if err != nil {
+		return nil, err
+	}
+	plan.Watcher = fn
+
+	// Remove the exempt parameters
+	if len(exempt) > 0 {
+		for _, ex := range exempt {
+			val, ok := params[ex]
+			if ok {
+				plan.Exempt[ex] = val
+				delete(params, ex)
+			}
+		}
+	}
+
+	// Ensure all parameters are consumed
+	if len(params) != 0 {
+		var bad []string
+		for key := range params {
+			bad = append(bad, key)
+		}
+		return nil, fmt.Errorf("Invalid parameters: %v", bad)
+	}
+	return plan, nil
+}
+
+// assignValue is used to extract a value ensuring it is a string
+func assignValue(params map[string]interface{}, name string, out *string) error {
+	if raw, ok := params[name]; ok {
+		val, ok := raw.(string)
+		if !ok {
+			return fmt.Errorf("Expecting %s to be a string", name)
+		}
+		*out = val
+		delete(params, name)
+	}
+	return nil
+}
+
+// assignValueBool is used to extract a value ensuring it is a bool
+func assignValueBool(params map[string]interface{}, name string, out *bool) error {
+	if raw, ok := params[name]; ok {
+		val, ok := raw.(bool)
+		if !ok {
+			return fmt.Errorf("Expecting %s to be a boolean", name)
+		}
+		*out = val
+		delete(params, name)
+	}
+	return nil
+}
+
+// assignValueStringSlice is used to extract a value ensuring it is either a string or a slice of strings
+func assignValueStringSlice(params map[string]interface{}, name string, out *[]string) error {
+	if raw, ok := params[name]; ok {
+		var tmp []string
+		switch raw.(type) {
+		case string:
+			tmp = make([]string, 1)
+			tmp[0] = raw.(string)
+		case []string:
+			l := len(raw.([]string))
+			tmp = make([]string, l)
+			copy(tmp, raw.([]string))
+		case []interface{}:
+			l := len(raw.([]interface{}))
+			tmp = make([]string, l)
+			for i, v := range raw.([]interface{}) {
+				if s, ok := v.(string); ok {
+					tmp[i] = s
+				} else {
+					return fmt.Errorf("Index %d of %s expected to be string", i, name)
+				}
+			}
+		default:
+			return fmt.Errorf("Expecting %s to be a string or []string", name)
+		}
+		*out = tmp
+		delete(params, name)
+	}
+	return nil
+}
+
+// parseHttpHandlerConfig parses the 'http_handler_config' parameters
+func parseHttpHandlerConfig(configParams interface{}) (*HttpHandlerConfig, error) {
+	var config HttpHandlerConfig
+	if err := mapstructure.Decode(configParams, &config); err != nil {
+		return nil, err
+	}
+
+	if config.Path == "" {
+		return nil, fmt.Errorf("Requires 'path' to be set")
+	}
+	if config.Method == "" {
+		config.Method = "POST"
+	}
+	if config.TimeoutRaw == "" {
+		config.Timeout = DefaultTimeout
+	} else if timeout, err := time.ParseDuration(config.TimeoutRaw); err != nil {
+		return nil, fmt.Errorf("Failed to parse timeout: %v", err)
+	} else {
+		config.Timeout = timeout
+	}
+
+	return &config, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/sdk/freeport/freeport.go b/vendor/github.com/hashicorp/consul/sdk/freeport/freeport.go
new file mode 100644
index 000000000..806449ba4
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/sdk/freeport/freeport.go
@@ -0,0 +1,139 @@
+// Package freeport provides a helper for allocating free ports across multiple
+// processes on the same machine.
+package freeport
+
+import (
+	"fmt"
+	"math/rand"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/mitchellh/go-testing-interface"
+)
+
+const (
+	// blockSize is the size of the allocated port block. Ports are given out
+	// consecutively from that block with roll-over for the lifetime of the
+	// application/test run.
+	blockSize = 1500
+
+	// maxBlocks is the number of available port blocks.
+	// lowPort + maxBlocks * blockSize must be less than 65535.
+	maxBlocks = 30
+
+	// lowPort is the lowest port number that should be used.
+	lowPort = 10000
+
+	// attempts is how often we try to allocate a port block
+	// before giving up.
+	attempts = 10
+)
+
+var (
+	// firstPort is the first port of the allocated block.
+	firstPort int
+
+	// lockLn is the system-wide mutex for the port block.
+	lockLn net.Listener
+
+	// mu guards port
+	mu sync.Mutex
+
+	// once is used to do the initialization on the first call to retrieve free
+	// ports
+	once sync.Once
+
+	// port is the last allocated port.
+	port int
+)
+
+// initialize is used to initialize freeport.
+func initialize() {
+	if lowPort+maxBlocks*blockSize > 65535 {
+		panic("freeport: block size too big or too many blocks requested")
+	}
+
+	rand.Seed(time.Now().UnixNano())
+	firstPort, lockLn = alloc()
+}
+
+// alloc reserves a port block for exclusive use for the lifetime of the
+// application. lockLn serves as a system-wide mutex for the port block and is
+// implemented as a TCP listener which is bound to the firstPort and which will
+// be automatically released when the application terminates.
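+//
+// For example, with lowPort 10000 and blockSize 1500, block 3 spans ports
+// 14500-15999; 14500 is held by the lock listener and Free hands out
+// 14501 onwards.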
+func alloc() (int, net.Listener) { + for i := 0; i < attempts; i++ { + block := int(rand.Int31n(int32(maxBlocks))) + firstPort := lowPort + block*blockSize + ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", firstPort)) + if err != nil { + continue + } + // log.Printf("[DEBUG] freeport: allocated port block %d (%d-%d)", block, firstPort, firstPort+blockSize-1) + return firstPort, ln + } + panic("freeport: cannot allocate port block") +} + +func tcpAddr(ip string, port int) *net.TCPAddr { + return &net.TCPAddr{IP: net.ParseIP(ip), Port: port} +} + +// Get wraps the Free function and panics on any failure retrieving ports. +func Get(n int) (ports []int) { + ports, err := Free(n) + if err != nil { + panic(err) + } + + return ports +} + +// GetT is suitable for use when retrieving unused ports in tests. If there is +// an error retrieving free ports, the test will be failed. +func GetT(t testing.T, n int) (ports []int) { + ports, err := Free(n) + if err != nil { + t.Fatalf("Failed retrieving free port: %v", err) + } + + return ports +} + +// Free returns a list of free ports from the allocated port block. It is safe +// to call this method concurrently. Ports have been tested to be available on +// 127.0.0.1 TCP but there is no guarantee that they will remain free in the +// future. +func Free(n int) (ports []int, err error) { + mu.Lock() + defer mu.Unlock() + + if n > blockSize-1 { + return nil, fmt.Errorf("freeport: block size too small") + } + + // Reserve a port block + once.Do(initialize) + + for len(ports) < n { + port++ + + // roll-over the port + if port < firstPort+1 || port >= firstPort+blockSize { + port = firstPort + 1 + } + + // if the port is in use then skip it + ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", port)) + if err != nil { + // log.Println("[DEBUG] freeport: port already in use: ", port) + continue + } + ln.Close() + + ports = append(ports, port) + } + // log.Println("[DEBUG] freeport: free ports:", ports) + return ports, nil +} diff --git a/vendor/github.com/hashicorp/consul/sdk/testutil/README.md b/vendor/github.com/hashicorp/consul/sdk/testutil/README.md new file mode 100644 index 000000000..2462d55df --- /dev/null +++ b/vendor/github.com/hashicorp/consul/sdk/testutil/README.md @@ -0,0 +1,78 @@ +Consul Testing Utilities +======================== + +This package provides some generic helpers to facilitate testing in Consul. + +TestServer +========== + +TestServer is a harness for managing Consul agents and initializing them with +test data. Using it, you can form test clusters, create services, add health +checks, manipulate the K/V store, etc. This test harness is completely decoupled +from Consul's core and API client, meaning it can be easily imported and used in +external unit tests for various applications. It works by invoking the Consul +CLI, which means it is a requirement to have Consul installed in the `$PATH`. + +Following is an example usage: + +```go +package my_program + +import ( + "testing" + + "github.com/hashicorp/consul/consul/structs" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestFoo_bar(t *testing.T) { + // Create a test Consul server + srv1, err := testutil.NewTestServer() + if err != nil { + t.Fatal(err) + } + defer srv1.Stop() + + // Create a secondary server, passing in configuration + // to avoid bootstrapping as we are forming a cluster. 
+	srv2, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
+		c.Bootstrap = false
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer srv2.Stop()
+
+	// Join the servers together
+	srv1.JoinLAN(t, srv2.LANAddr)
+
+	// Create a test key/value pair
+	srv1.SetKV(t, "foo", []byte("bar"))
+
+	// Create lots of test key/value pairs
+	srv1.PopulateKV(t, map[string][]byte{
+		"bar": []byte("123"),
+		"baz": []byte("456"),
+	})
+
+	// Create a service
+	srv1.AddService(t, "redis", structs.HealthPassing, []string{"master"})
+
+	// Create a service reachable at a fixed address and port in the
+	// code under test
+	srv1.AddAddressableService(t, "redis", structs.HealthPassing, "127.0.0.1", 6379, []string{"master"})
+
+	// Create a service check
+	srv1.AddCheck(t, "service:redis", "redis", structs.HealthPassing)
+
+	// Create a node check
+	srv1.AddCheck(t, "mem", "", structs.HealthCritical)
+
+	// The HTTPAddr field contains the address of the Consul
+	// API on the new test server instance.
+	println(srv1.HTTPAddr)
+
+	// All functions also have a wrapper method to limit the passing of "t"
+	wrap := srv1.Wrap(t)
+	wrap.SetKV("foo", []byte("bar"))
+}
+```
diff --git a/vendor/github.com/hashicorp/consul/sdk/testutil/io.go b/vendor/github.com/hashicorp/consul/sdk/testutil/io.go
new file mode 100644
index 000000000..a137fc6a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/sdk/testutil/io.go
@@ -0,0 +1,68 @@
+package testutil
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+)
+
+// tmpdir is the base directory for all temporary directories
+// and files created with TempDir and TempFile. This could be
+// achieved by setting a system environment variable but then
+// the test execution would depend on whether or not the
+// environment variable is set.
+//
+// On macOS the temp base directory is quite long and that
+// triggers a problem with some tests that bind to UNIX sockets
+// where the filename seems to be too long. Using a shorter name
+// fixes this and makes the paths more readable.
+//
+// It also provides a single base directory for cleanup.
+var tmpdir = "/tmp/consul-test"
+
+func init() {
+	if err := os.MkdirAll(tmpdir, 0755); err != nil {
+		fmt.Printf("Cannot create %s. Reverting to /tmp\n", tmpdir)
+		tmpdir = "/tmp"
+	}
+}
+
+// TempDir creates a temporary directory within tmpdir
+// with the name 'testname-name'. If the directory cannot
+// be created t.Fatal is called.
+func TempDir(t *testing.T, name string) string {
+	if t != nil && t.Name() != "" {
+		name = t.Name() + "-" + name
+	}
+	name = strings.Replace(name, "/", "_", -1)
+	d, err := ioutil.TempDir(tmpdir, name)
+	if err != nil {
+		if t == nil {
+			panic(err)
+		}
+		t.Fatalf("err: %s", err)
+	}
+	return d
+}
+
+// TempFile creates a temporary file within tmpdir
+// with the name 'testname-name'. If the file cannot
+// be created t.Fatal is called. If a temporary directory
+// has been created before, consider storing the file
+// inside this directory to avoid double cleanup.
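+//
+// A hypothetical use (the file name is illustrative):
+//
+//	f := TempFile(t, "agent-config")
+//	defer os.Remove(f.Name())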
+func TempFile(t *testing.T, name string) *os.File { + if t != nil && t.Name() != "" { + name = t.Name() + "-" + name + } + name = strings.Replace(name, "/", "_", -1) + f, err := ioutil.TempFile(tmpdir, name) + if err != nil { + if t == nil { + panic(err) + } + t.Fatalf("err: %s", err) + } + return f +} diff --git a/vendor/github.com/hashicorp/consul/sdk/testutil/retry/retry.go b/vendor/github.com/hashicorp/consul/sdk/testutil/retry/retry.go new file mode 100644 index 000000000..2ef3c4c0e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/sdk/testutil/retry/retry.go @@ -0,0 +1,207 @@ +// Package retry provides support for repeating operations in tests. +// +// A sample retry operation looks like this: +// +// func TestX(t *testing.T) { +// retry.Run(t, func(r *retry.R) { +// if err := foo(); err != nil { +// r.Fatal("f: ", err) +// } +// }) +// } +// +package retry + +import ( + "bytes" + "fmt" + "runtime" + "strings" + "sync" + "time" +) + +// Failer is an interface compatible with testing.T. +type Failer interface { + // Log is called for the final test output + Log(args ...interface{}) + + // FailNow is called when the retrying is abandoned. + FailNow() +} + +// R provides context for the retryer. +type R struct { + fail bool + output []string +} + +func (r *R) FailNow() { + r.fail = true + runtime.Goexit() +} + +func (r *R) Fatal(args ...interface{}) { + r.log(fmt.Sprint(args...)) + r.FailNow() +} + +func (r *R) Fatalf(format string, args ...interface{}) { + r.log(fmt.Sprintf(format, args...)) + r.FailNow() +} + +func (r *R) Error(args ...interface{}) { + r.log(fmt.Sprint(args...)) + r.fail = true +} + +func (r *R) Errorf(format string, args ...interface{}) { + r.log(fmt.Sprintf(format, args...)) + r.fail = true +} + +func (r *R) Check(err error) { + if err != nil { + r.log(err.Error()) + r.FailNow() + } +} + +func (r *R) log(s string) { + r.output = append(r.output, decorate(s)) +} + +func decorate(s string) string { + _, file, line, ok := runtime.Caller(3) + if ok { + n := strings.LastIndex(file, "/") + if n >= 0 { + file = file[n+1:] + } + } else { + file = "???" + line = 1 + } + return fmt.Sprintf("%s:%d: %s", file, line, s) +} + +func Run(t Failer, f func(r *R)) { + run(DefaultFailer(), t, f) +} + +func RunWith(r Retryer, t Failer, f func(r *R)) { + run(r, t, f) +} + +func dedup(a []string) string { + if len(a) == 0 { + return "" + } + m := map[string]int{} + for _, s := range a { + m[s] = m[s] + 1 + } + var b bytes.Buffer + for _, s := range a { + if _, ok := m[s]; ok { + b.WriteString(s) + b.WriteRune('\n') + delete(m, s) + } + } + return string(b.Bytes()) +} + +func run(r Retryer, t Failer, f func(r *R)) { + rr := &R{} + fail := func() { + out := dedup(rr.output) + if out != "" { + t.Log(out) + } + t.FailNow() + } + for r.NextOr(fail) { + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + f(rr) + }() + wg.Wait() + if rr.fail { + rr.fail = false + continue + } + break + } +} + +// DefaultFailer provides default retry.Run() behavior for unit tests. +func DefaultFailer() *Timer { + return &Timer{Timeout: 7 * time.Second, Wait: 25 * time.Millisecond} +} + +// TwoSeconds repeats an operation for two seconds and waits 25ms in between. +func TwoSeconds() *Timer { + return &Timer{Timeout: 2 * time.Second, Wait: 25 * time.Millisecond} +} + +// ThreeTimes repeats an operation three times and waits 25ms in between. 
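+//
+// A hypothetical use with RunWith, where checkService stands in for any
+// assertion that may need a few tries to pass:
+//
+//	retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
+//		if err := checkService(); err != nil {
+//			r.Fatal(err)
+//		}
+//	})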
+func ThreeTimes() *Counter { + return &Counter{Count: 3, Wait: 25 * time.Millisecond} +} + +// Retryer provides an interface for repeating operations +// until they succeed or an exit condition is met. +type Retryer interface { + // NextOr returns true if the operation should be repeated. + // Otherwise, it calls fail and returns false. + NextOr(fail func()) bool +} + +// Counter repeats an operation a given number of +// times and waits between subsequent operations. +type Counter struct { + Count int + Wait time.Duration + + count int +} + +func (r *Counter) NextOr(fail func()) bool { + if r.count == r.Count { + fail() + return false + } + if r.count > 0 { + time.Sleep(r.Wait) + } + r.count++ + return true +} + +// Timer repeats an operation for a given amount +// of time and waits between subsequent operations. +type Timer struct { + Timeout time.Duration + Wait time.Duration + + // stop is the timeout deadline. + // Set on the first invocation of Next(). + stop time.Time +} + +func (r *Timer) NextOr(fail func()) bool { + if r.stop.IsZero() { + r.stop = time.Now().Add(r.Timeout) + return true + } + if time.Now().After(r.stop) { + fail() + return false + } + time.Sleep(r.Wait) + return true +} diff --git a/vendor/github.com/hashicorp/consul/sdk/testutil/server.go b/vendor/github.com/hashicorp/consul/sdk/testutil/server.go new file mode 100644 index 000000000..4005eea49 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/sdk/testutil/server.go @@ -0,0 +1,473 @@ +package testutil + +// TestServer is a test helper. It uses a fork/exec model to create +// a test Consul server instance in the background and initialize it +// with some data and/or services. The test server can then be used +// to run a unit test, and offers an easy API to tear itself down +// when the test has completed. The only prerequisite is to have a consul +// binary available on the $PATH. +// +// This package does not use Consul's official API client. This is +// because we use TestServer to test the API client, which would +// otherwise cause an import cycle. + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/consul/sdk/freeport" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-uuid" + "github.com/pkg/errors" +) + +// TestPerformanceConfig configures the performance parameters. +type TestPerformanceConfig struct { + RaftMultiplier uint `json:"raft_multiplier,omitempty"` +} + +// TestPortConfig configures the various ports used for services +// provided by the Consul server. +type TestPortConfig struct { + DNS int `json:"dns,omitempty"` + HTTP int `json:"http,omitempty"` + HTTPS int `json:"https,omitempty"` + SerfLan int `json:"serf_lan,omitempty"` + SerfWan int `json:"serf_wan,omitempty"` + Server int `json:"server,omitempty"` + ProxyMinPort int `json:"proxy_min_port,omitempty"` + ProxyMaxPort int `json:"proxy_max_port,omitempty"` +} + +// TestAddressConfig contains the bind addresses for various +// components of the Consul server. +type TestAddressConfig struct { + HTTP string `json:"http,omitempty"` +} + +// TestNetworkSegment contains the configuration for a network segment. +type TestNetworkSegment struct { + Name string `json:"name"` + Bind string `json:"bind"` + Port int `json:"port"` + Advertise string `json:"advertise"` +} + +// TestServerConfig is the main server configuration struct. 
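+// It is marshaled to JSON and handed to the consul binary via
+// -config-file, so the json tags below mirror agent configuration keys.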
+type TestServerConfig struct {
+	NodeName            string                 `json:"node_name"`
+	NodeID              string                 `json:"node_id"`
+	NodeMeta            map[string]string      `json:"node_meta,omitempty"`
+	Performance         *TestPerformanceConfig `json:"performance,omitempty"`
+	Bootstrap           bool                   `json:"bootstrap,omitempty"`
+	Server              bool                   `json:"server,omitempty"`
+	DataDir             string                 `json:"data_dir,omitempty"`
+	Datacenter          string                 `json:"datacenter,omitempty"`
+	Segments            []TestNetworkSegment   `json:"segments"`
+	DisableCheckpoint   bool                   `json:"disable_update_check"`
+	LogLevel            string                 `json:"log_level,omitempty"`
+	Bind                string                 `json:"bind_addr,omitempty"`
+	Addresses           *TestAddressConfig     `json:"addresses,omitempty"`
+	Ports               *TestPortConfig        `json:"ports,omitempty"`
+	RaftProtocol        int                    `json:"raft_protocol,omitempty"`
+	ACLMasterToken      string                 `json:"acl_master_token,omitempty"`
+	ACLDatacenter       string                 `json:"acl_datacenter,omitempty"`
+	PrimaryDatacenter   string                 `json:"primary_datacenter,omitempty"`
+	ACLDefaultPolicy    string                 `json:"acl_default_policy,omitempty"`
+	ACLEnforceVersion8  bool                   `json:"acl_enforce_version_8"`
+	ACL                 TestACLs               `json:"acl,omitempty"`
+	Encrypt             string                 `json:"encrypt,omitempty"`
+	CAFile              string                 `json:"ca_file,omitempty"`
+	CertFile            string                 `json:"cert_file,omitempty"`
+	KeyFile             string                 `json:"key_file,omitempty"`
+	VerifyIncoming      bool                   `json:"verify_incoming,omitempty"`
+	VerifyIncomingRPC   bool                   `json:"verify_incoming_rpc,omitempty"`
+	VerifyIncomingHTTPS bool                   `json:"verify_incoming_https,omitempty"`
+	VerifyOutgoing      bool                   `json:"verify_outgoing,omitempty"`
+	EnableScriptChecks  bool                   `json:"enable_script_checks,omitempty"`
+	Connect             map[string]interface{} `json:"connect,omitempty"`
+	EnableDebug         bool                   `json:"enable_debug,omitempty"`
+	ReadyTimeout        time.Duration          `json:"-"`
+	Stdout, Stderr      io.Writer              `json:"-"`
+	Args                []string               `json:"-"`
+}
+
+type TestACLs struct {
+	Enabled             bool       `json:"enabled,omitempty"`
+	TokenReplication    bool       `json:"enable_token_replication,omitempty"`
+	PolicyTTL           string     `json:"policy_ttl,omitempty"`
+	TokenTTL            string     `json:"token_ttl,omitempty"`
+	DownPolicy          string     `json:"down_policy,omitempty"`
+	DefaultPolicy       string     `json:"default_policy,omitempty"`
+	EnableKeyListPolicy bool       `json:"enable_key_list_policy,omitempty"`
+	Tokens              TestTokens `json:"tokens,omitempty"`
+	DisabledTTL         string     `json:"disabled_ttl,omitempty"`
+}
+
+type TestTokens struct {
+	Master      string `json:"master,omitempty"`
+	Replication string `json:"replication,omitempty"`
+	AgentMaster string `json:"agent_master,omitempty"`
+	Default     string `json:"default,omitempty"`
+	Agent       string `json:"agent,omitempty"`
+}
+
+// ServerConfigCallback is a function interface which can be
+// passed to NewTestServerConfig to modify the server config.
+type ServerConfigCallback func(c *TestServerConfig)
+
+// defaultServerConfig returns a new TestServerConfig struct populated
+// with usable defaults. Listen ports are drawn from a freeport block
+// rather than being derived from a fixed base.
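+// The six ports requested from freeport map, in order, to DNS, HTTP,
+// HTTPS, SerfLan, SerfWan and Server.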
+func defaultServerConfig() *TestServerConfig { + nodeID, err := uuid.GenerateUUID() + if err != nil { + panic(err) + } + + ports := freeport.Get(6) + return &TestServerConfig{ + NodeName: "node-" + nodeID, + NodeID: nodeID, + DisableCheckpoint: true, + Performance: &TestPerformanceConfig{ + RaftMultiplier: 1, + }, + Bootstrap: true, + Server: true, + LogLevel: "debug", + Bind: "127.0.0.1", + Addresses: &TestAddressConfig{}, + Ports: &TestPortConfig{ + DNS: ports[0], + HTTP: ports[1], + HTTPS: ports[2], + SerfLan: ports[3], + SerfWan: ports[4], + Server: ports[5], + }, + ReadyTimeout: 10 * time.Second, + Connect: map[string]interface{}{ + "enabled": true, + "ca_config": map[string]interface{}{ + // const TestClusterID causes import cycle so hard code it here. + "cluster_id": "11111111-2222-3333-4444-555555555555", + }, + "proxy": map[string]interface{}{ + "allow_managed_api_registration": true, + }, + }, + } +} + +// TestService is used to serialize a service definition. +type TestService struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Address string `json:",omitempty"` + Port int `json:",omitempty"` +} + +// TestCheck is used to serialize a check definition. +type TestCheck struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + ServiceID string `json:",omitempty"` + TTL string `json:",omitempty"` +} + +// TestKVResponse is what we use to decode KV data. +type TestKVResponse struct { + Value string +} + +// TestServer is the main server wrapper struct. +type TestServer struct { + cmd *exec.Cmd + Config *TestServerConfig + + HTTPAddr string + HTTPSAddr string + LANAddr string + WANAddr string + + HTTPClient *http.Client + + tmpdir string +} + +// NewTestServer is an easy helper method to create a new Consul +// test server with the most basic configuration. +func NewTestServer() (*TestServer, error) { + return NewTestServerConfigT(nil, nil) +} + +func NewTestServerConfig(cb ServerConfigCallback) (*TestServer, error) { + return NewTestServerConfigT(nil, cb) +} + +// NewTestServerConfig creates a new TestServer, and makes a call to an optional +// callback function to modify the configuration. If there is an error +// configuring or starting the server, the server will NOT be running when the +// function returns (thus you do not need to stop it). +func NewTestServerConfigT(t *testing.T, cb ServerConfigCallback) (*TestServer, error) { + return newTestServerConfigT(t, cb) +} + +// newTestServerConfigT is the internal helper for NewTestServerConfigT. 
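+// It is reached via NewTestServer, NewTestServerConfig or
+// NewTestServerConfigT rather than being called directly.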
+func newTestServerConfigT(t *testing.T, cb ServerConfigCallback) (*TestServer, error) { + path, err := exec.LookPath("consul") + if err != nil || path == "" { + return nil, fmt.Errorf("consul not found on $PATH - download and install " + + "consul or skip this test") + } + + tmpdir := TempDir(t, "consul") + cfg := defaultServerConfig() + cfg.DataDir = filepath.Join(tmpdir, "data") + if cb != nil { + cb(cfg) + } + + b, err := json.Marshal(cfg) + if err != nil { + return nil, errors.Wrap(err, "failed marshaling json") + } + + log.Printf("CONFIG JSON: %s", string(b)) + configFile := filepath.Join(tmpdir, "config.json") + if err := ioutil.WriteFile(configFile, b, 0644); err != nil { + defer os.RemoveAll(tmpdir) + return nil, errors.Wrap(err, "failed writing config content") + } + + stdout := io.Writer(os.Stdout) + if cfg.Stdout != nil { + stdout = cfg.Stdout + } + stderr := io.Writer(os.Stderr) + if cfg.Stderr != nil { + stderr = cfg.Stderr + } + + // Start the server + args := []string{"agent", "-config-file", configFile} + args = append(args, cfg.Args...) + cmd := exec.Command("consul", args...) + cmd.Stdout = stdout + cmd.Stderr = stderr + if err := cmd.Start(); err != nil { + return nil, errors.Wrap(err, "failed starting command") + } + + httpAddr := fmt.Sprintf("127.0.0.1:%d", cfg.Ports.HTTP) + client := cleanhttp.DefaultClient() + if strings.HasPrefix(cfg.Addresses.HTTP, "unix://") { + httpAddr = cfg.Addresses.HTTP + tr := cleanhttp.DefaultTransport() + tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", httpAddr[len("unix://"):]) + } + client = &http.Client{Transport: tr} + } + + server := &TestServer{ + Config: cfg, + cmd: cmd, + + HTTPAddr: httpAddr, + HTTPSAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.HTTPS), + LANAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.SerfLan), + WANAddr: fmt.Sprintf("127.0.0.1:%d", cfg.Ports.SerfWan), + + HTTPClient: client, + + tmpdir: tmpdir, + } + + // Wait for the server to be ready + if cfg.Bootstrap { + err = server.waitForLeader() + } else { + err = server.waitForAPI() + } + if err != nil { + defer server.Stop() + return nil, errors.Wrap(err, "failed waiting for server to start") + } + return server, nil +} + +// Stop stops the test Consul server, and removes the Consul data +// directory once we are done. +func (s *TestServer) Stop() error { + defer os.RemoveAll(s.tmpdir) + + // There was no process + if s.cmd == nil { + return nil + } + + if s.cmd.Process != nil { + if err := s.cmd.Process.Signal(os.Interrupt); err != nil { + return errors.Wrap(err, "failed to kill consul server") + } + } + + // wait for the process to exit to be sure that the data dir can be + // deleted on all platforms. + return s.cmd.Wait() +} + +type failer struct { + failed bool +} + +func (f *failer) Log(args ...interface{}) { fmt.Println(args...) } +func (f *failer) FailNow() { f.failed = true } + +// waitForAPI waits for only the agent HTTP endpoint to start +// responding. This is an indication that the agent has started, +// but will likely return before a leader is elected. 
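+//
+// The probe is a plain GET against /v1/agent/self, driven by the retry
+// helpers from the sdk/testutil/retry package.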
+func (s *TestServer) waitForAPI() error { + f := &failer{} + retry.Run(f, func(r *retry.R) { + resp, err := s.HTTPClient.Get(s.url("/v1/agent/self")) + if err != nil { + r.Fatal(err) + } + defer resp.Body.Close() + if err := s.requireOK(resp); err != nil { + r.Fatal("failed OK response", err) + } + }) + if f.failed { + return errors.New("failed waiting for API") + } + return nil +} + +// waitForLeader waits for the Consul server's HTTP API to become +// available, and then waits for a known leader and an index of +// 1 or more to be observed to confirm leader election is done. +// It then waits to ensure the anti-entropy sync has completed. +func (s *TestServer) waitForLeader() error { + f := &failer{} + timer := &retry.Timer{ + Timeout: s.Config.ReadyTimeout, + Wait: 250 * time.Millisecond, + } + var index int64 + retry.RunWith(timer, f, func(r *retry.R) { + // Query the API and check the status code. + url := s.url(fmt.Sprintf("/v1/catalog/nodes?index=%d", index)) + resp, err := s.HTTPClient.Get(url) + if err != nil { + r.Fatal("failed http get", err) + } + defer resp.Body.Close() + if err := s.requireOK(resp); err != nil { + r.Fatal("failed OK response", err) + } + + // Ensure we have a leader and a node registration. + if leader := resp.Header.Get("X-Consul-KnownLeader"); leader != "true" { + r.Fatalf("Consul leader status: %#v", leader) + } + index, err = strconv.ParseInt(resp.Header.Get("X-Consul-Index"), 10, 64) + if err != nil { + r.Fatal("bad consul index", err) + } + if index == 0 { + r.Fatal("consul index is 0") + } + + // Watch for the anti-entropy sync to finish. + var v []map[string]interface{} + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&v); err != nil { + r.Fatal(err) + } + if len(v) < 1 { + r.Fatal("No nodes") + } + taggedAddresses, ok := v[0]["TaggedAddresses"].(map[string]interface{}) + if !ok { + r.Fatal("Missing tagged addresses") + } + if _, ok := taggedAddresses["lan"]; !ok { + r.Fatal("No lan tagged addresses") + } + }) + if f.failed { + return errors.New("failed waiting for leader") + } + return nil +} + +// WaitForSerfCheck ensures we have a node with serfHealth check registered +// Behavior mirrors testrpc.WaitForTestAgent but avoids the dependency cycle in api pkg +func (s *TestServer) WaitForSerfCheck(t *testing.T) { + retry.Run(t, func(r *retry.R) { + // Query the API and check the status code. + url := s.url("/v1/catalog/nodes?index=0") + resp, err := s.HTTPClient.Get(url) + if err != nil { + r.Fatal("failed http get", err) + } + defer resp.Body.Close() + if err := s.requireOK(resp); err != nil { + r.Fatal("failed OK response", err) + } + + // Watch for the anti-entropy sync to finish. 
+ var payload []map[string]interface{} + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&payload); err != nil { + r.Fatal(err) + } + if len(payload) < 1 { + r.Fatal("No nodes") + } + + // Ensure the serfHealth check is registered + url = s.url(fmt.Sprintf("/v1/health/node/%s", payload[0]["Node"])) + resp, err = s.HTTPClient.Get(url) + if err != nil { + r.Fatal("failed http get", err) + } + defer resp.Body.Close() + if err := s.requireOK(resp); err != nil { + r.Fatal("failed OK response", err) + } + dec = json.NewDecoder(resp.Body) + if err = dec.Decode(&payload); err != nil { + r.Fatal(err) + } + + var found bool + for _, check := range payload { + if check["CheckID"].(string) == "serfHealth" { + found = true + break + } + } + if !found { + r.Fatal("missing serfHealth registration") + } + }) +} diff --git a/vendor/github.com/hashicorp/consul/sdk/testutil/server_methods.go b/vendor/github.com/hashicorp/consul/sdk/testutil/server_methods.go new file mode 100644 index 000000000..dec512054 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/sdk/testutil/server_methods.go @@ -0,0 +1,256 @@ +package testutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "testing" + + "github.com/pkg/errors" +) + +// copied from testutil to break circular dependency +const ( + HealthAny = "any" + HealthPassing = "passing" + HealthWarning = "warning" + HealthCritical = "critical" + HealthMaint = "maintenance" +) + +// JoinLAN is used to join local datacenters together. +func (s *TestServer) JoinLAN(t *testing.T, addr string) { + resp := s.put(t, "/v1/agent/join/"+addr, nil) + defer resp.Body.Close() +} + +// JoinWAN is used to join remote datacenters together. +func (s *TestServer) JoinWAN(t *testing.T, addr string) { + resp := s.put(t, "/v1/agent/join/"+addr+"?wan=1", nil) + resp.Body.Close() +} + +// SetKV sets an individual key in the K/V store. +func (s *TestServer) SetKV(t *testing.T, key string, val []byte) { + resp := s.put(t, "/v1/kv/"+key, bytes.NewBuffer(val)) + resp.Body.Close() +} + +// SetKVString sets an individual key in the K/V store, but accepts a string +// instead of []byte. +func (s *TestServer) SetKVString(t *testing.T, key string, val string) { + resp := s.put(t, "/v1/kv/"+key, bytes.NewBufferString(val)) + resp.Body.Close() +} + +// GetKV retrieves a single key and returns its value +func (s *TestServer) GetKV(t *testing.T, key string) []byte { + resp := s.get(t, "/v1/kv/"+key) + defer resp.Body.Close() + + raw, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read body: %s", err) + } + + var result []*TestKVResponse + if err := json.Unmarshal(raw, &result); err != nil { + t.Fatalf("failed to unmarshal: %s", err) + } + if len(result) < 1 { + t.Fatalf("key does not exist: %s", key) + } + + v, err := base64.StdEncoding.DecodeString(result[0].Value) + if err != nil { + t.Fatalf("failed to base64 decode: %s", err) + } + + return v +} + +// GetKVString retrieves a value from the store, but returns as a string instead +// of []byte. +func (s *TestServer) GetKVString(t *testing.T, key string) string { + return string(s.GetKV(t, key)) +} + +// PopulateKV fills the Consul KV with data from a generic map. +func (s *TestServer) PopulateKV(t *testing.T, data map[string][]byte) { + for k, v := range data { + s.SetKV(t, k, v) + } +} + +// ListKV returns a list of keys present in the KV store. This will list all +// keys under the given prefix recursively and return them as a slice. 
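+//
+// A hypothetical call (the prefix and result are illustrative):
+//
+//	keys := srv.ListKV(t, "config/")
+//	// e.g. []string{"config/db/host", "config/db/port"}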
+func (s *TestServer) ListKV(t *testing.T, prefix string) []string {
+	resp := s.get(t, "/v1/kv/"+prefix+"?keys")
+	defer resp.Body.Close()
+
+	raw, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatalf("failed to read body: %s", err)
+	}
+
+	var result []string
+	if err := json.Unmarshal(raw, &result); err != nil {
+		t.Fatalf("failed to unmarshal: %s", err)
+	}
+	return result
+}
+
+// AddService adds a new service to the Consul instance. It also
+// automatically adds a health check with the given status, which
+// can be one of "passing", "warning", or "critical".
+func (s *TestServer) AddService(t *testing.T, name, status string, tags []string) {
+	s.AddAddressableService(t, name, status, "", 0, tags) // set empty address and 0 as port for non-accessible service
+}
+
+// AddAddressableService adds a new service to the Consul instance by
+// passing "address" and "port". It is helpful when you need to prepare a
+// fake service that may be accessed within the target source code.
+// It also automatically adds a health check with the given status, which
+// can be one of "passing", "warning", or "critical", just like `AddService` does.
+func (s *TestServer) AddAddressableService(t *testing.T, name, status, address string, port int, tags []string) {
+	svc := &TestService{
+		Name:    name,
+		Tags:    tags,
+		Address: address,
+		Port:    port,
+	}
+	payload, err := s.encodePayload(svc)
+	if err != nil {
+		t.Fatal(err)
+	}
+	s.put(t, "/v1/agent/service/register", payload)
+
+	chkName := "service:" + name
+	chk := &TestCheck{
+		Name:      chkName,
+		ServiceID: name,
+		TTL:       "10m",
+	}
+	payload, err = s.encodePayload(chk)
+	if err != nil {
+		t.Fatal(err)
+	}
+	s.put(t, "/v1/agent/check/register", payload)
+
+	switch status {
+	case HealthPassing:
+		s.put(t, "/v1/agent/check/pass/"+chkName, nil)
+	case HealthWarning:
+		s.put(t, "/v1/agent/check/warn/"+chkName, nil)
+	case HealthCritical:
+		s.put(t, "/v1/agent/check/fail/"+chkName, nil)
+	default:
+		t.Fatalf("Unrecognized status: %s", status)
+	}
+}
+
+// AddCheck adds a check to the Consul instance. If the serviceID is
+// left empty (""), then the check will be associated with the node.
+// The check status may be "passing", "warning", or "critical".
+func (s *TestServer) AddCheck(t *testing.T, name, serviceID, status string) {
+	chk := &TestCheck{
+		ID:   name,
+		Name: name,
+		TTL:  "10m",
+	}
+	if serviceID != "" {
+		chk.ServiceID = serviceID
+	}
+
+	payload, err := s.encodePayload(chk)
+	if err != nil {
+		t.Fatal(err)
+	}
+	s.put(t, "/v1/agent/check/register", payload)
+
+	switch status {
+	case HealthPassing:
+		s.put(t, "/v1/agent/check/pass/"+name, nil)
+	case HealthWarning:
+		s.put(t, "/v1/agent/check/warn/"+name, nil)
+	case HealthCritical:
+		s.put(t, "/v1/agent/check/fail/"+name, nil)
+	default:
+		t.Fatalf("Unrecognized status: %s", status)
+	}
+}
+
+// put performs a new HTTP PUT request.
+func (s *TestServer) put(t *testing.T, path string, body io.Reader) *http.Response {
+	req, err := http.NewRequest("PUT", s.url(path), body)
+	if err != nil {
+		t.Fatalf("failed to create PUT request: %s", err)
+	}
+	resp, err := s.HTTPClient.Do(req)
+	if err != nil {
+		t.Fatalf("failed to make PUT request: %s", err)
+	}
+	if err := s.requireOK(resp); err != nil {
+		defer resp.Body.Close()
+		t.Fatalf("not OK PUT: %s", err)
+	}
+	return resp
+}
+
+// get performs a new HTTP GET request.
+func (s *TestServer) get(t *testing.T, path string) *http.Response {
+	resp, err := s.HTTPClient.Get(s.url(path))
+	if err != nil {
+		t.Fatalf("failed to make GET request: %s", err)
+	}
+	if err := s.requireOK(resp); err != nil {
+		defer resp.Body.Close()
+		t.Fatalf("not OK GET: %s", err)
+	}
+	return resp
+}
+
+// encodePayload returns a new io.Reader wrapping the encoded contents
+// of the payload, suitable for passing directly to a new request.
+func (s *TestServer) encodePayload(payload interface{}) (io.Reader, error) {
+	var encoded bytes.Buffer
+	enc := json.NewEncoder(&encoded)
+	if err := enc.Encode(payload); err != nil {
+		return nil, errors.Wrap(err, "failed to encode payload")
+	}
+	return &encoded, nil
+}
+
+// url is a helper function which takes a relative URL and
+// makes it into a proper URL against the local Consul server.
+func (s *TestServer) url(path string) string {
+	if s == nil {
+		log.Fatal("s is nil")
+	}
+	if s.Config == nil {
+		log.Fatal("s.Config is nil")
+	}
+	if s.Config.Ports == nil {
+		log.Fatal("s.Config.Ports is nil")
+	}
+	if s.Config.Ports.HTTP == 0 {
+		log.Fatal("s.Config.Ports.HTTP is 0")
+	}
+	if path == "" {
+		log.Fatal("path is empty")
+	}
+	return fmt.Sprintf("http://127.0.0.1:%d%s", s.Config.Ports.HTTP, path)
+}
+
+// requireOK checks the HTTP response code and ensures it is acceptable.
+func (s *TestServer) requireOK(resp *http.Response) error {
+	if resp.StatusCode != 200 {
+		return fmt.Errorf("Bad status code: %d", resp.StatusCode)
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/sdk/testutil/server_wrapper.go b/vendor/github.com/hashicorp/consul/sdk/testutil/server_wrapper.go
new file mode 100644
index 000000000..17615da8d
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/sdk/testutil/server_wrapper.go
@@ -0,0 +1,65 @@
+package testutil
+
+import "testing"
+
+type WrappedServer struct {
+	s *TestServer
+	t *testing.T
+}
+
+// Wrap wraps the test server in a `*testing.T` for convenience.
+//
+// For example, the following code snippets are equivalent.
+//
+//	server.JoinLAN(t, "1.2.3.4")
+//	server.Wrap(t).JoinLAN("1.2.3.4")
+//
+// This is useful when you are calling multiple methods and want to save the
+// wrapped value as another variable to avoid passing "t" to every call.
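+//
+// It is mostly a convenience for table-driven tests where the same
+// *testing.T would otherwise be threaded through many helper calls.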
+func (s *TestServer) Wrap(t *testing.T) *WrappedServer { + return &WrappedServer{s, t} +} + +func (w *WrappedServer) JoinLAN(addr string) { + w.s.JoinLAN(w.t, addr) +} + +func (w *WrappedServer) JoinWAN(addr string) { + w.s.JoinWAN(w.t, addr) +} + +func (w *WrappedServer) SetKV(key string, val []byte) { + w.s.SetKV(w.t, key, val) +} + +func (w *WrappedServer) SetKVString(key string, val string) { + w.s.SetKVString(w.t, key, val) +} + +func (w *WrappedServer) GetKV(key string) []byte { + return w.s.GetKV(w.t, key) +} + +func (w *WrappedServer) GetKVString(key string) string { + return w.s.GetKVString(w.t, key) +} + +func (w *WrappedServer) PopulateKV(data map[string][]byte) { + w.s.PopulateKV(w.t, data) +} + +func (w *WrappedServer) ListKV(prefix string) []string { + return w.s.ListKV(w.t, prefix) +} + +func (w *WrappedServer) AddService(name, status string, tags []string) { + w.s.AddService(w.t, name, status, tags) +} + +func (w *WrappedServer) AddAddressableService(name, status, address string, port int, tags []string) { + w.s.AddAddressableService(w.t, name, status, address, port, tags) +} + +func (w *WrappedServer) AddCheck(name, serviceID, status string) { + w.s.AddCheck(w.t, name, serviceID, status) +} diff --git a/vendor/github.com/hashicorp/consul/sdk/testutil/testlog.go b/vendor/github.com/hashicorp/consul/sdk/testutil/testlog.go new file mode 100644 index 000000000..6daee3593 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/sdk/testutil/testlog.go @@ -0,0 +1,42 @@ +package testutil + +import ( + "fmt" + "io" + "log" + "os" + "strings" + "testing" +) + +var sendTestLogsToStdout bool + +func init() { + sendTestLogsToStdout = os.Getenv("NOLOGBUFFER") == "1" +} + +func TestLogger(t testing.TB) *log.Logger { + return log.New(&testWriter{t}, "test: ", log.LstdFlags) +} + +func TestLoggerWithName(t testing.TB, name string) *log.Logger { + return log.New(&testWriter{t}, "test["+name+"]: ", log.LstdFlags) +} + +func TestWriter(t testing.TB) io.Writer { + return &testWriter{t} +} + +type testWriter struct { + t testing.TB +} + +func (tw *testWriter) Write(p []byte) (n int, err error) { + tw.t.Helper() + if sendTestLogsToStdout { + fmt.Fprint(os.Stdout, strings.TrimSpace(string(p))+"\n") + } else { + tw.t.Log(strings.TrimSpace(string(p))) + } + return len(p), nil +} diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index 1153e2853..9b6845e98 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -128,6 +128,21 @@ stdLogger.Printf("[DEBUG] %+v", stdLogger) ... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]} ``` +Alternatively, you may configure the system-wide logger: + +```go +// log the standard logger from 'import "log"' +log.SetOutput(appLogger.Writer(&hclog.StandardLoggerOptions{InferLevels: true})) +log.SetPrefix("") +log.SetFlags(0) + +log.Printf("[DEBUG] %d", 42) +``` + +```text +... [DEBUG] my-app: 42 +``` + Notice that if `appLogger` is initialized with the `INFO` log level _and_ you specify `InferLevels: true`, you will not see any output here. You must change `appLogger` to `DEBUG` to see output. See the docs for more information. 
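The README hunk above bridges the stdlib `log` package onto hclog. As an editor's sketch (not part of this change; the `my-app` name and the `INFO` level are assumed values), the fragment below shows the `StandardLogger` variant end to end, using only APIs that appear elsewhere in this diff:

```go
package main

import (
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Stand-in for an application-level logger; "INFO" mirrors a typical
	// configured log level such as Consul's LogLevel.
	appLogger := hclog.New(&hclog.LoggerOptions{
		Name:   "my-app",
		Level:  hclog.LevelFromString("INFO"),
		Output: os.Stderr,
	})

	// Bridge the stdlib log API onto hclog, inferring levels from the
	// "[DEBUG]"/"[WARN]" message prefixes.
	std := appLogger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true})

	std.Printf("[DEBUG] suppressed: below the INFO threshold")
	std.Printf("[WARN] emitted: at or above the INFO threshold")
}
```

With `InferLevels: true` the bracketed prefix is stripped and used as the record's level, which is why the `[DEBUG]` line is filtered out at `INFO` while the `[WARN]` line is emitted.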
diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go new file mode 100644 index 000000000..7815f5019 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/context.go @@ -0,0 +1,38 @@ +package hclog + +import ( + "context" +) + +// WithContext inserts a logger into the context and is retrievable +// with FromContext. The optional args can be set with the same syntax as +// Logger.With to set fields on the inserted logger. This will not modify +// the logger argument in-place. +func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context { + // While we could call logger.With even with zero args, we have this + // check to avoid unnecessary allocations around creating a copy of a + // logger. + if len(args) > 0 { + logger = logger.With(args...) + } + + return context.WithValue(ctx, contextKey, logger) +} + +// FromContext returns a logger from the context. This will return L() +// (the default logger) if no logger is found in the context. Therefore, +// this will never return a nil value. +func FromContext(ctx context.Context) Logger { + logger, _ := ctx.Value(contextKey).(Logger) + if logger == nil { + return L() + } + + return logger +} + +// Unexported new type so that our context key never collides with another. +type contextKeyType struct{} + +// contextKey is the key used for the context to store the logger. +var contextKey = contextKeyType{} diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go index 55ce43960..e5f7f95ff 100644 --- a/vendor/github.com/hashicorp/go-hclog/global.go +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -8,16 +8,16 @@ var ( protect sync.Once def Logger - // The options used to create the Default logger. These are - // read only when the Default logger is created, so set them - // as soon as the process starts. + // DefaultOptions is used to create the Default logger. These are read + // only when the Default logger is created, so set them as soon as the + // process starts. DefaultOptions = &LoggerOptions{ Level: DefaultLevel, Output: DefaultOutput, } ) -// Return a logger that is held globally. This can be a good starting +// Default returns a globally held logger. This can be a good starting // place, and then you can use .With() and .Name() to create sub-loggers // to be used in more specific contexts. func Default() Logger { @@ -28,7 +28,7 @@ func Default() Logger { return def } -// A short alias for Default() +// L is a short alias for Default(). 
func L() Logger { return Default() } diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod new file mode 100644 index 000000000..0d079a654 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.mod @@ -0,0 +1,7 @@ +module github.com/hashicorp/go-hclog + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 +) diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum new file mode 100644 index 000000000..e03ee77d9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.sum @@ -0,0 +1,6 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/hashicorp/go-hclog/int.go b/vendor/github.com/hashicorp/go-hclog/int.go deleted file mode 100644 index 0166c3dd2..000000000 --- a/vendor/github.com/hashicorp/go-hclog/int.go +++ /dev/null @@ -1,420 +0,0 @@ -package hclog - -import ( - "bufio" - "encoding" - "encoding/json" - "fmt" - "log" - "os" - "runtime" - "strconv" - "strings" - "sync" - "time" -) - -var ( - _levelToBracket = map[Level]string{ - Debug: "[DEBUG]", - Trace: "[TRACE]", - Info: "[INFO ]", - Warn: "[WARN ]", - Error: "[ERROR]", - } -) - -// Given the options (nil for defaults), create a new Logger -func New(opts *LoggerOptions) Logger { - if opts == nil { - opts = &LoggerOptions{} - } - - output := opts.Output - if output == nil { - output = os.Stderr - } - - level := opts.Level - if level == NoLevel { - level = DefaultLevel - } - - mtx := opts.Mutex - if mtx == nil { - mtx = new(sync.Mutex) - } - - ret := &intLogger{ - m: mtx, - json: opts.JSONFormat, - caller: opts.IncludeLocation, - name: opts.Name, - timeFormat: TimeFormat, - w: bufio.NewWriter(output), - level: level, - } - if opts.TimeFormat != "" { - ret.timeFormat = opts.TimeFormat - } - return ret -} - -// The internal logger implementation. Internal in that it is defined entirely -// by this package. -type intLogger struct { - json bool - caller bool - name string - timeFormat string - - // this is a pointer so that it's shared by any derived loggers, since - // those derived loggers share the bufio.Writer as well. - m *sync.Mutex - w *bufio.Writer - level Level - - implied []interface{} -} - -// Make sure that intLogger is a Logger -var _ Logger = &intLogger{} - -// The time format to use for logging. This is a version of RFC3339 that -// contains millisecond precision -const TimeFormat = "2006-01-02T15:04:05.000Z0700" - -// Log a message and a set of key/value pairs if the given level is at -// or more severe that the threshold configured in the Logger. -func (z *intLogger) Log(level Level, msg string, args ...interface{}) { - if level < z.level { - return - } - - t := time.Now() - - z.m.Lock() - defer z.m.Unlock() - - if z.json { - z.logJson(t, level, msg, args...) - } else { - z.log(t, level, msg, args...) - } - - z.w.Flush() -} - -// Cleanup a path by returning the last 2 segments of the path only. 
-func trimCallerPath(path string) string { - // lovely borrowed from zap - // nb. To make sure we trim the path correctly on Windows too, we - // counter-intuitively need to use '/' and *not* os.PathSeparator here, - // because the path given originates from Go stdlib, specifically - // runtime.Caller() which (as of Mar/17) returns forward slashes even on - // Windows. - // - // See https://github.com/golang/go/issues/3335 - // and https://github.com/golang/go/issues/18151 - // - // for discussion on the issue on Go side. - // - - // Find the last separator. - // - idx := strings.LastIndexByte(path, '/') - if idx == -1 { - return path - } - - // Find the penultimate separator. - idx = strings.LastIndexByte(path[:idx], '/') - if idx == -1 { - return path - } - - return path[idx+1:] -} - -// Non-JSON logging format function -func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { - z.w.WriteString(t.Format(z.timeFormat)) - z.w.WriteByte(' ') - - s, ok := _levelToBracket[level] - if ok { - z.w.WriteString(s) - } else { - z.w.WriteString("[UNKN ]") - } - - if z.caller { - if _, file, line, ok := runtime.Caller(3); ok { - z.w.WriteByte(' ') - z.w.WriteString(trimCallerPath(file)) - z.w.WriteByte(':') - z.w.WriteString(strconv.Itoa(line)) - z.w.WriteByte(':') - } - } - - z.w.WriteByte(' ') - - if z.name != "" { - z.w.WriteString(z.name) - z.w.WriteString(": ") - } - - z.w.WriteString(msg) - - args = append(z.implied, args...) - - var stacktrace CapturedStacktrace - - if args != nil && len(args) > 0 { - if len(args)%2 != 0 { - cs, ok := args[len(args)-1].(CapturedStacktrace) - if ok { - args = args[:len(args)-1] - stacktrace = cs - } else { - args = append(args, "") - } - } - - z.w.WriteByte(':') - - FOR: - for i := 0; i < len(args); i = i + 2 { - var val string - - switch st := args[i+1].(type) { - case string: - val = st - case int: - val = strconv.FormatInt(int64(st), 10) - case int64: - val = strconv.FormatInt(int64(st), 10) - case int32: - val = strconv.FormatInt(int64(st), 10) - case int16: - val = strconv.FormatInt(int64(st), 10) - case int8: - val = strconv.FormatInt(int64(st), 10) - case uint: - val = strconv.FormatUint(uint64(st), 10) - case uint64: - val = strconv.FormatUint(uint64(st), 10) - case uint32: - val = strconv.FormatUint(uint64(st), 10) - case uint16: - val = strconv.FormatUint(uint64(st), 10) - case uint8: - val = strconv.FormatUint(uint64(st), 10) - case CapturedStacktrace: - stacktrace = st - continue FOR - case Format: - val = fmt.Sprintf(st[0].(string), st[1:]...) 
- default: - val = fmt.Sprintf("%v", st) - } - - z.w.WriteByte(' ') - z.w.WriteString(args[i].(string)) - z.w.WriteByte('=') - - if strings.ContainsAny(val, " \t\n\r") { - z.w.WriteByte('"') - z.w.WriteString(val) - z.w.WriteByte('"') - } else { - z.w.WriteString(val) - } - } - } - - z.w.WriteString("\n") - - if stacktrace != "" { - z.w.WriteString(string(stacktrace)) - } -} - -// JSON logging function -func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) { - vals := map[string]interface{}{ - "@message": msg, - "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), - } - - var levelStr string - switch level { - case Error: - levelStr = "error" - case Warn: - levelStr = "warn" - case Info: - levelStr = "info" - case Debug: - levelStr = "debug" - case Trace: - levelStr = "trace" - default: - levelStr = "all" - } - - vals["@level"] = levelStr - - if z.name != "" { - vals["@module"] = z.name - } - - if z.caller { - if _, file, line, ok := runtime.Caller(3); ok { - vals["@caller"] = fmt.Sprintf("%s:%d", file, line) - } - } - - args = append(z.implied, args...) - - if args != nil && len(args) > 0 { - if len(args)%2 != 0 { - cs, ok := args[len(args)-1].(CapturedStacktrace) - if ok { - args = args[:len(args)-1] - vals["stacktrace"] = cs - } else { - args = append(args, "") - } - } - - for i := 0; i < len(args); i = i + 2 { - if _, ok := args[i].(string); !ok { - // As this is the logging function not much we can do here - // without injecting into logs... - continue - } - val := args[i+1] - switch sv := val.(type) { - case error: - // Check if val is of type error. If error type doesn't - // implement json.Marshaler or encoding.TextMarshaler - // then set val to err.Error() so that it gets marshaled - switch sv.(type) { - case json.Marshaler, encoding.TextMarshaler: - default: - val = sv.Error() - } - case Format: - val = fmt.Sprintf(sv[0].(string), sv[1:]...) - } - - vals[args[i].(string)] = val - } - } - - err := json.NewEncoder(z.w).Encode(vals) - if err != nil { - panic(err) - } -} - -// Emit the message and args at DEBUG level -func (z *intLogger) Debug(msg string, args ...interface{}) { - z.Log(Debug, msg, args...) -} - -// Emit the message and args at TRACE level -func (z *intLogger) Trace(msg string, args ...interface{}) { - z.Log(Trace, msg, args...) -} - -// Emit the message and args at INFO level -func (z *intLogger) Info(msg string, args ...interface{}) { - z.Log(Info, msg, args...) -} - -// Emit the message and args at WARN level -func (z *intLogger) Warn(msg string, args ...interface{}) { - z.Log(Warn, msg, args...) -} - -// Emit the message and args at ERROR level -func (z *intLogger) Error(msg string, args ...interface{}) { - z.Log(Error, msg, args...) -} - -// Indicate that the logger would emit TRACE level logs -func (z *intLogger) IsTrace() bool { - return z.level == Trace -} - -// Indicate that the logger would emit DEBUG level logs -func (z *intLogger) IsDebug() bool { - return z.level <= Debug -} - -// Indicate that the logger would emit INFO level logs -func (z *intLogger) IsInfo() bool { - return z.level <= Info -} - -// Indicate that the logger would emit WARN level logs -func (z *intLogger) IsWarn() bool { - return z.level <= Warn -} - -// Indicate that the logger would emit ERROR level logs -func (z *intLogger) IsError() bool { - return z.level <= Error -} - -// Return a sub-Logger for which every emitted log message will contain -// the given key/value pairs. This is used to create a context specific -// Logger. 
-func (z *intLogger) With(args ...interface{}) Logger { - var nz intLogger = *z - - nz.implied = make([]interface{}, 0, len(z.implied)+len(args)) - nz.implied = append(nz.implied, z.implied...) - nz.implied = append(nz.implied, args...) - - return &nz -} - -// Create a new sub-Logger that a name decending from the current name. -// This is used to create a subsystem specific Logger. -func (z *intLogger) Named(name string) Logger { - var nz intLogger = *z - - if nz.name != "" { - nz.name = nz.name + "." + name - } else { - nz.name = name - } - - return &nz -} - -// Create a new sub-Logger with an explicit name. This ignores the current -// name. This is used to create a standalone logger that doesn't fall -// within the normal hierarchy. -func (z *intLogger) ResetNamed(name string) Logger { - var nz intLogger = *z - - nz.name = name - - return &nz -} - -// Create a *log.Logger that will send it's data through this Logger. This -// allows packages that expect to be using the standard library log to actually -// use this logger. -func (z *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { - if opts == nil { - opts = &StandardLoggerOptions{} - } - - return log.New(&stdlogAdapter{z, opts.InferLevels}, "", 0) -} diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go new file mode 100644 index 000000000..219656c4c --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -0,0 +1,527 @@ +package hclog + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "log" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// TimeFormat to use for logging. This is a version of RFC3339 that contains +// contains millisecond precision +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +// errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json +const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json" + +var ( + _levelToBracket = map[Level]string{ + Debug: "[DEBUG]", + Trace: "[TRACE]", + Info: "[INFO] ", + Warn: "[WARN] ", + Error: "[ERROR]", + } +) + +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// intLogger is an internal logger implementation. Internal in that it is +// defined entirely by this package. +type intLogger struct { + json bool + caller bool + name string + timeFormat string + + // This is a pointer so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. + mutex *sync.Mutex + writer *writer + level *int32 + + implied []interface{} +} + +// New returns a configured logger. +func New(opts *LoggerOptions) Logger { + if opts == nil { + opts = &LoggerOptions{} + } + + output := opts.Output + if output == nil { + output = DefaultOutput + } + + level := opts.Level + if level == NoLevel { + level = DefaultLevel + } + + mutex := opts.Mutex + if mutex == nil { + mutex = new(sync.Mutex) + } + + l := &intLogger{ + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + timeFormat: TimeFormat, + mutex: mutex, + writer: newWriter(output), + level: new(int32), + } + + if opts.TimeFormat != "" { + l.timeFormat = opts.TimeFormat + } + + atomic.StoreInt32(l.level, int32(level)) + + return l +} + +// Log a message and a set of key/value pairs if the given level is at +// or more severe that the threshold configured in the Logger. 
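+//
+// The threshold is read with atomic.LoadInt32, which is what makes the
+// SetLevel method below safe to call while other goroutines are logging.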
+func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + if level < Level(atomic.LoadInt32(l.level)) { + return + } + + t := time.Now() + + l.mutex.Lock() + defer l.mutex.Unlock() + + if l.json { + l.logJSON(t, level, msg, args...) + } else { + l.log(t, level, msg, args...) + } + + l.writer.Flush(level) +} + +// Cleanup a path by returning the last 2 segments of the path only. +func trimCallerPath(path string) string { + // lovely borrowed from zap + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + + // Find the last separator. + idx := strings.LastIndexByte(path, '/') + if idx == -1 { + return path + } + + // Find the penultimate separator. + idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +// Non-JSON logging format function +func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + + s, ok := _levelToBracket[level] + if ok { + l.writer.WriteString(s) + } else { + l.writer.WriteString("[?????]") + } + + if l.caller { + if _, file, line, ok := runtime.Caller(3); ok { + l.writer.WriteByte(' ') + l.writer.WriteString(trimCallerPath(file)) + l.writer.WriteByte(':') + l.writer.WriteString(strconv.Itoa(line)) + l.writer.WriteByte(':') + } + } + + l.writer.WriteByte(' ') + + if l.name != "" { + l.writer.WriteString(l.name) + l.writer.WriteString(": ") + } + + l.writer.WriteString(msg) + + args = append(l.implied, args...) + + var stacktrace CapturedStacktrace + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + args = append(args, "") + } + } + + l.writer.WriteByte(':') + + FOR: + for i := 0; i < len(args); i = i + 2 { + var ( + val string + raw bool + ) + + switch st := args[i+1].(type) { + case string: + val = st + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case CapturedStacktrace: + stacktrace = st + continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) 
+ default: + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = l.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } + } + + l.writer.WriteByte(' ') + l.writer.WriteString(args[i].(string)) + l.writer.WriteByte('=') + + if !raw && strings.ContainsAny(val, " \t\n\r") { + l.writer.WriteByte('"') + l.writer.WriteString(val) + l.writer.WriteByte('"') + } else { + l.writer.WriteString(val) + } + } + } + + l.writer.WriteString("\n") + + if stacktrace != "" { + l.writer.WriteString(string(stacktrace)) + } +} + +func (l *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = sv.String() + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + } + + if strings.ContainsAny(val, " \t\n\r") { + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + } else { + buf.WriteString(val) + } + } + + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, level, msg) + args = append(l.implied, args...) + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + args = append(args, "") + } + } + + for i := 0; i < len(args); i = i + 2 { + if _, ok := args[i].(string); !ok { + // As this is the logging function not much we can do here + // without injecting into logs... + continue + } + val := args[i+1] + switch sv := val.(type) { + case error: + // Check if val is of type error. If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = sv.Error() + } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) + } + + vals[args[i].(string)] = val + } + } + + err := json.NewEncoder(l.writer).Encode(vals) + if err != nil { + if _, ok := err.(*json.UnsupportedTypeError); ok { + plainVal := l.jsonMapEntry(t, level, msg) + plainVal["@warn"] = errJsonUnsupportedTypeMsg + + json.NewEncoder(l.writer).Encode(plainVal) + } + } +} + +func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string]interface{} { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if l.name != "" { + vals["@module"] = l.name + } + + if l.caller { + if _, file, line, ok := runtime.Caller(4); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + return vals +} + +// Emit the message and args at DEBUG level +func (l *intLogger) Debug(msg string, args ...interface{}) { + l.Log(Debug, msg, args...) 
+}
+
+// Emit the message and args at TRACE level
+func (l *intLogger) Trace(msg string, args ...interface{}) {
+	l.Log(Trace, msg, args...)
+}
+
+// Emit the message and args at INFO level
+func (l *intLogger) Info(msg string, args ...interface{}) {
+	l.Log(Info, msg, args...)
+}
+
+// Emit the message and args at WARN level
+func (l *intLogger) Warn(msg string, args ...interface{}) {
+	l.Log(Warn, msg, args...)
+}
+
+// Emit the message and args at ERROR level
+func (l *intLogger) Error(msg string, args ...interface{}) {
+	l.Log(Error, msg, args...)
+}
+
+// Indicate that the logger would emit TRACE level logs
+func (l *intLogger) IsTrace() bool {
+	return Level(atomic.LoadInt32(l.level)) == Trace
+}
+
+// Indicate that the logger would emit DEBUG level logs
+func (l *intLogger) IsDebug() bool {
+	return Level(atomic.LoadInt32(l.level)) <= Debug
+}
+
+// Indicate that the logger would emit INFO level logs
+func (l *intLogger) IsInfo() bool {
+	return Level(atomic.LoadInt32(l.level)) <= Info
+}
+
+// Indicate that the logger would emit WARN level logs
+func (l *intLogger) IsWarn() bool {
+	return Level(atomic.LoadInt32(l.level)) <= Warn
+}
+
+// Indicate that the logger would emit ERROR level logs
+func (l *intLogger) IsError() bool {
+	return Level(atomic.LoadInt32(l.level)) <= Error
+}
+
+// Return a sub-Logger for which every emitted log message will contain
+// the given key/value pairs. This is used to create a context specific
+// Logger.
+func (l *intLogger) With(args ...interface{}) Logger {
+	if len(args)%2 != 0 {
+		panic("With() call requires paired arguments")
+	}
+
+	sl := *l
+
+	result := make(map[string]interface{}, len(l.implied)+len(args))
+	keys := make([]string, 0, len(l.implied)+len(args))
+
+	// Read existing args, store map and key for consistent sorting
+	for i := 0; i < len(l.implied); i += 2 {
+		key := l.implied[i].(string)
+		keys = append(keys, key)
+		result[key] = l.implied[i+1]
+	}
+	// Read new args, store map and key for consistent sorting
+	for i := 0; i < len(args); i += 2 {
+		key := args[i].(string)
+		_, exists := result[key]
+		if !exists {
+			keys = append(keys, key)
+		}
+		result[key] = args[i+1]
+	}
+
+	// Sort keys to be consistent
+	sort.Strings(keys)
+
+	sl.implied = make([]interface{}, 0, len(l.implied)+len(args))
+	for _, k := range keys {
+		sl.implied = append(sl.implied, k)
+		sl.implied = append(sl.implied, result[k])
+	}
+
+	return &sl
+}
+
+// Create a new sub-Logger with a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+func (l *intLogger) Named(name string) Logger {
+	sl := *l
+
+	if sl.name != "" {
+		sl.name = sl.name + "." + name
+	} else {
+		sl.name = name
+	}
+
+	return &sl
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy.
+func (l *intLogger) ResetNamed(name string) Logger {
+	sl := *l
+
+	sl.name = name
+
+	return &sl
+}
+
+// Update the logging level on-the-fly. This will affect all sub-loggers as
+// well.
+func (l *intLogger) SetLevel(level Level) {
+	atomic.StoreInt32(l.level, int32(level))
+}
+
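As an aside, a minimal sketch of how these derived loggers behave, assuming only the go-hclog API vendored in this diff. Because the level lives behind a shared *int32, SetLevel on the root logger is visible to every logger derived from it via Named or With:

	root := hclog.New(&hclog.LoggerOptions{Name: "consul", Level: hclog.Info})

	// "consul.raft" sub-logger that stamps every line with peer=...
	raftLog := root.Named("raft").With("peer", "10.0.0.1:8300")

	raftLog.Debug("suppressed")  // below Info, not emitted
	root.SetLevel(hclog.Debug)   // the level pointer is shared by derived loggers,
	raftLog.Debug("now emitted") // so the sub-logger sees the change
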
+// Create a *log.Logger that will send its data through this Logger. This
+// allows packages that expect to be using the standard library log to actually
+// use this logger.
+func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
+	if opts == nil {
+		opts = &StandardLoggerOptions{}
+	}
+
+	return log.New(l.StandardWriter(opts), "", 0)
+}
+
+func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
+	return &stdlogAdapter{
+		log:         l,
+		inferLevels: opts.InferLevels,
+		forceLevel:  opts.ForceLevel,
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/log.go b/vendor/github.com/hashicorp/go-hclog/logger.go
similarity index 68%
rename from vendor/github.com/hashicorp/go-hclog/log.go
rename to vendor/github.com/hashicorp/go-hclog/logger.go
index 362924887..080ed7999 100644
--- a/vendor/github.com/hashicorp/go-hclog/log.go
+++ b/vendor/github.com/hashicorp/go-hclog/logger.go
@@ -9,38 +9,42 @@ import (
 )
 
 var (
-	DefaultOutput = os.Stderr
-	DefaultLevel  = Info
+	// DefaultOutput is used as the default log output.
+	DefaultOutput io.Writer = os.Stderr
+
+	// DefaultLevel is used as the default log level.
+	DefaultLevel = Info
 )
 
-type Level int
+// Level represents a log level.
+type Level int32
 
 const (
-	// This is a special level used to indicate that no level has been
+	// NoLevel is a special level used to indicate that no level has been
 	// set and allow for a default to be used.
 	NoLevel Level = 0
 
-	// The most verbose level. Intended to be used for the tracing of actions
-	// in code, such as function enters/exits, etc.
+	// Trace is the most verbose level. Intended to be used for the tracing
+	// of actions in code, such as function enters/exits, etc.
 	Trace Level = 1
 
-	// For programmer lowlevel analysis.
+	// Debug is for programmer low-level analysis.
 	Debug Level = 2
 
-	// For information about steady state operations.
+	// Info is for information about steady state operations.
 	Info Level = 3
 
-	// For information about rare but handled events.
+	// Warn is for information about rare but handled events.
 	Warn Level = 4
 
-	// For information about unrecoverable events.
+	// Error is for information about unrecoverable events.
 	Error Level = 5
 )
 
-// When processing a value of this type, the logger automatically treats the first
-// argument as a Printf formatting string and passes the rest as the values to be
-// formatted. For example: L.Info(Fmt{"%d beans/day", beans}). This is a simple
-// convience type for when formatting is required.
+// Format is a simple convenience type for when formatting is required. When
+// processing a value of this type, the logger automatically treats the first
+// argument as a Printf formatting string and passes the rest as the values
+// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}).
 type Format []interface{}
 
 // Fmt returns a Format type. This is a convenience function for creating a Format
@@ -53,7 +57,7 @@ func Fmt(str string, args ...interface{}) Format {
 // the level string is invalid. This facilitates setting the log level via
 // config or environment variable by name in a predictable way.
 func LevelFromString(levelStr string) Level {
-	// We don't care about case. Accept "INFO" or "info"
+	// We don't care about case. Accept both "INFO" and "info".
 	levelStr = strings.ToLower(strings.TrimSpace(levelStr))
 	switch levelStr {
 	case "trace":
@@ -71,7 +75,7 @@ func LevelFromString(levelStr string) Level {
 	}
 }
 
-// The main Logger interface. All code should code against this interface only.
+// Logger describes the interface that must be implemented by all loggers.
 type Logger interface {
 	// Args are alternating key, val pairs
 	// keys must be strings
@@ -121,18 +125,33 @@ type Logger interface {
 	// the current name as well.
 	ResetNamed(name string) Logger
 
+	// Updates the level. This should affect all sub-loggers as well. If an
+	// implementation cannot update the level on the fly, it should no-op.
+	SetLevel(level Level)
+
 	// Return a value that conforms to the stdlib log.Logger interface
 	StandardLogger(opts *StandardLoggerOptions) *log.Logger
+
+	// Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
+	StandardWriter(opts *StandardLoggerOptions) io.Writer
 }
 
+// StandardLoggerOptions can be used to configure a new standard logger.
 type StandardLoggerOptions struct {
 	// Indicate that some minimal parsing should be done on strings to try
 	// and detect their level and re-emit them.
 	// This supports strings like [ERROR], [ERR], [TRACE], [WARN], [INFO],
 	// [DEBUG] and strips the prefix off before reapplying it.
 	InferLevels bool
+
+	// ForceLevel is used to force all output from the standard logger to be at
+	// the specified level. Similar to InferLevels, this will strip any level
+	// prefix contained in the logged string before applying the forced level.
+	// If set, this overrides InferLevels.
+	ForceLevel Level
 }
 
+// LoggerOptions can be used to configure a new logger.
 type LoggerOptions struct {
 	// Name of the subsystem to prefix logs with
 	Name string
@@ -140,7 +159,7 @@ type LoggerOptions struct {
 	// The threshold for the logger. Anything less severe is suppressed
 	Level Level
 
-	// Where to write the logs to. Defaults to os.Stdout if nil
+	// Where to write the logs to. Defaults to os.Stderr if nil
 	Output io.Writer
 
 	// An optional mutex pointer in case Output is shared
diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
index c10ce6e88..7ad6b351e 100644
--- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go
+++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
@@ -1,8 +1,9 @@
 package hclog
 
 import (
-	"log"
+	"io"
 	"io/ioutil"
+	"log"
 )
 
 // NewNullLogger instantiates a Logger for which all calls
@@ -40,6 +41,12 @@ func (l *nullLogger) Named(name string) Logger { return l }
 
 func (l *nullLogger) ResetNamed(name string) Logger { return l }
 
+func (l *nullLogger) SetLevel(level Level) {}
+
 func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
-	return log.New(ioutil.Discard, "", log.LstdFlags)
-}
\ No newline at end of file
+	return log.New(l.StandardWriter(opts), "", log.LstdFlags)
+}
+
+func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
+	return ioutil.Discard
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
index 8af1a3be4..9b27bd3d3 100644
--- a/vendor/github.com/hashicorp/go-hclog/stacktrace.go
+++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
@@ -40,12 +40,13 @@ var (
 	}
 )
 
-// A stacktrace gathered by a previous call to log.Stacktrace. If passed
-// to a logging function, the stacktrace will be appended.
+// CapturedStacktrace represents a stacktrace captured by a previous call
+// to log.Stacktrace. If passed to a logging function, the stacktrace
+// will be appended.
 type CapturedStacktrace string
 
-// Gather a stacktrace of the current goroutine and return it to be passed
-// to a logging function.
+// Stacktrace captures a stacktrace of the current goroutine and returns
+// it to be passed to a logging function.
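A short sketch of the stdlib bridge these options drive, assuming the hclog API above and an existing hclog logger named appLogger (hypothetical). InferLevels re-emits a "[WARN] ..." line at Warn with the tag stripped; ForceLevel instead pins every line to one level:

	std := appLogger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true})
	std.Println("[WARN] heartbeat timeout reached") // emitted at Warn, tag stripped

	// Route the global stdlib logger through hclog, forcing Debug.
	log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{
		ForceLevel: hclog.Debug,
	}))
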
func Stacktrace() CapturedStacktrace { return CapturedStacktrace(takeStacktrace()) } diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go index 2bb927fc9..044a46960 100644 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -9,39 +9,60 @@ import ( // and back into our Logger. This is basically the only way to // build upon *log.Logger. type stdlogAdapter struct { - hl Logger + log Logger inferLevels bool + forceLevel Level } // Take the data, infer the levels if configured, and send it through -// a regular Logger +// a regular Logger. func (s *stdlogAdapter) Write(data []byte) (int, error) { str := string(bytes.TrimRight(data, " \t\n")) - if s.inferLevels { + if s.forceLevel != NoLevel { + // Use pickLevel to strip log levels included in the line since we are + // forcing the level + _, str := s.pickLevel(str) + + // Log at the forced level + switch s.forceLevel { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } + } else if s.inferLevels { level, str := s.pickLevel(str) switch level { case Trace: - s.hl.Trace(str) + s.log.Trace(str) case Debug: - s.hl.Debug(str) + s.log.Debug(str) case Info: - s.hl.Info(str) + s.log.Info(str) case Warn: - s.hl.Warn(str) + s.log.Warn(str) case Error: - s.hl.Error(str) + s.log.Error(str) default: - s.hl.Info(str) + s.log.Info(str) } } else { - s.hl.Info(str) + s.log.Info(str) } return len(data), nil } -// Detect, based on conventions, what log level this is +// Detect, based on conventions, what log level this is. func (s *stdlogAdapter) pickLevel(str string) (Level, string) { switch { case strings.HasPrefix(str, "[DEBUG]"): diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go new file mode 100644 index 000000000..7e8ec729d --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -0,0 +1,74 @@ +package hclog + +import ( + "bytes" + "io" +) + +type writer struct { + b bytes.Buffer + w io.Writer +} + +func newWriter(w io.Writer) *writer { + return &writer{w: w} +} + +func (w *writer) Flush(level Level) (err error) { + if lw, ok := w.w.(LevelWriter); ok { + _, err = lw.LevelWrite(level, w.b.Bytes()) + } else { + _, err = w.w.Write(w.b.Bytes()) + } + w.b.Reset() + return err +} + +func (w *writer) Write(p []byte) (int, error) { + return w.b.Write(p) +} + +func (w *writer) WriteByte(c byte) error { + return w.b.WriteByte(c) +} + +func (w *writer) WriteString(s string) (int, error) { + return w.b.WriteString(s) +} + +// LevelWriter is the interface that wraps the LevelWrite method. +type LevelWriter interface { + LevelWrite(level Level, p []byte) (n int, err error) +} + +// LeveledWriter writes all log messages to the standard writer, +// except for log levels that are defined in the overrides map. +type LeveledWriter struct { + standard io.Writer + overrides map[Level]io.Writer +} + +// NewLeveledWriter returns an initialized LeveledWriter. +// +// standard will be used as the default writer for all log levels, +// except for log levels that are defined in the overrides map. +func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter { + return &LeveledWriter{ + standard: standard, + overrides: overrides, + } +} + +// Write implements io.Writer. 
+func (lw *LeveledWriter) Write(p []byte) (int, error) { + return lw.standard.Write(p) +} + +// LevelWrite implements LevelWriter. +func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) { + w, ok := lw.overrides[level] + if !ok { + w = lw.standard + } + return w.Write(p) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go index e6dc0563f..7da3955ed 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go @@ -45,6 +45,13 @@ const ( // for debugging, set this to false, to catch panic traces. // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. recoverPanicToErr = true + + // if checkStructForEmptyValue, check structs fields to see if an empty value. + // This could be an expensive call, so possibly disable it. + checkStructForEmptyValue = false + + // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue + derefForIsEmptyValue = false ) type charEncoding uint8 diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go index 58417da95..93f12854f 100644 --- a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go @@ -33,8 +33,10 @@ func panicValToErr(panicVal interface{}, err *error) { return } -func isEmptyValueDeref(v reflect.Value, deref bool) bool { +func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { switch v.Kind() { + case reflect.Invalid: + return true case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 case reflect.Bool: @@ -50,18 +52,21 @@ func isEmptyValueDeref(v reflect.Value, deref bool) bool { if v.IsNil() { return true } - return isEmptyValueDeref(v.Elem(), deref) + return hIsEmptyValue(v.Elem(), deref, checkStruct) } else { return v.IsNil() } case reflect.Struct: + if !checkStruct { + return false + } // return true if all fields are empty. else return false. // we cannot use equality check, because some fields may be maps/slices/etc // and consequently the structs are not comparable. // return v.Interface() == reflect.Zero(v.Type()).Interface() for i, n := 0, v.NumField(); i < n; i++ { - if !isEmptyValueDeref(v.Field(i), deref) { + if !hIsEmptyValue(v.Field(i), deref, checkStruct) { return false } } @@ -71,7 +76,7 @@ func isEmptyValueDeref(v reflect.Value, deref bool) bool { } func isEmptyValue(v reflect.Value) bool { - return isEmptyValueDeref(v, true) + return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue) } func debugf(format string, args ...interface{}) { diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go index c016939a1..a4e5927ce 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -23,6 +23,7 @@ package retryablehttp import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -35,7 +36,7 @@ import ( "strings" "time" - "github.com/hashicorp/go-cleanhttp" + cleanhttp "github.com/hashicorp/go-cleanhttp" ) var ( @@ -73,6 +74,35 @@ type Request struct { *http.Request } +// WithContext returns wrapped Request with a shallow copy of underlying *http.Request +// with its context changed to ctx. The provided ctx must be non-nil. 
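A minimal sketch of what the new LeveledWriter enables, using only names introduced in this diff plus the standard library: send Error output to stderr and everything else to stdout. The logger's internal writer checks for the LevelWriter interface on Flush, so the override map is consulted once per log line:

	out := hclog.NewLeveledWriter(os.Stdout, map[hclog.Level]io.Writer{
		hclog.Error: os.Stderr,
	})
	logger := hclog.New(&hclog.LoggerOptions{Name: "agent", Output: out})
	logger.Info("written to stdout")
	logger.Error("written to stderr")
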
+func (r *Request) WithContext(ctx context.Context) *Request {
+	r.Request = r.Request.WithContext(ctx)
+	return r
+}
+
+// BodyBytes allows accessing the request body. It is an analogue to
+// http.Request's Body variable, but it returns a copy of the underlying data
+// rather than consuming it.
+//
+// This function is not thread-safe; do not call it at the same time as another
+// call, or at the same time this request is being used with Client.Do.
+func (r *Request) BodyBytes() ([]byte, error) {
+	if r.body == nil {
+		return nil, nil
+	}
+	body, err := r.body()
+	if err != nil {
+		return nil, err
+	}
+	buf := new(bytes.Buffer)
+	_, err = buf.ReadFrom(body)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
 // NewRequest creates a new wrapped request.
 func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
 	var err error
@@ -175,18 +205,24 @@ func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
 	return &Request{body, httpReq}, nil
 }
 
+// Logger is an interface that allows using loggers other than the
+// standard log.Logger.
+type Logger interface {
+	Printf(string, ...interface{})
+}
+
 // RequestLogHook allows a function to run before each retry. The HTTP
 // request which will be made, and the retry number (0 for the initial
 // request) are available to users. The internal logger is exposed to
 // consumers.
-type RequestLogHook func(*log.Logger, *http.Request, int)
+type RequestLogHook func(Logger, *http.Request, int)
 
 // ResponseLogHook is like RequestLogHook, but allows running a function
 // on each HTTP response. This function will be invoked at the end of
 // every HTTP request executed, regardless of whether a subsequent retry
 // needs to be performed or not. If the response body is read or closed
 // from this method, this will affect the response returned from Do().
-type ResponseLogHook func(*log.Logger, *http.Response)
+type ResponseLogHook func(Logger, *http.Response)
 
 // CheckRetry specifies a policy for handling retries. It is called
 // following each request with the response and error values returned by
@@ -196,7 +232,7 @@ type ResponseLogHook func(*log.Logger, *http.Response)
 // Client will close any response body when retrying, but if the retry is
 // aborted it is up to the CheckResponse callback to properly close any
 // response body before returning.
-type CheckRetry func(resp *http.Response, err error) (bool, error)
+type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error)
 
 // Backoff specifies a policy for how long to wait between retries.
 // It is called after a failing request to determine the amount of time
@@ -213,7 +249,7 @@ type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Resp
 // like automatic retries to tolerate minor outages.
 type Client struct {
 	HTTPClient *http.Client // Internal HTTP client.
-	Logger     *log.Logger  // Customer logger instance.
+	Logger     Logger       // Custom logger instance.
 
 	RetryWaitMin time.Duration // Minimum time to wait
 	RetryWaitMax time.Duration // Maximum time to wait
@@ -253,7 +289,12 @@ func NewClient() *Client {
 
 // DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
 // will retry on connection errors and server errors.
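Since Client.Logger is now the single-method Logger interface above, other logging libraries can be plugged in with a tiny adapter. A hedged sketch (hclogPrintf and appLogger are hypothetical, not part of either library):

	type hclogPrintf struct{ l hclog.Logger }

	func (h hclogPrintf) Printf(format string, args ...interface{}) {
		h.l.Debug(fmt.Sprintf(format, args...))
	}

	client := retryablehttp.NewClient()
	client.Logger = hclogPrintf{l: appLogger}
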
-func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) {
+func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	// do not retry on context.Canceled or context.DeadlineExceeded
+	if ctx.Err() != nil {
+		return false, ctx.Err()
+	}
+
 	if err != nil {
 		return true, err
 	}
@@ -344,9 +385,9 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
 			return resp, err
 		}
 		if c, ok := body.(io.ReadCloser); ok {
-			req.Request.Body = c
+			req.Body = c
 		} else {
-			req.Request.Body = ioutil.NopCloser(body)
+			req.Body = ioutil.NopCloser(body)
 		}
 	}
 
@@ -361,7 +402,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
 		}
 
 		// Check if we should continue with retries.
-		checkOK, checkErr := c.CheckRetry(resp, err)
+		checkOK, checkErr := c.CheckRetry(req.Context(), resp, err)
 
 		if err != nil {
 			if c.Logger != nil {
@@ -404,7 +445,11 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
 		if c.Logger != nil {
 			c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
 		}
-		time.Sleep(wait)
+		select {
+		case <-req.Context().Done():
+			return nil, req.Context().Err()
+		case <-time.After(wait):
+		}
 	}
 
 	if c.ErrorHandler != nil {
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.mod b/vendor/github.com/hashicorp/go-retryablehttp/go.mod
new file mode 100644
index 000000000..d28c8c8eb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/go.mod
@@ -0,0 +1,3 @@
+module github.com/hashicorp/go-retryablehttp
+
+require github.com/hashicorp/go-cleanhttp v0.5.0
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.sum b/vendor/github.com/hashicorp/go-retryablehttp/go.sum
new file mode 100644
index 000000000..3ed0fd98e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/go.sum
@@ -0,0 +1,2 @@
+github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
diff --git a/vendor/github.com/hashicorp/raft/.travis.yml b/vendor/github.com/hashicorp/raft/.travis.yml
index 979842878..faeb11ffe 100644
--- a/vendor/github.com/hashicorp/raft/.travis.yml
+++ b/vendor/github.com/hashicorp/raft/.travis.yml
@@ -5,6 +5,7 @@ go:
 # - 1.6
 - 1.8
 - 1.9
+ - 1.12
 - tip
 
 install: make deps
diff --git a/vendor/github.com/hashicorp/raft/CHANGELOG.md b/vendor/github.com/hashicorp/raft/CHANGELOG.md
index 0cd126664..9d630f213 100644
--- a/vendor/github.com/hashicorp/raft/CHANGELOG.md
+++ b/vendor/github.com/hashicorp/raft/CHANGELOG.md
@@ -1,12 +1,30 @@
-
 # UNRELEASED
 
+# 1.1.0 (May 23rd, 2019)
+
+FEATURES
+
+* Add transfer leadership extension [[GH-306](https://github.com/hashicorp/raft/pull/306)]
+
+IMPROVEMENTS
+
+* Move to `go mod` [[GH-323](https://github.com/hashicorp/consul/pull/323)]
+* Leveled log [[GH-321](https://github.com/hashicorp/consul/pull/321)]
+* Add peer changes to observations [[GH-326](https://github.com/hashicorp/consul/pull/326)]
+
+BUGFIXES
+
+* Copy the contents of an InmemSnapshotStore when opening a snapshot [[GH-270](https://github.com/hashicorp/consul/pull/270)]
+* Fix logging panic when converting parameters to strings [[GH-332](https://github.com/hashicorp/consul/pull/332)]
+
+# 1.0.1 (April 12th, 2019)
+
 IMPROVEMENTS
 
 * InMemTransport: Add timeout for sending a message [[GH-313](https://github.com/hashicorp/raft/pull/313)]
 * ensure 'make deps' downloads test dependencies like testify [[GH-310](https://github.com/hashicorp/raft/pull/310)]
 * Clarifies function of CommitTimeout
[[GH-309](https://github.com/hashicorp/raft/pull/309)] - +* Add additional metrics regarding log dispatching and committal [[GH-316](https://github.com/hashicorp/raft/pull/316)] # 1.0.0 (October 3rd, 2017) diff --git a/vendor/github.com/hashicorp/raft/Makefile b/vendor/github.com/hashicorp/raft/Makefile index 46849d88c..85b6f38bc 100644 --- a/vendor/github.com/hashicorp/raft/Makefile +++ b/vendor/github.com/hashicorp/raft/Makefile @@ -1,7 +1,7 @@ DEPS = $(go list -f '{{range .TestImports}}{{.}} {{end}}' ./...) test: - go test -timeout=60s . + go test -timeout=60s -race . integ: test INTEG_TESTS=yes go test -timeout=25s -run=Integ . diff --git a/vendor/github.com/hashicorp/raft/api.go b/vendor/github.com/hashicorp/raft/api.go index 03a99614e..dae7f5dc2 100644 --- a/vendor/github.com/hashicorp/raft/api.go +++ b/vendor/github.com/hashicorp/raft/api.go @@ -4,12 +4,13 @@ import ( "errors" "fmt" "io" - "log" "os" "strconv" "sync" "time" + "github.com/hashicorp/go-hclog" + "github.com/armon/go-metrics" ) @@ -48,6 +49,10 @@ var ( // ErrCantBootstrap is returned when attempt is made to bootstrap a // cluster that already has state present. ErrCantBootstrap = errors.New("bootstrap only works on new clusters") + + // ErrLeadershipTransferInProgress is returned when the leader is rejecting + // client requests because it is attempting to transfer leadership. + ErrLeadershipTransferInProgress = errors.New("leadership transfer in progress") ) // Raft implements a Raft node. @@ -96,6 +101,12 @@ type Raft struct { // leaderState used only while state is leader leaderState leaderState + // candidateFromLeadershipTransfer is used to indicate that this server became + // candidate because the leader tries to transfer leadership. This flag is + // used in RequestVoteRequest to express that a leadership transfer is going + // on. + candidateFromLeadershipTransfer bool + // Stores our local server ID, used to avoid sending RPCs to ourself localID ServerID @@ -103,7 +114,7 @@ type Raft struct { localAddr ServerAddress // Used for our logging - logger *log.Logger + logger hclog.Logger // LogStore provides durable storage for logs logs LogStore @@ -156,6 +167,10 @@ type Raft struct { // is indexed by an artificial ID which is used for deregistration. observersLock sync.RWMutex observers map[uint64]*Observer + + // leadershipTransferCh is used to start a leadership transfer from outside of + // the main thread. + leadershipTransferCh chan *leadershipTransferFuture } // BootstrapCluster initializes a server's storage with the given cluster @@ -394,14 +409,19 @@ func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps Sna } // Ensure we have a LogOutput. - var logger *log.Logger + var logger hclog.Logger if conf.Logger != nil { logger = conf.Logger } else { if conf.LogOutput == nil { conf.LogOutput = os.Stderr } - logger = log.New(conf.LogOutput, "", log.LstdFlags) + + logger = hclog.New(&hclog.LoggerOptions{ + Name: "raft", + Level: hclog.LevelFromString(conf.LogLevel), + Output: conf.LogOutput, + }) } // Try to restore the current term. @@ -437,17 +457,17 @@ func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps Sna // Create Raft struct. 
r := &Raft{ - protocolVersion: protocolVersion, - applyCh: make(chan *logFuture), - conf: *conf, - fsm: fsm, - fsmMutateCh: make(chan interface{}, 128), - fsmSnapshotCh: make(chan *reqSnapshotFuture), - leaderCh: make(chan bool), - localID: localID, - localAddr: localAddr, - logger: logger, - logs: logs, + protocolVersion: protocolVersion, + applyCh: make(chan *logFuture), + conf: *conf, + fsm: fsm, + fsmMutateCh: make(chan interface{}, 128), + fsmSnapshotCh: make(chan *reqSnapshotFuture), + leaderCh: make(chan bool), + localID: localID, + localAddr: localAddr, + logger: logger, + logs: logs, configurationChangeCh: make(chan *configurationChangeFuture), configurations: configurations{}, rpcCh: trans.Consumer(), @@ -461,6 +481,7 @@ func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps Sna configurationsCh: make(chan *configurationsFuture, 8), bootstrapCh: make(chan *bootstrapFuture), observers: make(map[uint64]*Observer), + leadershipTransferCh: make(chan *leadershipTransferFuture, 1), } // Initialize as a follower. @@ -487,14 +508,13 @@ func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps Sna for index := snapshotIndex + 1; index <= lastLog.Index; index++ { var entry Log if err := r.logs.GetLog(index, &entry); err != nil { - r.logger.Printf("[ERR] raft: Failed to get log at %d: %v", index, err) + r.logger.Error(fmt.Sprintf("Failed to get log at %d: %v", index, err)) panic(err) } r.processConfigurationLogEntry(&entry) } - - r.logger.Printf("[INFO] raft: Initial configuration (index=%d): %+v", - r.configurations.latestIndex, r.configurations.latest.Servers) + r.logger.Info(fmt.Sprintf("Initial configuration (index=%d): %+v", + r.configurations.latestIndex, r.configurations.latest.Servers)) // Setup a heartbeat fast-path to avoid head-of-line // blocking where possible. It MUST be safe for this @@ -514,7 +534,7 @@ func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps Sna func (r *Raft) restoreSnapshot() error { snapshots, err := r.snapshots.List() if err != nil { - r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) + r.logger.Error(fmt.Sprintf("Failed to list snapshots: %v", err)) return err } @@ -522,18 +542,18 @@ func (r *Raft) restoreSnapshot() error { for _, snapshot := range snapshots { _, source, err := r.snapshots.Open(snapshot.ID) if err != nil { - r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapshot.ID, err) + r.logger.Error(fmt.Sprintf("Failed to open snapshot %v: %v", snapshot.ID, err)) continue } defer source.Close() if err := r.fsm.Restore(source); err != nil { - r.logger.Printf("[ERR] raft: Failed to restore snapshot %v: %v", snapshot.ID, err) + r.logger.Error(fmt.Sprintf("Failed to restore snapshot %v: %v", snapshot.ID, err)) continue } // Log success - r.logger.Printf("[INFO] raft: Restored from snapshot %v", snapshot.ID) + r.logger.Info(fmt.Sprintf("Restored from snapshot %v", snapshot.ID)) // Update the lastApplied so we don't replay old logs r.setLastApplied(snapshot.Index) @@ -914,8 +934,9 @@ func (r *Raft) LastContact() time.Time { // "last_snapshot_index", "last_snapshot_term", // "latest_configuration", "last_contact", and "num_peers". // -// The value of "state" is a numerical value representing a -// RaftState const. +// The value of "state" is a numeric constant representing one of +// the possible leadership states the node is in at any given time. +// the possible states are: "Follower", "Candidate", "Leader", "Shutdown". 
 //
 // The value of "latest_configuration" is a string which contains
 // the id of each server, its suffrage status, and its address.
@@ -955,7 +976,7 @@ func (r *Raft) Stats() map[string]string {
 
 	future := r.GetConfiguration()
 	if err := future.Error(); err != nil {
-		r.logger.Printf("[WARN] raft: could not get configuration for Stats: %v", err)
+		r.logger.Warn(fmt.Sprintf("could not get configuration for Stats: %v", err))
 	} else {
 		configuration := future.Configuration()
 		s["latest_configuration_index"] = toString(future.Index())
@@ -1006,3 +1027,38 @@ func (r *Raft) LastIndex() uint64 {
 func (r *Raft) AppliedIndex() uint64 {
 	return r.getLastApplied()
 }
+
+// LeadershipTransfer will transfer leadership to a server in the cluster.
+// This can only be called from the leader, or it will fail. The leader will
+// stop accepting client requests, make sure the target server is up to date,
+// and start the transfer with a TimeoutNow message. This message has the same
+// effect as if the election timeout on the target server fires. Since
+// it is unlikely that another server is starting an election, it is very
+// likely that the target server is able to win the election. Note that raft
+// protocol version 3 is not sufficient to use LeadershipTransfer; a recent
+// version of the library that includes this feature has to be used. Using
+// leadership transfer is safe, however, in a cluster where not every node
+// has the latest version. If a follower cannot be promoted, it will fail
+// gracefully.
+func (r *Raft) LeadershipTransfer() Future {
+	if r.protocolVersion < 3 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.initiateLeadershipTransfer(nil, nil)
+}
+
+// LeadershipTransferToServer does the same as LeadershipTransfer but takes a
+// server in the arguments in case leadership should be transferred to a
+// specific server in the cluster. Note that raft protocol version 3 is not
+// sufficient to use LeadershipTransfer; a recent version of the library that
+// includes this feature has to be used. Using leadership transfer is safe,
+// however, in a cluster where not every node has the latest version. If a
+// follower cannot be promoted, it will fail gracefully.
+func (r *Raft) LeadershipTransferToServer(id ServerID, address ServerAddress) Future {
+	if r.protocolVersion < 3 {
+		return errorFuture{ErrUnsupportedProtocol}
+	}
+
+	return r.initiateLeadershipTransfer(&id, &address)
+}
diff --git a/vendor/github.com/hashicorp/raft/commands.go b/vendor/github.com/hashicorp/raft/commands.go
index 5d89e7bcd..17416311d 100644
--- a/vendor/github.com/hashicorp/raft/commands.go
+++ b/vendor/github.com/hashicorp/raft/commands.go
@@ -76,6 +76,11 @@ type RequestVoteRequest struct {
 	// Used to ensure safety
 	LastLogIndex uint64
 	LastLogTerm  uint64
+
+	// Used to indicate to peers if this vote was triggered by a leadership
+	// transfer. It is required for leadership transfer to work, because servers
+	// wouldn't vote otherwise if they are aware of an existing leader.
+	LeadershipTransfer bool
 }
 
 // See WithRPCHeader.
@@ -149,3 +154,24 @@ type InstallSnapshotResponse struct {
 func (r *InstallSnapshotResponse) GetRPCHeader() RPCHeader {
 	return r.RPCHeader
 }
+
+// TimeoutNowRequest is the command used by a leader to signal another server to
+// start an election.
+type TimeoutNowRequest struct {
+	RPCHeader
+}
+
+// See WithRPCHeader.
+func (r *TimeoutNowRequest) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
+
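A short usage sketch of the new API (names from this diff; the target ID and address are hypothetical): initiate a transfer and wait on the returned future.

	future := r.LeadershipTransfer()
	if err := future.Error(); err != nil {
		// e.g. ErrUnsupportedProtocol when running protocol versions < 3.
		log.Printf("leadership transfer failed: %v", err)
	}

	// Or hand leadership to a specific peer:
	f := r.LeadershipTransferToServer(raft.ServerID("node2"), raft.ServerAddress("10.0.0.2:8300"))
	if err := f.Error(); err != nil {
		log.Printf("targeted transfer failed: %v", err)
	}
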
+// TimeoutNowResponse is the response to TimeoutNowRequest.
+type TimeoutNowResponse struct {
+	RPCHeader
+}
+
+// See WithRPCHeader.
+func (r *TimeoutNowResponse) GetRPCHeader() RPCHeader {
+	return r.RPCHeader
+}
diff --git a/vendor/github.com/hashicorp/raft/config.go b/vendor/github.com/hashicorp/raft/config.go
index c1ce03ac2..66d4d0fa0 100644
--- a/vendor/github.com/hashicorp/raft/config.go
+++ b/vendor/github.com/hashicorp/raft/config.go
@@ -3,8 +3,9 @@ package raft
 import (
 	"fmt"
 	"io"
-	"log"
 	"time"
+
+	"github.com/hashicorp/go-hclog"
 )
 
 // These are the versions of the protocol (which includes RPC messages as
@@ -37,7 +38,8 @@ import (
 //
 // 1. Remove the server from the cluster with RemoveServer, using its network
 // address as its ServerID.
-// 2. Update the server's config to a better ID (restarting the server).
+// 2. Update the server's config to use a UUID or something else that is
+//    not tied to the machine as the ServerID (restarting the server).
 // 3. Add the server back to the cluster with AddVoter, using its new ID.
 //
 // You can do this during the rolling upgrade from N+1 to N+2 of your app, or
@@ -190,9 +192,13 @@ type Config struct {
 	// Defaults to os.Stderr.
 	LogOutput io.Writer
 
-	// Logger is a user-provided logger. If nil, a logger writing to LogOutput
-	// is used.
-	Logger *log.Logger
+	// LogLevel represents a log level. If the string does not match a known
+	// level, hclog.NoLevel is assumed.
+	LogLevel string
+
+	// Logger is a user-provided hclog logger. If nil, a logger writing to
+	// LogOutput with LogLevel is used.
+	Logger hclog.Logger
 }
 
 // DefaultConfig returns a Config with usable defaults.
@@ -208,6 +214,7 @@ func DefaultConfig() *Config {
 		SnapshotInterval:   120 * time.Second,
 		SnapshotThreshold:  8192,
 		LeaderLeaseTimeout: 500 * time.Millisecond,
+		LogLevel:           "DEBUG",
 	}
 }
diff --git a/vendor/github.com/hashicorp/raft/future.go b/vendor/github.com/hashicorp/raft/future.go
index fac59a5cc..cc1e905ef 100644
--- a/vendor/github.com/hashicorp/raft/future.go
+++ b/vendor/github.com/hashicorp/raft/future.go
@@ -58,6 +58,12 @@ type SnapshotFuture interface {
 	Open() (*SnapshotMeta, io.ReadCloser, error)
 }
 
+// LeadershipTransferFuture is used for waiting on a user-triggered leadership
+// transfer to complete.
+type LeadershipTransferFuture interface {
+	Future
+}
+
 // errorFuture is used to return a static error.
 type errorFuture struct {
 	err error
@@ -227,6 +233,15 @@ type verifyFuture struct {
 	voteLock sync.Mutex
 }
 
+// leadershipTransferFuture is used to track the progress of a leadership
+// transfer internally.
+type leadershipTransferFuture struct {
+	deferError
+
+	ID      *ServerID
+	Address *ServerAddress
+}
+
 // configurationsFuture is used to retrieve the current configurations. This is
 // used to allow safe access to this information outside of the main thread.
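A hedged sketch of how these config fields interact when constructing a node (nodeID and the store variables are hypothetical placeholders): if Logger is nil, NewRaft builds an hclog logger from LogLevel and LogOutput, so either form works.

	conf := raft.DefaultConfig()            // LogLevel defaults to "DEBUG"
	conf.LocalID = raft.ServerID(nodeID)
	conf.LogLevel = "INFO"                  // parsed with hclog.LevelFromString

	// Equivalent, but with an explicit logger:
	conf.Logger = hclog.New(&hclog.LoggerOptions{Name: "raft", Level: hclog.Info})

	r, err := raft.NewRaft(conf, fsm, logStore, stableStore, snaps, transport)
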
type configurationsFuture struct { diff --git a/vendor/github.com/hashicorp/raft/go.mod b/vendor/github.com/hashicorp/raft/go.mod new file mode 100644 index 000000000..09803b688 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/go.mod @@ -0,0 +1,10 @@ +module github.com/hashicorp/raft + +go 1.12 + +require ( + github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 + github.com/hashicorp/go-hclog v0.9.1 + github.com/hashicorp/go-msgpack v0.5.5 + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/hashicorp/raft/go.sum b/vendor/github.com/hashicorp/raft/go.sum new file mode 100644 index 000000000..b06b6a7a4 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/go.sum @@ -0,0 +1,37 @@ +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod 
h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/hashicorp/raft/inmem_snapshot.go b/vendor/github.com/hashicorp/raft/inmem_snapshot.go index 3aa92b3e9..ad52f93ae 100644 --- a/vendor/github.com/hashicorp/raft/inmem_snapshot.go +++ b/vendor/github.com/hashicorp/raft/inmem_snapshot.go @@ -82,7 +82,10 @@ func (m *InmemSnapshotStore) Open(id string) (*SnapshotMeta, io.ReadCloser, erro return nil, nil, fmt.Errorf("[ERR] snapshot: failed to open snapshot id: %s", id) } - return &m.latest.meta, ioutil.NopCloser(m.latest.contents), nil + // Make a copy of the contents, since a bytes.Buffer can only be read + // once. + contents := bytes.NewBuffer(m.latest.contents.Bytes()) + return &m.latest.meta, ioutil.NopCloser(contents), nil } // Write appends the given bytes to the snapshot contents diff --git a/vendor/github.com/hashicorp/raft/inmem_transport.go b/vendor/github.com/hashicorp/raft/inmem_transport.go index bb42eeb68..7f493f487 100644 --- a/vendor/github.com/hashicorp/raft/inmem_transport.go +++ b/vendor/github.com/hashicorp/raft/inmem_transport.go @@ -135,6 +135,19 @@ func (i *InmemTransport) InstallSnapshot(id ServerID, target ServerAddress, args return nil } +// TimeoutNow implements the Transport interface. +func (i *InmemTransport) TimeoutNow(id ServerID, target ServerAddress, args *TimeoutNowRequest, resp *TimeoutNowResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, 10*i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*TimeoutNowResponse) + *resp = *out + return nil +} + func (i *InmemTransport) makeRPC(target ServerAddress, args interface{}, r io.Reader, timeout time.Duration) (rpcResp RPCResponse, err error) { i.RLock() peer, ok := i.peers[target] diff --git a/vendor/github.com/hashicorp/raft/net_transport.go b/vendor/github.com/hashicorp/raft/net_transport.go index 4f1f101e0..523fa698e 100644 --- a/vendor/github.com/hashicorp/raft/net_transport.go +++ b/vendor/github.com/hashicorp/raft/net_transport.go @@ -19,6 +19,7 @@ const ( rpcAppendEntries uint8 = iota rpcRequestVote rpcInstallSnapshot + rpcTimeoutNow // DefaultTimeoutScale is the default TimeoutScale in a NetworkTransport. DefaultTimeoutScale = 256 * 1024 // 256KB @@ -459,6 +460,11 @@ func (n *NetworkTransport) DecodePeer(buf []byte) ServerAddress { return ServerAddress(buf) } +// TimeoutNow implements the Transport interface. +func (n *NetworkTransport) TimeoutNow(id ServerID, target ServerAddress, args *TimeoutNowRequest, resp *TimeoutNowResponse) error { + return n.genericRPC(id, target, rpcTimeoutNow, args, resp) +} + // listen is used to handling incoming connections. 
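The InmemSnapshotStore fix above exists because a bytes.Buffer is drained by reading, so a second Open would otherwise see an empty snapshot. A tiny illustration of the behavior, standard library only:

	var b bytes.Buffer
	b.WriteString("snapshot contents")
	first, _ := ioutil.ReadAll(&b)  // reads "snapshot contents"
	second, _ := ioutil.ReadAll(&b) // reads nothing: the buffer is spent
	_, _ = first, second
	// The fix copies the bytes up front (bytes.NewBuffer(contents.Bytes()))
	// so every Open call gets its own independent reader.
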
func (n *NetworkTransport) listen() { const baseDelay = 5 * time.Millisecond @@ -577,6 +583,13 @@ func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, en rpc.Command = &req rpc.Reader = io.LimitReader(r, req.Size) + case rpcTimeoutNow: + var req TimeoutNowRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + default: return fmt.Errorf("unknown rpc type %d", rpcType) } diff --git a/vendor/github.com/hashicorp/raft/observer.go b/vendor/github.com/hashicorp/raft/observer.go index bce17ef19..2d4f37db1 100644 --- a/vendor/github.com/hashicorp/raft/observer.go +++ b/vendor/github.com/hashicorp/raft/observer.go @@ -9,7 +9,10 @@ type Observation struct { // Raft holds the Raft instance generating the observation. Raft *Raft // Data holds observation-specific data. Possible types are - // *RequestVoteRequest and RaftState. + // *RequestVoteRequest + // RaftState + // PeerObservation + // LeaderObservation Data interface{} } @@ -18,6 +21,12 @@ type LeaderObservation struct { leader ServerAddress } +// PeerObservation is sent to observers when peers change. +type PeerObservation struct { + Removed bool + Peer Server +} + // nextObserverId is used to provide a unique ID for each observer to aid in // deregistration. var nextObserverID uint64 diff --git a/vendor/github.com/hashicorp/raft/raft.go b/vendor/github.com/hashicorp/raft/raft.go index a6e0d72c0..f6fa62289 100644 --- a/vendor/github.com/hashicorp/raft/raft.go +++ b/vendor/github.com/hashicorp/raft/raft.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "io/ioutil" + "sync/atomic" "time" "github.com/armon/go-metrics" @@ -77,12 +78,13 @@ type commitTuple struct { // leaderState is state that is used while we are a leader. type leaderState struct { - commitCh chan struct{} - commitment *commitment - inflight *list.List // list of logFuture in log index order - replState map[ServerID]*followerReplication - notify map[*verifyFuture]struct{} - stepDown chan struct{} + leadershipTransferInProgress int32 // indicates that a leadership transfer is in progress. + commitCh chan struct{} + commitment *commitment + inflight *list.List // list of logFuture in log index order + replState map[ServerID]*followerReplication + notify map[*verifyFuture]struct{} + stepDown chan struct{} } // setLeader is used to modify the current leader of the cluster @@ -145,10 +147,11 @@ func (r *Raft) run() { // runFollower runs the FSM for a follower. 
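The new PeerObservation type above can be consumed through raft's pre-existing observer registration API (a hedged sketch; NewObserver and RegisterObserver are not part of this diff):

	obsCh := make(chan raft.Observation, 16)
	filter := func(o *raft.Observation) bool {
		_, ok := o.Data.(raft.PeerObservation)
		return ok
	}
	r.RegisterObserver(raft.NewObserver(obsCh, false, filter))

	go func() {
		for o := range obsCh {
			po := o.Data.(raft.PeerObservation)
			log.Printf("replication peer %s (removed=%v)", po.Peer.ID, po.Removed)
		}
	}()
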
 func (r *Raft) runFollower() {
 	didWarn := false
-	r.logger.Printf("[INFO] raft: %v entering Follower state (Leader: %q)", r, r.Leader())
+	r.logger.Info(fmt.Sprintf("%v entering Follower state (Leader: %q)", r, r.Leader()))
 	metrics.IncrCounter([]string{"raft", "state", "follower"}, 1)
 	heartbeatTimer := randomTimeout(r.conf.HeartbeatTimeout)
-	for {
+
+	for r.getState() == Follower {
 		select {
 		case rpc := <-r.rpcCh:
 			r.processRPC(rpc)
@@ -169,6 +172,10 @@ func (r *Raft) runFollower() {
 			// Reject any restores since we are not the leader
 			r.respond(ErrNotLeader)
 
+		case r := <-r.leadershipTransferCh:
+			// Reject any operations since we are not the leader
+			r.respond(ErrNotLeader)
+
 		case c := <-r.configurationsCh:
 			c.configurations = r.configurations.Clone()
 			c.respond(nil)
@@ -192,17 +199,17 @@ func (r *Raft) runFollower() {
 
 			if r.configurations.latestIndex == 0 {
 				if !didWarn {
-					r.logger.Printf("[WARN] raft: no known peers, aborting election")
+					r.logger.Warn("no known peers, aborting election")
 					didWarn = true
 				}
 			} else if r.configurations.latestIndex == r.configurations.committedIndex &&
 				!hasVote(r.configurations.latest, r.localID) {
 				if !didWarn {
-					r.logger.Printf("[WARN] raft: not part of stable configuration, aborting election")
+					r.logger.Warn("not part of stable configuration, aborting election")
 					didWarn = true
 				}
 			} else {
-				r.logger.Printf(`[WARN] raft: Heartbeat timeout from %q reached, starting election`, lastLeader)
+				r.logger.Warn(fmt.Sprintf("Heartbeat timeout from %q reached, starting election", lastLeader))
 				metrics.IncrCounter([]string{"raft", "transition", "heartbeat_timeout"}, 1)
 				r.setState(Candidate)
 				return
@@ -238,18 +245,25 @@ func (r *Raft) liveBootstrap(configuration Configuration) error {
 
 // runCandidate runs the FSM for a candidate.
 func (r *Raft) runCandidate() {
-	r.logger.Printf("[INFO] raft: %v entering Candidate state in term %v",
-		r, r.getCurrentTerm()+1)
+	r.logger.Info(fmt.Sprintf("%v entering Candidate state in term %v", r, r.getCurrentTerm()+1))
 	metrics.IncrCounter([]string{"raft", "state", "candidate"}, 1)
 
 	// Start vote for us, and set a timeout
 	voteCh := r.electSelf()
+
+	// Make sure the leadership transfer flag is reset after each run. Having this
+	// flag will set the field LeadershipTransfer in a RequestVoteRequest to true,
+	// which will make other servers vote even though they have a leader already.
+	// It is important to reset that flag, because this privilege could be abused
+	// otherwise.
+	defer func() { r.candidateFromLeadershipTransfer = false }()
+
 	electionTimer := randomTimeout(r.conf.ElectionTimeout)
 
 	// Tally the votes, need a simple majority
 	grantedVotes := 0
 	votesNeeded := r.quorumSize()
-	r.logger.Printf("[DEBUG] raft: Votes needed: %d", votesNeeded)
+	r.logger.Debug(fmt.Sprintf("Votes needed: %d", votesNeeded))
 
 	for r.getState() == Candidate {
 		select {
@@ -259,7 +273,7 @@ func (r *Raft) runCandidate() {
 		case vote := <-voteCh:
 			// Check if the term is greater than ours, bail
 			if vote.Term > r.getCurrentTerm() {
-				r.logger.Printf("[DEBUG] raft: Newer term discovered, fallback to follower")
+				r.logger.Debug("Newer term discovered, fallback to follower")
 				r.setState(Follower)
 				r.setCurrentTerm(vote.Term)
 				return
@@ -268,13 +282,13 @@ func (r *Raft) runCandidate() {
 			// Check if the vote is granted
 			if vote.Granted {
 				grantedVotes++
-				r.logger.Printf("[DEBUG] raft: Vote granted from %s in term %v. Tally: %d",
-					vote.voterID, vote.Term, grantedVotes)
+				r.logger.Debug(fmt.Sprintf("Vote granted from %s in term %v.
Tally: %d", + vote.voterID, vote.Term, grantedVotes)) } // Check if we've become the leader if grantedVotes >= votesNeeded { - r.logger.Printf("[INFO] raft: Election won. Tally: %d", grantedVotes) + r.logger.Info(fmt.Sprintf("Election won. Tally: %d", grantedVotes)) r.setState(Leader) r.setLeader(r.localAddr) return @@ -306,7 +320,7 @@ func (r *Raft) runCandidate() { case <-electionTimer: // Election failed! Restart the election. We simply return, // which will kick us back into runCandidate - r.logger.Printf("[WARN] raft: Election timeout reached, restarting election") + r.logger.Warn("Election timeout reached, restarting election") return case <-r.shutdownCh: @@ -315,10 +329,37 @@ func (r *Raft) runCandidate() { } } +func (r *Raft) setLeadershipTransferInProgress(v bool) { + if v { + atomic.StoreInt32(&r.leaderState.leadershipTransferInProgress, 1) + } else { + atomic.StoreInt32(&r.leaderState.leadershipTransferInProgress, 0) + } +} + +func (r *Raft) getLeadershipTransferInProgress() bool { + v := atomic.LoadInt32(&r.leaderState.leadershipTransferInProgress) + if v == 1 { + return true + } + return false +} + +func (r *Raft) setupLeaderState() { + r.leaderState.commitCh = make(chan struct{}, 1) + r.leaderState.commitment = newCommitment(r.leaderState.commitCh, + r.configurations.latest, + r.getLastIndex()+1 /* first index that may be committed in this term */) + r.leaderState.inflight = list.New() + r.leaderState.replState = make(map[ServerID]*followerReplication) + r.leaderState.notify = make(map[*verifyFuture]struct{}) + r.leaderState.stepDown = make(chan struct{}, 1) +} + // runLeader runs the FSM for a leader. Do the setup here and drop into // the leaderLoop for the hot loop. func (r *Raft) runLeader() { - r.logger.Printf("[INFO] raft: %v entering Leader state", r) + r.logger.Info(fmt.Sprintf("%v entering Leader state", r)) metrics.IncrCounter([]string{"raft", "state", "leader"}, 1) // Notify that we are the leader @@ -332,15 +373,9 @@ func (r *Raft) runLeader() { } } - // Setup leader state - r.leaderState.commitCh = make(chan struct{}, 1) - r.leaderState.commitment = newCommitment(r.leaderState.commitCh, - r.configurations.latest, - r.getLastIndex()+1 /* first index that may be committed in this term */) - r.leaderState.inflight = list.New() - r.leaderState.replState = make(map[ServerID]*followerReplication) - r.leaderState.notify = make(map[*verifyFuture]struct{}) - r.leaderState.stepDown = make(chan struct{}, 1) + // setup leader state. This is only supposed to be accessed within the + // leaderloop. 
+ r.setupLeaderState() // Cleanup state on step down defer func() { @@ -435,22 +470,24 @@ func (r *Raft) startStopReplication() { } inConfig[server.ID] = true if _, ok := r.leaderState.replState[server.ID]; !ok { - r.logger.Printf("[INFO] raft: Added peer %v, starting replication", server.ID) + r.logger.Info(fmt.Sprintf("Added peer %v, starting replication", server.ID)) s := &followerReplication{ - peer: server, - commitment: r.leaderState.commitment, - stopCh: make(chan uint64, 1), - triggerCh: make(chan struct{}, 1), - currentTerm: r.getCurrentTerm(), - nextIndex: lastIdx + 1, - lastContact: time.Now(), - notify: make(map[*verifyFuture]struct{}), - notifyCh: make(chan struct{}, 1), - stepDown: r.leaderState.stepDown, + peer: server, + commitment: r.leaderState.commitment, + stopCh: make(chan uint64, 1), + triggerCh: make(chan struct{}, 1), + triggerDeferErrorCh: make(chan *deferError, 1), + currentTerm: r.getCurrentTerm(), + nextIndex: lastIdx + 1, + lastContact: time.Now(), + notify: make(map[*verifyFuture]struct{}), + notifyCh: make(chan struct{}, 1), + stepDown: r.leaderState.stepDown, } r.leaderState.replState[server.ID] = s r.goFunc(func() { r.replicate(s) }) asyncNotifyCh(s.triggerCh) + r.observe(PeerObservation{Peer: server, Removed: false}) } } @@ -460,10 +497,11 @@ func (r *Raft) startStopReplication() { continue } // Replicate up to lastIdx and stop - r.logger.Printf("[INFO] raft: Removed peer %v, stopping replication after %v", serverID, lastIdx) + r.logger.Info(fmt.Sprintf("Removed peer %v, stopping replication after %v", serverID, lastIdx)) repl.stopCh <- lastIdx close(repl.stopCh) delete(r.leaderState.replState, serverID) + r.observe(PeerObservation{Peer: repl.peer, Removed: true}) } } @@ -495,8 +533,8 @@ func (r *Raft) leaderLoop() { // only a single peer (ourself) and replicating to an undefined set // of peers. stepDown := false - lease := time.After(r.conf.LeaderLeaseTimeout) + for r.getState() == Leader { select { case rpc := <-r.rpcCh: @@ -505,6 +543,74 @@ func (r *Raft) leaderLoop() { case <-r.leaderState.stepDown: r.setState(Follower) + case future := <-r.leadershipTransferCh: + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + future.respond(ErrLeadershipTransferInProgress) + continue + } + r.logger.Debug("starting leadership transfer", "id", future.ID, "address", future.Address) + + // When we are leaving leaderLoop, we are no longer + // leader, so we should stop transferring. + leftLeaderLoop := make(chan struct{}) + defer func() { close(leftLeaderLoop) }() + + stopCh := make(chan struct{}) + doneCh := make(chan error, 1) + + // This is intentionally being setup outside of the + // leadershipTransfer function. Because the TimeoutNow + // call is blocking and there is no way to abort that + // in case eg the timer expires. + // The leadershipTransfer function is controlled with + // the stopCh and doneCh. 
+ go func() { + select { + case <-time.After(r.conf.ElectionTimeout): + close(stopCh) + err := fmt.Errorf("leadership transfer timeout") + r.logger.Debug(err.Error()) + future.respond(err) + <-doneCh + case <-leftLeaderLoop: + close(stopCh) + err := fmt.Errorf("lost leadership during transfer (expected)") + r.logger.Debug(err.Error()) + future.respond(nil) + <-doneCh + case err := <-doneCh: + if err != nil { + r.logger.Debug(err.Error()) + } + future.respond(err) + } + }() + + // leaderState.replState is accessed here before + // starting leadership transfer asynchronously because + // leaderState is only supposed to be accessed in the + // leaderloop. + id := future.ID + address := future.Address + if id == nil { + s := r.pickServer() + if s != nil { + id = &s.ID + address = &s.Address + } else { + doneCh <- fmt.Errorf("cannot find peer") + continue + } + } + state, ok := r.leaderState.replState[*id] + if !ok { + doneCh <- fmt.Errorf("cannot find replication state for %v", id) + continue + } + + go r.leadershipTransfer(*id, *address, state, stopCh, doneCh) + case <-r.leaderState.commitCh: // Process the newly committed entries oldCommitIndex := r.getCommitIndex() @@ -550,10 +656,10 @@ func (r *Raft) leaderLoop() { if stepDown { if r.conf.ShutdownOnRemove { - r.logger.Printf("[INFO] raft: Removed ourself, shutting down") + r.logger.Info("Removed ourself, shutting down") r.Shutdown() } else { - r.logger.Printf("[INFO] raft: Removed ourself, transitioning to follower") + r.logger.Info("Removed ourself, transitioning to follower") r.setState(Follower) } } @@ -565,7 +671,7 @@ func (r *Raft) leaderLoop() { } else if v.votes < v.quorumSize { // Early return, means there must be a new leader - r.logger.Printf("[WARN] raft: New leader elected, stepping down") + r.logger.Warn("New leader elected, stepping down") r.setState(Follower) delete(r.leaderState.notify, v) for _, repl := range r.leaderState.replState { @@ -583,20 +689,40 @@ func (r *Raft) leaderLoop() { } case future := <-r.userRestoreCh: + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + future.respond(ErrLeadershipTransferInProgress) + continue + } err := r.restoreUserSnapshot(future.meta, future.reader) future.respond(err) - case c := <-r.configurationsCh: - c.configurations = r.configurations.Clone() - c.respond(nil) + case future := <-r.configurationsCh: + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + future.respond(ErrLeadershipTransferInProgress) + continue + } + future.configurations = r.configurations.Clone() + future.respond(nil) case future := <-r.configurationChangeChIfStable(): + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + future.respond(ErrLeadershipTransferInProgress) + continue + } r.appendConfigurationEntry(future) case b := <-r.bootstrapCh: b.respond(ErrCantBootstrap) case newLog := <-r.applyCh: + if r.getLeadershipTransferInProgress() { + r.logger.Debug(ErrLeadershipTransferInProgress.Error()) + newLog.respond(ErrLeadershipTransferInProgress) + continue + } // Group commit, gather all the ready commits ready := []*logFuture{newLog} for i := 0; i < r.conf.MaxAppendEntries; i++ { @@ -664,6 +790,54 @@ func (r *Raft) verifyLeader(v *verifyFuture) { } } +// leadershipTransfer is doing the heavy lifting for the leadership transfer. 
+func (r *Raft) leadershipTransfer(id ServerID, address ServerAddress, repl *followerReplication, stopCh chan struct{}, doneCh chan error) { + + // make sure we are not already stopped + select { + case <-stopCh: + doneCh <- nil + return + default: + } + + // Step 1: set this field, which stops this leader from responding to any client requests. + r.setLeadershipTransferInProgress(true) + defer func() { r.setLeadershipTransferInProgress(false) }() + + for atomic.LoadUint64(&repl.nextIndex) <= r.getLastIndex() { + err := &deferError{} + err.init() + repl.triggerDeferErrorCh <- err + select { + case err := <-err.errCh: + if err != nil { + doneCh <- err + return + } + case <-stopCh: + doneCh <- nil + return + } + } + + // Step 2 is not needed in this implementation: chapter 6.4.1 of the + // thesis describes using clocks to reduce messaging for read-only + // queries. If that were implemented, the lease would have to be reset + // as well when leadership is transferred. This implementation also has + // a lease, but it serves another purpose and doesn't need to be reset: + // it is set up in a similar way to the one in the thesis, but in + // practice it's just a timer that tells the leader how often to check + // that heartbeats are still coming in. + + // Step 3: send TimeoutNow message to target server. + err := r.trans.TimeoutNow(id, address, &TimeoutNowRequest{RPCHeader: r.getRPCHeader()}, &TimeoutNowResponse{}) + if err != nil { + err = fmt.Errorf("failed to make TimeoutNow RPC to %v: %v", id, err) + } + doneCh <- err +} + // checkLeaderLease is used to check if we can contact a quorum of nodes // within the last leader lease interval. If not, we need to step down, // as we may have lost connectivity. Returns the maximum duration without @@ -685,9 +859,9 @@ func (r *Raft) checkLeaderLease() time.Duration { } else { // Log at least once at high value, then debug. Otherwise it gets very verbose. if diff <= 3*r.conf.LeaderLeaseTimeout { - r.logger.Printf("[WARN] raft: Failed to contact %v in %v", peer, diff) + r.logger.Warn(fmt.Sprintf("Failed to contact %v in %v", peer, diff)) } else { - r.logger.Printf("[DEBUG] raft: Failed to contact %v in %v", peer, diff) + r.logger.Debug(fmt.Sprintf("Failed to contact %v in %v", peer, diff)) } } metrics.AddSample([]string{"raft", "leader", "lastContact"}, float32(diff/time.Millisecond)) @@ -696,7 +870,7 @@ func (r *Raft) checkLeaderLease() time.Duration { // Verify we can contact a quorum quorum := r.quorumSize() if contacted < quorum { - r.logger.Printf("[WARN] raft: Failed to contact quorum of nodes, stepping down") + r.logger.Warn("Failed to contact quorum of nodes, stepping down") r.setState(Follower) metrics.IncrCounter([]string{"raft", "transition", "leader_lease_timeout"}, 1) } @@ -784,7 +958,7 @@ func (r *Raft) restoreUserSnapshot(meta *SnapshotMeta, reader io.Reader) error { if err := sink.Close(); err != nil { return fmt.Errorf("failed to close snapshot: %v", err) } - r.logger.Printf("[INFO] raft: Copied %d bytes to local snapshot", n) + r.logger.Info(fmt.Sprintf("Copied %d bytes to local snapshot", n)) // Restore the snapshot into the FSM. If this fails we are in a // bad state so we panic to take ourselves out.
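The function above only fires TimeoutNow once the catch-up loop has driven the target's nextIndex past the leader's last index, so the designated follower is guaranteed an up-to-date log before it starts its election. From application code the whole exchange is hidden behind a future. A minimal sketch of driving it, assuming only the future-based LeadershipTransfer API this patch exposes (the grace period and helper name are illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/raft"
)

// transferLeadership initiates a transfer and bounds our own wait. Raft
// already aborts the transfer after its election timeout; the extra grace
// period only stops this caller from blocking on the future indefinitely.
func transferLeadership(r *raft.Raft, grace time.Duration) error {
	future := r.LeadershipTransfer()

	done := make(chan error, 1)
	go func() { done <- future.Error() }() // Error() blocks until done or failed

	select {
	case err := <-done:
		return err
	case <-time.After(grace):
		// The transfer may still complete in the background; we just
		// stop waiting for it here.
		return fmt.Errorf("still waiting for leadership transfer after %s", grace)
	}
}
```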
@@ -808,7 +982,7 @@ func (r *Raft) restoreUserSnapshot(meta *SnapshotMeta, reader io.Reader) error { r.setLastApplied(lastIndex) r.setLastSnapshot(lastIndex, term) - r.logger.Printf("[INFO] raft: Restored user snapshot (index %d)", lastIndex) + r.logger.Info(fmt.Sprintf("Restored user snapshot (index %d)", lastIndex)) return nil } @@ -822,8 +996,8 @@ func (r *Raft) appendConfigurationEntry(future *configurationChangeFuture) { return } - r.logger.Printf("[INFO] raft: Updating configuration with %s (%v, %v) to %+v", - future.req.command, future.req.serverID, future.req.serverAddress, configuration.Servers) + r.logger.Info(fmt.Sprintf("Updating configuration with %s (%v, %v) to %+v", + future.req.command, future.req.serverID, future.req.serverAddress, configuration.Servers)) // In pre-ID compatibility mode we translate all configuration changes // in to an old remove peer message, which can handle all supported @@ -876,7 +1050,7 @@ func (r *Raft) dispatchLogs(applyLogs []*logFuture) { // Write the log entry locally if err := r.logs.StoreLogs(logs); err != nil { - r.logger.Printf("[ERR] raft: Failed to commit logs: %v", err) + r.logger.Error(fmt.Sprintf("Failed to commit logs: %v", err)) for _, applyLog := range applyLogs { applyLog.respond(err) } @@ -905,7 +1079,7 @@ func (r *Raft) processLogs(index uint64, future *logFuture) { // Reject logs we've applied already lastApplied := r.getLastApplied() if index <= lastApplied { - r.logger.Printf("[WARN] raft: Skipping application of old log: %d", index) + r.logger.Warn(fmt.Sprintf("Skipping application of old log: %d", index)) return } @@ -917,7 +1091,7 @@ func (r *Raft) processLogs(index uint64, future *logFuture) { } else { l := new(Log) if err := r.logs.GetLog(idx, l); err != nil { - r.logger.Printf("[ERR] raft: Failed to get log at %d: %v", idx, err) + r.logger.Error(fmt.Sprintf("Failed to get log at %d: %v", idx, err)) panic(err) } r.processLog(l, nil) @@ -980,8 +1154,10 @@ func (r *Raft) processRPC(rpc RPC) { r.requestVote(rpc, cmd) case *InstallSnapshotRequest: r.installSnapshot(rpc, cmd) + case *TimeoutNowRequest: + r.timeoutNow(rpc, cmd) default: - r.logger.Printf("[ERR] raft: Got unexpected command: %#v", rpc.Command) + r.logger.Error(fmt.Sprintf("Got unexpected command: %#v", rpc.Command)) rpc.Respond(nil, fmt.Errorf("unexpected command")) } } @@ -1004,7 +1180,7 @@ func (r *Raft) processHeartbeat(rpc RPC) { case *AppendEntriesRequest: r.appendEntries(rpc, cmd) default: - r.logger.Printf("[ERR] raft: Expected heartbeat, got command: %#v", rpc.Command) + r.logger.Error(fmt.Sprintf("Expected heartbeat, got command: %#v", rpc.Command)) rpc.Respond(nil, fmt.Errorf("unexpected command")) } } @@ -1054,8 +1230,8 @@ func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) { } else { var prevLog Log if err := r.logs.GetLog(a.PrevLogEntry, &prevLog); err != nil { - r.logger.Printf("[WARN] raft: Failed to get previous log: %d %v (last: %d)", - a.PrevLogEntry, err, lastIdx) + r.logger.Warn(fmt.Sprintf("Failed to get previous log: %d %v (last: %d)", + a.PrevLogEntry, err, lastIdx)) resp.NoRetryBackoff = true return } @@ -1063,8 +1239,8 @@ func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) { } if a.PrevLogTerm != prevLogTerm { - r.logger.Printf("[WARN] raft: Previous log term mis-match: ours: %d remote: %d", - prevLogTerm, a.PrevLogTerm) + r.logger.Warn(fmt.Sprintf("Previous log term mis-match: ours: %d remote: %d", + prevLogTerm, a.PrevLogTerm)) resp.NoRetryBackoff = true return } @@ -1084,14 +1260,14 @@ func (r *Raft) 
appendEntries(rpc RPC, a *AppendEntriesRequest) { } var storeEntry Log if err := r.logs.GetLog(entry.Index, &storeEntry); err != nil { - r.logger.Printf("[WARN] raft: Failed to get log entry %d: %v", - entry.Index, err) + r.logger.Warn(fmt.Sprintf("Failed to get log entry %d: %v", + entry.Index, err)) return } if entry.Term != storeEntry.Term { - r.logger.Printf("[WARN] raft: Clearing log suffix from %d to %d", entry.Index, lastLogIdx) + r.logger.Warn(fmt.Sprintf("Clearing log suffix from %d to %d", entry.Index, lastLogIdx)) if err := r.logs.DeleteRange(entry.Index, lastLogIdx); err != nil { - r.logger.Printf("[ERR] raft: Failed to clear log suffix: %v", err) + r.logger.Error(fmt.Sprintf("Failed to clear log suffix: %v", err)) return } if entry.Index <= r.configurations.latestIndex { @@ -1106,7 +1282,7 @@ func (r *Raft) appendEntries(rpc RPC, a *AppendEntriesRequest) { if n := len(newEntries); n > 0 { // Append the new entries if err := r.logs.StoreLogs(newEntries); err != nil { - r.logger.Printf("[ERR] raft: Failed to append to logs: %v", err) + r.logger.Error(fmt.Sprintf("Failed to append to logs: %v", err)) // TODO: leaving r.getLastLog() in the wrong // state if there was a truncation above return @@ -1183,11 +1359,14 @@ func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { resp.Peers = encodePeers(r.configurations.latest, r.trans) } - // Check if we have an existing leader [who's not the candidate] + // Check if we have an existing leader [who's not the candidate] and also + // check the LeadershipTransfer flag is set. Usually votes are rejected if + // there is a known leader. But if the leader initiated a leadership transfer, + // vote! candidate := r.trans.DecodePeer(req.Candidate) - if leader := r.Leader(); leader != "" && leader != candidate { - r.logger.Printf("[WARN] raft: Rejecting vote request from %v since we have a leader: %v", - candidate, leader) + if leader := r.Leader(); leader != "" && leader != candidate && !req.LeadershipTransfer { + r.logger.Warn(fmt.Sprintf("Rejecting vote request from %v since we have a leader: %v", + candidate, leader)) return } @@ -1199,6 +1378,7 @@ func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { // Increase the term if we see a newer one if req.Term > r.getCurrentTerm() { // Ensure transition to follower + r.logger.Debug("lost leadership because received a requestvote with newer term") r.setState(Follower) r.setCurrentTerm(req.Term) resp.Term = req.Term @@ -1207,20 +1387,20 @@ func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { // Check if we have voted yet lastVoteTerm, err := r.stable.GetUint64(keyLastVoteTerm) if err != nil && err.Error() != "not found" { - r.logger.Printf("[ERR] raft: Failed to get last vote term: %v", err) + r.logger.Error(fmt.Sprintf("Failed to get last vote term: %v", err)) return } lastVoteCandBytes, err := r.stable.Get(keyLastVoteCand) if err != nil && err.Error() != "not found" { - r.logger.Printf("[ERR] raft: Failed to get last vote candidate: %v", err) + r.logger.Error(fmt.Sprintf("Failed to get last vote candidate: %v", err)) return } // Check if we've voted in this election before if lastVoteTerm == req.Term && lastVoteCandBytes != nil { - r.logger.Printf("[INFO] raft: Duplicate RequestVote for same term: %d", req.Term) + r.logger.Info(fmt.Sprintf("Duplicate RequestVote for same term: %d", req.Term)) if bytes.Compare(lastVoteCandBytes, req.Candidate) == 0 { - r.logger.Printf("[WARN] raft: Duplicate RequestVote from candidate: %s", req.Candidate) + 
r.logger.Warn(fmt.Sprintf("Duplicate RequestVote from candidate: %s", req.Candidate)) resp.Granted = true } return @@ -1229,20 +1409,20 @@ func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { // Reject if their term is older lastIdx, lastTerm := r.getLastEntry() if lastTerm > req.LastLogTerm { - r.logger.Printf("[WARN] raft: Rejecting vote request from %v since our last term is greater (%d, %d)", - candidate, lastTerm, req.LastLogTerm) + r.logger.Warn(fmt.Sprintf("Rejecting vote request from %v since our last term is greater (%d, %d)", + candidate, lastTerm, req.LastLogTerm)) return } if lastTerm == req.LastLogTerm && lastIdx > req.LastLogIndex { - r.logger.Printf("[WARN] raft: Rejecting vote request from %v since our last index is greater (%d, %d)", - candidate, lastIdx, req.LastLogIndex) + r.logger.Warn(fmt.Sprintf("Rejecting vote request from %v since our last index is greater (%d, %d)", + candidate, lastIdx, req.LastLogIndex)) return } // Persist a vote for safety if err := r.persistVote(req.Term, req.Candidate); err != nil { - r.logger.Printf("[ERR] raft: Failed to persist vote: %v", err) + r.logger.Error(fmt.Sprintf("Failed to persist vote: %v", err)) return } @@ -1277,7 +1457,8 @@ func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { // Ignore an older term if req.Term < r.getCurrentTerm() { - r.logger.Printf("[INFO] raft: Ignoring installSnapshot request with older term of %d vs currentTerm %d", req.Term, r.getCurrentTerm()) + r.logger.Info(fmt.Sprintf("Ignoring installSnapshot request with older term of %d vs currentTerm %d", + req.Term, r.getCurrentTerm())) return } @@ -1306,7 +1487,7 @@ func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { sink, err := r.snapshots.Create(version, req.LastLogIndex, req.LastLogTerm, reqConfiguration, reqConfigurationIndex, r.trans) if err != nil { - r.logger.Printf("[ERR] raft: Failed to create snapshot to install: %v", err) + r.logger.Error(fmt.Sprintf("Failed to create snapshot to install: %v", err)) rpcErr = fmt.Errorf("failed to create snapshot: %v", err) return } @@ -1315,7 +1496,7 @@ func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { n, err := io.Copy(sink, rpc.Reader) if err != nil { sink.Cancel() - r.logger.Printf("[ERR] raft: Failed to copy snapshot: %v", err) + r.logger.Error(fmt.Sprintf("Failed to copy snapshot: %v", err)) rpcErr = err return } @@ -1323,18 +1504,18 @@ func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { // Check that we received it all if n != req.Size { sink.Cancel() - r.logger.Printf("[ERR] raft: Failed to receive whole snapshot: %d / %d", n, req.Size) + r.logger.Error(fmt.Sprintf("Failed to receive whole snapshot: %d / %d", n, req.Size)) rpcErr = fmt.Errorf("short read") return } // Finalize the snapshot if err := sink.Close(); err != nil { - r.logger.Printf("[ERR] raft: Failed to finalize snapshot: %v", err) + r.logger.Error(fmt.Sprintf("Failed to finalize snapshot: %v", err)) rpcErr = err return } - r.logger.Printf("[INFO] raft: Copied %d bytes to local snapshot", n) + r.logger.Info(fmt.Sprintf("Copied %d bytes to local snapshot", n)) // Restore snapshot future := &restoreFuture{ID: sink.ID()} @@ -1348,7 +1529,7 @@ func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { // Wait for the restore to happen if err := future.Error(); err != nil { - r.logger.Printf("[ERR] raft: Failed to restore snapshot: %v", err) + r.logger.Error(fmt.Sprintf("Failed to restore snapshot: %v", err)) rpcErr = err return } @@ -1367,10 +1548,10 
@@ func (r *Raft) installSnapshot(rpc RPC, req *InstallSnapshotRequest) { // Compact logs, continue even if this fails if err := r.compactLogs(req.LastLogIndex); err != nil { - r.logger.Printf("[ERR] raft: Failed to compact logs: %v", err) + r.logger.Error(fmt.Sprintf("Failed to compact logs: %v", err)) } - r.logger.Printf("[INFO] raft: Installed remote snapshot") + r.logger.Info("Installed remote snapshot") resp.Success = true r.setLastContact() return @@ -1402,11 +1583,12 @@ func (r *Raft) electSelf() <-chan *voteResult { // Construct the request lastIdx, lastTerm := r.getLastEntry() req := &RequestVoteRequest{ - RPCHeader: r.getRPCHeader(), - Term: r.getCurrentTerm(), - Candidate: r.trans.EncodePeer(r.localID, r.localAddr), - LastLogIndex: lastIdx, - LastLogTerm: lastTerm, + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + Candidate: r.trans.EncodePeer(r.localID, r.localAddr), + LastLogIndex: lastIdx, + LastLogTerm: lastTerm, + LeadershipTransfer: r.candidateFromLeadershipTransfer, } // Construct a function to ask for a vote @@ -1416,7 +1598,7 @@ func (r *Raft) electSelf() <-chan *voteResult { resp := &voteResult{voterID: peer.ID} err := r.trans.RequestVote(peer.ID, peer.Address, req, &resp.RequestVoteResponse) if err != nil { - r.logger.Printf("[ERR] raft: Failed to make RequestVote RPC to %v: %v", peer, err) + r.logger.Error(fmt.Sprintf("Failed to make RequestVote RPC to %v: %v", peer, err)) resp.Term = req.Term resp.Granted = false } @@ -1430,7 +1612,7 @@ func (r *Raft) electSelf() <-chan *voteResult { if server.ID == r.localID { // Persist a vote for ourselves if err := r.persistVote(req.Term, req.Candidate); err != nil { - r.logger.Printf("[ERR] raft: Failed to persist vote : %v", err) + r.logger.Error(fmt.Sprintf("Failed to persist vote : %v", err)) return nil } // Include our own vote @@ -1482,3 +1664,68 @@ func (r *Raft) setState(state RaftState) { r.observe(state) } } + +// lookupServer looks up a server by ServerID. +func (r *Raft) lookupServer(id ServerID) *Server { + for _, server := range r.configurations.latest.Servers { + if server.ID == id { + return &server + } + } + return nil +} + +// pickServer returns the follower that is most up to date. Because it accesses +// leaderState, it should only be called from the leaderLoop. +func (r *Raft) pickServer() *Server { + var pick *Server + var current uint64 + for _, server := range r.configurations.latest.Servers { + if server.ID == r.localID { + continue + } + state, ok := r.leaderState.replState[server.ID] + if !ok { + continue + } + nextIdx := atomic.LoadUint64(&state.nextIndex) + if nextIdx > current { + current = nextIdx + tmp := server + pick = &tmp + } + } + return pick +} + +// initiateLeadershipTransfer starts the leadership transfer on the leader side +// by sending a message to the leadershipTransferCh, to make sure it runs in the +// main loop. +func (r *Raft) initiateLeadershipTransfer(id *ServerID, address *ServerAddress) LeadershipTransferFuture { + future := &leadershipTransferFuture{ID: id, Address: address} + future.init() + + if id != nil && *id == r.localID { + err := fmt.Errorf("cannot transfer leadership to itself") + r.logger.Info(err.Error()) + future.respond(err) + return future + } + + select { + case r.leadershipTransferCh <- future: + return future + case <-r.shutdownCh: + return errorFuture{ErrRaftShutdown} + default: + return errorFuture{ErrEnqueueTimeout} + } +} + +// timeoutNow is what happens when a server receives a TimeoutNowRequest.
+func (r *Raft) timeoutNow(rpc RPC, req *TimeoutNowRequest) { + r.setLeader("") + r.setState(Candidate) + r.candidateFromLeadershipTransfer = true + rpc.Respond(&TimeoutNowResponse{}, nil) +} diff --git a/vendor/github.com/hashicorp/raft/replication.go b/vendor/github.com/hashicorp/raft/replication.go index 574d9ed69..b62e0d0d2 100644 --- a/vendor/github.com/hashicorp/raft/replication.go +++ b/vendor/github.com/hashicorp/raft/replication.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "sync" + "sync/atomic" "time" "github.com/armon/go-metrics" @@ -40,12 +41,18 @@ type followerReplication struct { // index; replication should be attempted with a best effort up through that // index, before exiting. stopCh chan uint64 + // triggerCh is notified every time new entries are appended to the log. triggerCh chan struct{} + // triggerDeferErrorCh is used to provide a backchannel. By sending a + // deferErr, the sender can be notified when the replication is done. + triggerDeferErrorCh chan *deferError + // currentTerm is the term of this leader, to be included in AppendEntries // requests. currentTerm uint64 + // nextIndex is the index of the next log entry to send to the follower, // which may fall past the end of the log. nextIndex uint64 @@ -134,13 +141,21 @@ RPC: r.replicateTo(s, maxIndex) } return + case deferErr := <-s.triggerDeferErrorCh: + lastLogIdx, _ := r.getLastLog() + shouldStop = r.replicateTo(s, lastLogIdx) + if !shouldStop { + deferErr.respond(nil) + } else { + deferErr.respond(fmt.Errorf("replication failed")) + } case <-s.triggerCh: lastLogIdx, _ := r.getLastLog() shouldStop = r.replicateTo(s, lastLogIdx) // This is _not_ our heartbeat mechanism but is to ensure - // followers quickly learn the leader's commit index when - // raft commits stop flowing naturally. The actual heartbeats - // can't do this to keep them unblocked by disk IO on the + // followers quickly learn the leader's commit index when + // raft commits stop flowing naturally. The actual heartbeats + // can't do this to keep them unblocked by disk IO on the + // follower. See https://github.com/hashicorp/raft/issues/282. case <-randomTimeout(r.conf.CommitTimeout): lastLogIdx, _ := r.getLastLog() @@ -163,7 +178,7 @@ PIPELINE: // to standard mode on failure.
if err := r.pipelineReplicate(s); err != nil { if err != ErrPipelineReplicationNotSupported { - r.logger.Printf("[ERR] raft: Failed to start pipeline replication to %s: %s", s.peer, err) + r.logger.Error(fmt.Sprintf("Failed to start pipeline replication to %s: %s", s.peer, err)) } } goto RPC @@ -187,7 +202,7 @@ START: } // Setup the request - if err := r.setupAppendEntries(s, &req, s.nextIndex, lastIndex); err == ErrLogNotFound { + if err := r.setupAppendEntries(s, &req, atomic.LoadUint64(&s.nextIndex), lastIndex); err == ErrLogNotFound { goto SEND_SNAP } else if err != nil { return @@ -196,7 +211,7 @@ START: // Make the RPC call start = time.Now() if err := r.trans.AppendEntries(s.peer.ID, s.peer.Address, &req, &resp); err != nil { - r.logger.Printf("[ERR] raft: Failed to AppendEntries to %v: %v", s.peer, err) + r.logger.Error(fmt.Sprintf("Failed to AppendEntries to %v: %v", s.peer, err)) s.failures++ return } @@ -220,13 +235,13 @@ START: s.failures = 0 s.allowPipeline = true } else { - s.nextIndex = max(min(s.nextIndex-1, resp.LastLog+1), 1) + atomic.StoreUint64(&s.nextIndex, max(min(s.nextIndex-1, resp.LastLog+1), 1)) if resp.NoRetryBackoff { s.failures = 0 } else { s.failures++ } - r.logger.Printf("[WARN] raft: AppendEntries to %v rejected, sending older logs (next: %d)", s.peer, s.nextIndex) + r.logger.Warn(fmt.Sprintf("AppendEntries to %v rejected, sending older logs (next: %d)", s.peer, atomic.LoadUint64(&s.nextIndex))) } CHECK_MORE: @@ -242,7 +257,7 @@ CHECK_MORE: } // Check if there are more logs to replicate - if s.nextIndex <= lastIndex { + if atomic.LoadUint64(&s.nextIndex) <= lastIndex { goto START } return @@ -253,7 +268,7 @@ SEND_SNAP: if stop, err := r.sendLatestSnapshot(s); stop { return true } else if err != nil { - r.logger.Printf("[ERR] raft: Failed to send snapshot to %v: %v", s.peer, err) + r.logger.Error(fmt.Sprintf("Failed to send snapshot to %v: %v", s.peer, err)) return } @@ -267,7 +282,7 @@ func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { // Get the snapshots snapshots, err := r.snapshots.List() if err != nil { - r.logger.Printf("[ERR] raft: Failed to list snapshots: %v", err) + r.logger.Error(fmt.Sprintf("Failed to list snapshots: %v", err)) return false, err } @@ -280,7 +295,7 @@ func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { snapID := snapshots[0].ID meta, snapshot, err := r.snapshots.Open(snapID) if err != nil { - r.logger.Printf("[ERR] raft: Failed to open snapshot %v: %v", snapID, err) + r.logger.Error(fmt.Sprintf("Failed to open snapshot %v: %v", snapID, err)) return false, err } defer snapshot.Close() @@ -303,7 +318,7 @@ func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { start := time.Now() var resp InstallSnapshotResponse if err := r.trans.InstallSnapshot(s.peer.ID, s.peer.Address, &req, &resp, snapshot); err != nil { - r.logger.Printf("[ERR] raft: Failed to install snapshot %v: %v", snapID, err) + r.logger.Error(fmt.Sprintf("Failed to install snapshot %v: %v", snapID, err)) s.failures++ return false, err } @@ -321,7 +336,7 @@ func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { // Check for success if resp.Success { // Update the indexes - s.nextIndex = meta.Index + 1 + atomic.StoreUint64(&s.nextIndex, meta.Index+1) s.commitment.match(s.peer.ID, meta.Index) // Clear any failures @@ -331,7 +346,7 @@ func (r *Raft) sendLatestSnapshot(s *followerReplication) (bool, error) { s.notifyAll(true) } else { s.failures++ - r.logger.Printf("[WARN] raft: InstallSnapshot 
to %v rejected", s.peer) + r.logger.Warn(fmt.Sprintf("InstallSnapshot to %v rejected", s.peer)) } return false, nil } @@ -358,7 +373,7 @@ func (r *Raft) heartbeat(s *followerReplication, stopCh chan struct{}) { start := time.Now() if err := r.trans.AppendEntries(s.peer.ID, s.peer.Address, &req, &resp); err != nil { - r.logger.Printf("[ERR] raft: Failed to heartbeat to %v: %v", s.peer.Address, err) + r.logger.Error(fmt.Sprintf("Failed to heartbeat to %v: %v", s.peer.Address, err)) failures++ select { case <-time.After(backoff(failureWait, failures, maxFailureScale)): @@ -386,8 +401,8 @@ func (r *Raft) pipelineReplicate(s *followerReplication) error { defer pipeline.Close() // Log start and stop of pipeline - r.logger.Printf("[INFO] raft: pipelining replication to peer %v", s.peer) - defer r.logger.Printf("[INFO] raft: aborting pipeline replication to peer %v", s.peer) + r.logger.Info(fmt.Sprintf("pipelining replication to peer %v", s.peer)) + defer r.logger.Info(fmt.Sprintf("aborting pipeline replication to peer %v", s.peer)) // Create a shutdown and finish channel stopCh := make(chan struct{}) @@ -397,7 +412,7 @@ func (r *Raft) pipelineReplicate(s *followerReplication) error { r.goFunc(func() { r.pipelineDecode(s, pipeline, stopCh, finishCh) }) // Start pipeline sends at the last good nextIndex - nextIndex := s.nextIndex + nextIndex := atomic.LoadUint64(&s.nextIndex) shouldStop := false SEND: @@ -411,6 +426,14 @@ SEND: r.pipelineSend(s, pipeline, &nextIndex, maxIndex) } break SEND + case deferErr := <-s.triggerDeferErrorCh: + lastLogIdx, _ := r.getLastLog() + shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) + if !shouldStop { + deferErr.respond(nil) + } else { + deferErr.respond(fmt.Errorf("replication failed")) + } case <-s.triggerCh: lastLogIdx, _ := r.getLastLog() shouldStop = r.pipelineSend(s, pipeline, &nextIndex, lastLogIdx) @@ -440,14 +463,14 @@ func (r *Raft) pipelineSend(s *followerReplication, p AppendPipeline, nextIdx *u // Pipeline the append entries if _, err := p.AppendEntries(req, new(AppendEntriesResponse)); err != nil { - r.logger.Printf("[ERR] raft: Failed to pipeline AppendEntries to %v: %v", s.peer, err) + r.logger.Error(fmt.Sprintf("Failed to pipeline AppendEntries to %v: %v", s.peer, err)) return true } // Increase the next send log to avoid re-sending old logs if n := len(req.Entries); n > 0 { last := req.Entries[n-1] - *nextIdx = last.Index + 1 + atomic.StoreUint64(nextIdx, last.Index+1) } return false } @@ -516,8 +539,7 @@ func (r *Raft) setPreviousLog(req *AppendEntriesRequest, nextIndex uint64) error } else { var l Log if err := r.logs.GetLog(nextIndex-1, &l); err != nil { - r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", - nextIndex-1, err) + r.logger.Error(fmt.Sprintf("Failed to get log at index %d: %v", nextIndex-1, err)) return err } @@ -536,7 +558,7 @@ func (r *Raft) setNewLogs(req *AppendEntriesRequest, nextIndex, lastIndex uint64 for i := nextIndex; i <= maxIndex; i++ { oldLog := new(Log) if err := r.logs.GetLog(i, oldLog); err != nil { - r.logger.Printf("[ERR] raft: Failed to get log at index %d: %v", i, err) + r.logger.Error(fmt.Sprintf("Failed to get log at index %d: %v", i, err)) return err } req.Entries = append(req.Entries, oldLog) @@ -552,7 +574,7 @@ func appendStats(peer string, start time.Time, logs float32) { // handleStaleTerm is used when a follower indicates that we have a stale term. 
func (r *Raft) handleStaleTerm(s *followerReplication) { - r.logger.Printf("[ERR] raft: peer %v has newer term, stopping replication", s.peer) + r.logger.Error(fmt.Sprintf("peer %v has newer term, stopping replication", s.peer)) s.notifyAll(false) // No longer leader asyncNotifyCh(s.stepDown) } @@ -564,7 +586,7 @@ func updateLastAppended(s *followerReplication, req *AppendEntriesRequest) { // Mark any inflight logs as committed if logs := req.Entries; len(logs) > 0 { last := logs[len(logs)-1] - s.nextIndex = last.Index + 1 + atomic.StoreUint64(&s.nextIndex, last.Index+1) s.commitment.match(s.peer.ID, last.Index) } diff --git a/vendor/github.com/hashicorp/raft/snapshot.go b/vendor/github.com/hashicorp/raft/snapshot.go index 5287ebc41..2e0f77a5d 100644 --- a/vendor/github.com/hashicorp/raft/snapshot.go +++ b/vendor/github.com/hashicorp/raft/snapshot.go @@ -77,14 +77,14 @@ func (r *Raft) runSnapshots() { // Trigger a snapshot if _, err := r.takeSnapshot(); err != nil { - r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) + r.logger.Error(fmt.Sprintf("Failed to take snapshot: %v", err)) } case future := <-r.userSnapshotCh: // User-triggered, run immediately id, err := r.takeSnapshot() if err != nil { - r.logger.Printf("[ERR] raft: Failed to take snapshot: %v", err) + r.logger.Error(fmt.Sprintf("Failed to take snapshot: %v", err)) } else { future.opener = func() (*SnapshotMeta, io.ReadCloser, error) { return r.snapshots.Open(id) @@ -107,7 +107,7 @@ func (r *Raft) shouldSnapshot() bool { // Check the last log index lastIdx, err := r.logs.LastIndex() if err != nil { - r.logger.Printf("[ERR] raft: Failed to get last log index: %v", err) + r.logger.Error(fmt.Sprintf("Failed to get last log index: %v", err)) return false } @@ -172,7 +172,7 @@ func (r *Raft) takeSnapshot() (string, error) { } // Create a new snapshot. - r.logger.Printf("[INFO] raft: Starting snapshot up to %d", snapReq.index) + r.logger.Info(fmt.Sprintf("Starting snapshot up to %d", snapReq.index)) start := time.Now() version := getSnapshotVersion(r.protocolVersion) sink, err := r.snapshots.Create(version, snapReq.index, snapReq.term, committed, committedIndex, r.trans) @@ -202,7 +202,7 @@ func (r *Raft) takeSnapshot() (string, error) { return "", err } - r.logger.Printf("[INFO] raft: Snapshot to %d complete", snapReq.index) + r.logger.Info(fmt.Sprintf("Snapshot to %d complete", snapReq.index)) return sink.ID(), nil } @@ -229,7 +229,7 @@ func (r *Raft) compactLogs(snapIdx uint64) error { maxLog := min(snapIdx, lastLogIdx-r.conf.TrailingLogs) // Log this - r.logger.Printf("[INFO] raft: Compacting logs from %d to %d", minLog, maxLog) + r.logger.Info(fmt.Sprintf("Compacting logs from %d to %d", minLog, maxLog)) // Compact the logs if err := r.logs.DeleteRange(minLog, maxLog); err != nil { diff --git a/vendor/github.com/hashicorp/raft/transport.go b/vendor/github.com/hashicorp/raft/transport.go index 85459b221..b18d24593 100644 --- a/vendor/github.com/hashicorp/raft/transport.go +++ b/vendor/github.com/hashicorp/raft/transport.go @@ -58,6 +58,9 @@ type Transport interface { // disk IO. If a Transport does not support this, it can simply // ignore the call, and push the heartbeat onto the Consumer channel. SetHeartbeatHandler(cb func(rpc RPC)) + + // TimeoutNow is used to start a leadership transfer to the target node. 
+ TimeoutNow(id ServerID, target ServerAddress, args *TimeoutNowRequest, resp *TimeoutNowResponse) error } // WithClose is an interface that a transport may provide which diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index 623d3d83f..c0d70b2fa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -29,27 +29,72 @@ type Collector interface { // collected by this Collector to the provided channel and returns once // the last descriptor has been sent. The sent descriptors fulfill the // consistency and uniqueness requirements described in the Desc - // documentation. (It is valid if one and the same Collector sends - // duplicate descriptors. Those duplicates are simply ignored. However, - // two different Collectors must not send duplicate descriptors.) This - // method idempotently sends the same descriptors throughout the - // lifetime of the Collector. If a Collector encounters an error while - // executing this method, it must send an invalid descriptor (created - // with NewInvalidDesc) to signal the error to the registry. + // documentation. + // + // It is valid if one and the same Collector sends duplicate + // descriptors. Those duplicates are simply ignored. However, two + // different Collectors must not send duplicate descriptors. + // + // Sending no descriptor at all marks the Collector as “unchecked”, + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + // + // This method idempotently sends the same descriptors throughout the + // lifetime of the Collector. It may be called concurrently and + // therefore must be implemented in a concurrency safe way. + // + // If a Collector encounters an error while executing this method, it + // must send an invalid descriptor (created with NewInvalidDesc) to + // signal the error to the registry. Describe(chan<- *Desc) // Collect is called by the Prometheus registry when collecting // metrics. The implementation sends each collected metric via the // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by - // Describe. Returned metrics that share the same descriptor must differ - // in their variable label values. This method may be called - // concurrently and must therefore be implemented in a concurrency safe - // way. Blocking occurs at the expense of total performance of rendering - // all registered metrics. Ideally, Collector implementations support - // concurrent readers. + // descriptor of each sent metric is one of those returned by Describe + // (unless the Collector is unchecked, see above). Returned metrics that + // share the same descriptor must differ in their variable label + // values. + // + // This method may be called concurrently and must therefore be + // implemented in a concurrency safe way. Blocking occurs at the expense + // of total performance of rendering all registered metrics. Ideally, + // Collector implementations support concurrent readers. Collect(chan<- Metric) } +// DescribeByCollect is a helper to implement the Describe method of a custom +// Collector. It collects the metrics from the provided Collector and sends +// their descriptors to the provided channel. 
+// +// If a Collector collects the same metrics throughout its lifetime, its +// Describe method can simply be implemented as: +// +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } +// +// However, this will not work if the metrics collected change dynamically over +// the lifetime of the Collector in a way that their combined set of descriptors +// changes as well. The shortcut implementation will then violate the contract +// of the Describe method. If a Collector sometimes collects no metrics at all +// (for example vectors like CounterVec, GaugeVec, etc., which only collect +// metrics after a metric with a fully specified label set has been accessed), +// it might even get registered as an unchecked Collector (cf. the Register +// method of the Registerer interface). Hence, only use this shortcut +// implementation of Describe if you are certain to fulfill the contract. +// +// The Collector example demonstrates a use of DescribeByCollect. +func DescribeByCollect(c Collector, descs chan<- *Desc) { + metrics := make(chan Metric) + go func() { + c.Collect(metrics) + close(metrics) + }() + for m := range metrics { + descs <- m.Desc() + } +} + // selfCollector implements Collector for a single Metric so that the Metric // collects itself. Add it as an anonymous field to a struct that implements // Metric, and call init with the Metric itself as an argument. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 765e4550c..d463e36d3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -136,7 +136,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { return &CounterVec{ metricVec: newMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) } result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 4a755b0fa..1d034f871 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -67,7 +67,7 @@ type Desc struct { // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc // and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName and help must not be empty. +// be nil if no such labels should be set. fqName must not be empty. // // variableLabels only contain the label names. Their label values are variable // and therefore not part of the Desc. (They are managed within the Metric.) @@ -80,10 +80,6 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * help: help, variableLabels: variableLabels, } - if help == "" { - d.err = errors.New("empty help string") - return d - } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d @@ -97,7 +93,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // First add only the const label names and sort them...
for labelName := range constLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, labelName) @@ -119,7 +115,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // dimension with a different mix between preset and variable labels. for _, labelName := range variableLabels { if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, "$"+labelName) @@ -156,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * Value: proto.String(v), }) } - sort.Sort(LabelPairSorter(d.constLabelPairs)) + sort.Sort(labelPairSorter(d.constLabelPairs)) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 83c3657d7..5d9525def 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -121,7 +121,17 @@ // NewConstSummary (and their respective Must… versions). That will happen in // the Collect method. The Describe method has to return separate Desc // instances, representative of the “throw-away” metrics to be created later. -// NewDesc comes in handy to create those Desc instances. +// NewDesc comes in handy to create those Desc instances. Alternatively, you +// could return no Desc at all, which will mark the Collector “unchecked”. No +// checks are performed at registration time, but metric consistency will still +// be ensured at scrape time, i.e. any inconsistencies will lead to scrape +// errors. Thus, with unchecked Collectors, the responsibility to not collect +// metrics that lead to inconsistencies in the total scrape result lies with the +// implementer of the Collector. While this is not a desirable state, it is +// sometimes necessary. The typical use case is a situation where the exact +// metrics to be returned by a Collector cannot be predicted at registration +// time, but the implementer has sufficient knowledge of the whole system to +// guarantee metric consistency. // // The Collector example illustrates the use case. You can also look at the // source code of the processCollector (mirroring process metrics), the diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go index e3b67df8a..3d383a735 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package prometheus // Inline and byte-free variant of hash/fnv's fnv64a.
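The unchecked-Collector text added to doc.go above pairs with the new DescribeByCollect helper from collector.go. As a hedged illustration (the metric name and the queueLength source are invented for this sketch, not taken from the patch), a custom Collector whose metrics never change shape can delegate Describe entirely:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector always exports the same single gauge, so delegating
// Describe to DescribeByCollect is safe: the descriptor set is static.
type queueCollector struct {
	desc *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		desc: prometheus.NewDesc(
			"app_queue_length", // hypothetical metric name
			"Current number of items in the work queue.",
			nil, nil,
		),
	}
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, queueLength())
}

// queueLength stands in for whatever the application actually measures.
func queueLength() float64 { return 42 }
```

Registering it with prometheus.MustRegister(newQueueCollector()) then works exactly as for built-in collectors; if Collect could ever yield a different descriptor set, Describe would have to enumerate the descriptors itself instead of using the shortcut.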
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 17c72d7eb..71d406bd9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -147,7 +147,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { return &GaugeVec{ metricVec: newMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) } result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index 096454af9..ba3b9333e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package prometheus import ( @@ -17,8 +30,12 @@ type goCollector struct { metrics memStatsMetrics } -// NewGoCollector returns a collector which exports metrics about the current -// go process. +// NewGoCollector returns a collector which exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This causes a stop-the-world, which is very short with Go1.9+ +// (~25µs). However, with older Go versions, the stop-the-world duration depends +// on the heap size and can be quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). func NewGoCollector() Collector { return &goCollector{ goroutinesDesc: NewDesc( @@ -265,7 +282,7 @@ func (c *goCollector) Collect(ch chan<- Metric) { quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() } quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 331783a75..f88da707b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -16,7 +16,9 @@ package prometheus import ( "fmt" "math" + "runtime" "sort" + "sync" "sync/atomic" "github.com/golang/protobuf/proto" @@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { } // HistogramOpts bundles the options for creating a Histogram metric. 
It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. type HistogramOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Histogram (created by joining these components with @@ -120,7 +123,7 @@ type HistogramOpts struct { Subsystem string Name string - // Help provides information about this Histogram. Mandatory! + // Help provides information about this Histogram. // // Metrics with the same fully-qualified name must have the same Help // string. @@ -162,7 +165,7 @@ func NewHistogram(opts HistogramOpts) Histogram { func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) } for _, n := range desc.variableLabels { @@ -184,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr desc: desc, upperBounds: opts.Buckets, labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}}, } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -200,28 +204,53 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } } - // Finally we know the final length of h.upperBounds and can make counts. - h.counts = make([]uint64, len(h.upperBounds)) + // Finally we know the final length of h.upperBounds and can make counts + // for both states: + h.counts[0].buckets = make([]uint64, len(h.upperBounds)) + h.counts[1].buckets = make([]uint64, len(h.upperBounds)) h.init(h) // Init self-collection. return h } -type histogram struct { +type histogramCounts struct { // sumBits contains the bits of the float64 representing the sum of all // observations. sumBits and count have to go first in the struct to // guarantee alignment for atomic operations. // http://golang.org/pkg/sync/atomic/#pkg-note-BUG sumBits uint64 count uint64 + buckets []uint64 +} + +type histogram struct { + // countAndHotIdx is a complicated one. For lock-free yet atomic + // observations, we need to save the total count of observations again, + // combined with the index of the currently-hot counts struct, so that + // we can perform the operation on both values atomically. The least + // significant bit defines the hot counts struct. The remaining 63 bits + // represent the total count of observations. This happens under the + // assumption that the 63bit count will never overflow. Rationale: An + // observation takes about 30ns. Let's assume it could happen in + // 10ns. Overflowing the counter will then take at least (2^63)*10ns, + // which is about 3000 years. + // + // This has to be first in the struct for 64bit alignment. See + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 selfCollector - // Note that there is no mutex required. - - desc *Desc + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. upperBounds []float64 - counts []uint64 + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric.
It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*histogramCounts + hotIdx int // Index of currently-hot counts. Only used within Write. labelPairs []*dto.LabelPair } @@ -241,36 +270,113 @@ func (h *histogram) Observe(v float64) { // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op // 300 buckets: 154 ns/op linear - binary 61.6 ns/op i := sort.SearchFloat64s(h.upperBounds, v) - if i < len(h.counts) { - atomic.AddUint64(&h.counts[i], 1) + + // We increment h.countAndHotIdx by 2 so that the counter in the upper + // 63 bits gets incremented by 1. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 2) + hotCounts := h.counts[n%2] + + if i < len(h.upperBounds) { + atomic.AddUint64(&hotCounts.buckets[i], 1) } - atomic.AddUint64(&h.count, 1) for { - oldBits := atomic.LoadUint64(&h.sumBits) + oldBits := atomic.LoadUint64(&hotCounts.sumBits) newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { break } } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) } func (h *histogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, len(h.upperBounds)) + var ( + his = &dto.Histogram{} + buckets = make([]*dto.Bucket, len(h.upperBounds)) + hotCounts, coldCounts *histogramCounts + count uint64 + ) - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) - his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) - var count uint64 + // For simplicity, we mutex the rest of this method. It is not in the + // hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it. + h.writeMtx.Lock() + defer h.writeMtx.Unlock() + + // This is a bit arcane, which is why the following spells out this if + // clause in English: + // + // If the currently-hot counts struct is #0, we atomically increment + // h.countAndHotIdx by 1 so that from now on Observe will use the counts + // struct #1. Furthermore, the atomic increment gives us the new value, + // which, in its most significant 63 bits, tells us the count of + // observations done so far up to and including currently ongoing + // observations still using the counts struct just changed from hot to + // cold. To have a normal uint64 for the count, we bitshift by 1 and + // save the result in count. We also set h.hotIdx to 1 for the next + // Write call, and we will refer to counts #1 as hotCounts and to counts + // #0 as coldCounts. + // + // If the currently-hot counts struct is #1, we do the corresponding + // things the other way round. We have to _decrement_ h.countAndHotIdx + // (which is a bit arcane in itself, as we have to express -1 with an + // unsigned int...). + if h.hotIdx == 0 { + count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1 + h.hotIdx = 1 + hotCounts = h.counts[1] + coldCounts = h.counts[0] + } else { + count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement. + h.hotIdx = 0 + hotCounts = h.counts[0] + coldCounts = h.counts[1] + } + + // Now we have to wait for the now-declared-cold counts to actually cool + // down, i.e. wait for all observations still using it to finish. 
That's + // the case once the count in the cold counts struct is the same as the + // one atomically retrieved from the upper 63bits of h.countAndHotIdx. + for { + if count == atomic.LoadUint64(&coldCounts.count) { + break + } + runtime.Gosched() // Let observations get work done. + } + + his.SampleCount = proto.Uint64(count) + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))) + var cumCount uint64 for i, upperBound := range h.upperBounds { - count += atomic.LoadUint64(&h.counts[i]) + cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) buckets[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(count), + CumulativeCount: proto.Uint64(cumCount), UpperBound: proto.Float64(upperBound), } } + his.Bucket = buckets out.Histogram = his out.Label = h.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + for i := range h.upperBounds { + atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) + atomic.StoreUint64(&coldCounts.buckets[i], 0) + } return nil } @@ -454,7 +560,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { // bucket. // // NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. +// consistent with the variable labels in Desc or if Desc is invalid. func NewConstHistogram( desc *Desc, count uint64, @@ -462,6 +568,9 @@ func NewConstHistogram( buckets map[float64]uint64, labelValues ...string, ) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go index dd0f8197f..9f0875bfc 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -15,9 +15,7 @@ package prometheus import ( "bufio" - "bytes" "compress/gzip" - "fmt" "io" "net" "net/http" @@ -41,19 +39,10 @@ const ( acceptEncodingHeader = "Accept-Encoding" ) -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) -} - -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, } // Handler returns an HTTP handler for the DefaultGatherer. It is @@ -61,68 +50,50 @@ func giveBuf(buf *bytes.Buffer) { // name). // // Deprecated: Please note the issues described in the doc comment of -// InstrumentHandler. You might want to consider using -// promhttp.InstrumentedHandler instead. +// InstrumentHandler. You might want to consider using promhttp.Handler instead. func Handler() http.Handler { return InstrumentHandler("prometheus", UninstrumentedHandler()) } // UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. // -// Deprecated: Use promhttp.Handler instead. See there for further documentation. 
+// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{}) +// instead. See there for further documentation. func UninstrumentedHandler() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + return http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { mfs, err := DefaultGatherer.Gather() if err != nil { - http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) + httpError(rsp, err) return } contentType := expfmt.Negotiate(req.Header) - buf := getBuf() - defer giveBuf(buf) - writer, encoding := decorateWriter(req, buf) - enc := expfmt.NewEncoder(writer, contentType) - var lastErr error + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + for _, mf := range mfs { if err := enc.Encode(mf); err != nil { - lastErr = err - http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + httpError(rsp, err) return } } - if closer, ok := writer.(io.Closer); ok { - closer.Close() - } - if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) - return - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - w.Write(buf.Bytes()) }) } -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). -func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") - for _, part := range parts { - part := strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" - } - } - return writer, "" -} - var instLabels = []string{"method", "code"} type nower interface { @@ -139,16 +110,6 @@ var now nower = nowFunc(func() time.Time { return time.Now() }) -func nowSeries(t ...time.Time) nower { - return nowFunc(func() time.Time { - defer func() { - t = t[1:] - }() - - return t[0] - }) -} - // InstrumentHandler wraps the given HTTP handler for instrumentation. It // registers four metric collectors (if not already done) and reports HTTP // metrics to the (newly or already) registered collectors: http_requests_total @@ -159,21 +120,14 @@ func nowSeries(t ...time.Time) nower { // (label name "method") and HTTP status code (label name "code"). // // Deprecated: InstrumentHandler has several issues. Use the tooling provided in -// package promhttp instead. The issues are the following: -// -// - It uses Summaries rather than Histograms. Summaries are not useful if -// aggregation across multiple instances is required. -// -// - It uses microseconds as unit, which is deprecated and should be replaced by -// seconds. -// -// - The size of the request is calculated in a separate goroutine. 
Since this -// calculator requires access to the request header, it creates a race with -// any writes to the header performed during request handling. -// httputil.ReverseProxy is a prominent example for a handler -// performing such writes. -// -// - It has additional issues with HTTP/2, cf. +// package promhttp instead. The issues are the following: (1) It uses Summaries +// rather than Histograms. Summaries are not useful if aggregation across +// multiple instances is required. (2) It uses microseconds as unit, which is +// deprecated and should be replaced by seconds. (3) The size of the request is +// calculated in a separate goroutine. Since this calculator requires access to +// the request header, it creates a race with any writes to the header performed +// during request handling. httputil.ReverseProxy is a prominent example for a +// handler performing such writes. (4) It has additional issues with HTTP/2, cf. // https://github.com/prometheus/client_golang/issues/272. func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) @@ -317,7 +271,7 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo } func computeApproximateRequestSize(r *http.Request) <-chan int { - // Get URL length in current go routine for avoiding a race condition. + // Get URL length in current goroutine for avoiding a race condition. // HandlerFunc that runs in parallel may modify the URL. s := 0 if r.URL != nil { @@ -352,10 +306,9 @@ func computeApproximateRequestSize(r *http.Request) <-chan int { type responseWriterDelegator struct { http.ResponseWriter - handler, method string - status int - written int64 - wroteHeader bool + status int + written int64 + wroteHeader bool } func (r *responseWriterDelegator) WriteHeader(code int) { @@ -521,3 +474,31 @@ func sanitizeCode(s int) string { return strconv.Itoa(s) } } + +// gzipAccepted returns whether the client will accept gzip-encoded content. +func gzipAccepted(header http.Header) bool { + a := header.Get(acceptEncodingHeader) + parts := strings.Split(a, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return true + } + } + return false +} + +// httpError removes any content-encoding header and then calls http.Error with +// the provided error and http.StatusInternalServerError. Error contents are +// supposed to be uncompressed plain text. However, same as with a plain +// http.Error, any header settings will be void if the header has already been +// sent. The error message will still be written to the writer, but it will +// probably be of limited use. +func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go new file mode 100644 index 000000000..351c26e1a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "sort" + + dto "github.com/prometheus/client_model/go" +) + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// NormalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. +func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 2502e3734..2744443ac 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package prometheus import ( @@ -24,9 +37,22 @@ const reservedLabelPrefix = "__" var errInconsistentCardinality = errors.New("inconsistent label cardinality") +func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { + return fmt.Errorf( + "%s: %q has %d variable labels named %q but %d values %q were provided", + errInconsistentCardinality, fqName, + len(labels), labels, + len(labelValues), labelValues, + ) +} + func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { if len(labels) != expectedNumberOfValues { - return errInconsistentCardinality + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(labels), labels, + ) } for name, val := range labels { @@ -40,7 +66,11 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { - return errInconsistentCardinality + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(vals), vals, + ) } for _, val := range vals { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 6213ee812..55e6d86d5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -15,6 +15,9 @@ package prometheus import ( "strings" + "time" + + "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" ) @@ -43,9 +46,8 @@ type Metric interface { // While populating dto.Metric, it is the responsibility of the // implementation to ensure validity of the Metric protobuf (like valid // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. (Implementers may find - // LabelPairSorter useful for that.) Callers of Write should still make - // sure of sorting if they depend on it. + // recommended to sort labels lexicographically. Callers of Write should + // still make sure of sorting if they depend on it. Write(*dto.Metric) error // TODO(beorn7): The original rationale of passing in a pre-allocated // dto.Metric protobuf to save allocations has disappeared. The @@ -57,8 +59,9 @@ type Metric interface { // implementation XXX has its own XXXOpts type, but in most cases, it is just be // an alias of this type (which might change when the requirement arises.) // -// It is mandatory to set Name and Help to a non-empty string. All other fields -// are optional and can safely be left at their zero value. +// It is mandatory to set Name to a non-empty string. All other fields are +// optional and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. type Opts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Metric (created by joining these components with @@ -69,7 +72,7 @@ type Opts struct { Subsystem string Name string - // Help provides information about this metric. Mandatory! + // Help provides information about this metric. // // Metrics with the same fully-qualified name must have the same Help // string. @@ -110,37 +113,22 @@ func BuildFQName(namespace, subsystem, name string) string { return name } -// LabelPairSorter implements sort.Interface. 
It is used to sort a slice of -// dto.LabelPair pointers. This is useful for implementing the Write method of -// custom metrics. -type LabelPairSorter []*dto.LabelPair +// labelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. +type labelPairSorter []*dto.LabelPair -func (s LabelPairSorter) Len() int { +func (s labelPairSorter) Len() int { return len(s) } -func (s LabelPairSorter) Swap(i, j int) { +func (s labelPairSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s LabelPairSorter) Less(i, j int) bool { +func (s labelPairSorter) Less(i, j int) bool { return s[i].GetName() < s[j].GetName() } -type hashSorter []uint64 - -func (s hashSorter) Len() int { - return len(s) -} - -func (s hashSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s hashSorter) Less(i, j int) bool { - return s[i] < s[j] -} - type invalidMetric struct { desc *Desc err error @@ -156,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric { func (m *invalidMetric) Desc() *Desc { return m.desc } func (m *invalidMetric) Write(*dto.Metric) error { return m.err } + +type timestampedMetric struct { + Metric + t time.Time +} + +func (m timestampedMetric) Write(pb *dto.Metric) error { + e := m.Metric.Write(pb) + pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) + return e +} + +// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric so +// that it has an explicit timestamp set to the provided Time. This is only +// useful in rare cases as the timestamp of a Prometheus metric should usually +// be set by the Prometheus server during scraping. Exceptions include mirroring +// metrics with given timestamps from other metric +// sources. +// +// NewMetricWithTimestamp works best with MustNewConstMetric, +// MustNewConstHistogram, and MustNewConstSummary, see example. +// +// Currently, the exposition formats used by Prometheus are limited to +// millisecond resolution. Thus, the provided time will be rounded down to the +// next full millisecond value. +func NewMetricWithTimestamp(t time.Time, m Metric) Metric { + return timestampedMetric{Metric: m, t: t} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 32ac74a7f..55176d58c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -13,46 +13,74 @@ package prometheus -import "github.com/prometheus/procfs" +import ( + "errors" + "os" + + "github.com/prometheus/procfs" +) type processCollector struct { - pid int collectFn func(chan<- Metric) pidFn func() (int, error) + reportErrors bool cpuTotal *Desc openFDs, maxFDs *Desc - vsize, rss *Desc + vsize, maxVsize *Desc + rss *Desc startTime *Desc } +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined at construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_").
+ Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + // NewProcessCollector returns a collector which exports the current state of // process metrics including CPU, memory and file descriptor usage as well as -// the process start time for the given process ID under the given namespace. +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. // // Currently, the collector depends on a Linux-style proc filesystem and // therefore only exports metrics for Linux. -func NewProcessCollector(pid int, namespace string) Collector { - return NewProcessCollectorPIDFn( - func() (int, error) { return pid, nil }, - namespace, - ) -} - -// NewProcessCollectorPIDFn works like NewProcessCollector but the process ID is -// determined on each collect anew by calling the given pidFn function. -func NewProcessCollectorPIDFn( - pidFn func() (int, error), - namespace string, -) Collector { +// +// Note: An older version of this function had the following signature: +// +// NewProcessCollector(pid int, namespace string) Collector +// +// Most commonly, it was called as +// +// NewProcessCollector(os.Getpid(), "") +// +// The following call of the current version is equivalent to the above: +// +// NewProcessCollector(ProcessCollectorOpts{}) +func NewProcessCollector(opts ProcessCollectorOpts) Collector { ns := "" - if len(namespace) > 0 { - ns = namespace + "_" + if len(opts.Namespace) > 0 { + ns = opts.Namespace + "_" } - c := processCollector{ - pidFn: pidFn, - collectFn: func(chan<- Metric) {}, - + c := &processCollector{ + reportErrors: opts.ReportErrors, cpuTotal: NewDesc( ns+"process_cpu_seconds_total", "Total user and system CPU time spent in seconds.", @@ -73,6 +101,11 @@ func NewProcessCollectorPIDFn( "Virtual memory size in bytes.", nil, nil, ), + maxVsize: NewDesc( + ns+"process_virtual_memory_max_bytes", + "Maximum amount of virtual memory available in bytes.", + nil, nil, + ), rss: NewDesc( ns+"process_resident_memory_bytes", "Resident memory size in bytes.", @@ -85,12 +118,23 @@ func NewProcessCollectorPIDFn( ), } + if opts.PidFn == nil { + pid := os.Getpid() + c.pidFn = func() (int, error) { return pid, nil } + } else { + c.pidFn = opts.PidFn + } + // Set up process metric collection if supported by the runtime. if _, err := procfs.NewStat(); err == nil { c.collectFn = c.processCollect + } else { + c.collectFn = func(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) + } } - return &c + return c } // Describe returns all descriptions of the collector. 
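The ProcessCollectorOpts migration is mechanical for callers. A minimal sketch of the new construction, assuming a hypothetical "myapp" namespace (illustration only, not part of this change):

	package main

	import "github.com/prometheus/client_golang/prometheus"

	func main() {
		reg := prometheus.NewRegistry()
		// Before this change: NewProcessCollector(os.Getpid(), "myapp").
		// The zero value of ProcessCollectorOpts targets the current
		// process; Namespace and ReportErrors are opt-in.
		reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
			Namespace:    "myapp",
			ReportErrors: true,
		}))
	}

With ReportErrors set, collection failures surface as invalid metrics (and hence as scrape errors) instead of being silently dropped.
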
@@ -99,6 +143,7 @@ func (c *processCollector) Describe(ch chan<- *Desc) { ch <- c.openFDs ch <- c.maxFDs ch <- c.vsize + ch <- c.maxVsize ch <- c.rss ch <- c.startTime } @@ -108,16 +153,16 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } -// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the -// client allows users to configure the error behavior. func (c *processCollector) processCollect(ch chan<- Metric) { pid, err := c.pidFn() if err != nil { + c.reportError(ch, nil, err) return } p, err := procfs.NewProc(pid) if err != nil { + c.reportError(ch, nil, err) return } @@ -127,14 +172,33 @@ func (c *processCollector) processCollect(ch chan<- Metric) { ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) if startTime, err := stat.StartTime(); err == nil { ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) } + } else { + c.reportError(ch, nil, err) } if fds, err := p.FileDescriptorsLen(); err == nil { ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) } if limits, err := p.NewLimits(); err == nil { ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) } } + +func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { + if !c.reportErrors { + return + } + if desc == nil { + desc = NewInvalidDesc(err) + } + ch <- NewInvalidMetric(desc, err) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9c1c66dcc..67b56d37c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,16 +76,16 @@ type flusherDelegator struct{ *responseWriterDelegator } type hijackerDelegator struct{ *responseWriterDelegator } type readerFromDelegator struct{ *responseWriterDelegator } -func (d *closeNotifierDelegator) CloseNotify() <-chan bool { +func (d closeNotifierDelegator) CloseNotify() <-chan bool { return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } -func (d *flusherDelegator) Flush() { +func (d flusherDelegator) Flush() { d.ResponseWriter.(http.Flusher).Flush() } -func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { +func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { return d.ResponseWriter.(http.Hijacker).Hijack() } -func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { +func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { if !d.wroteHeader { d.WriteHeader(http.StatusOK) } @@ -102,34 +102,34 @@ func init() { return d } pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 - return &closeNotifierDelegator{d} + return closeNotifierDelegator{d} } pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 - return &flusherDelegator{d} + return flusherDelegator{d} } pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 return struct { *responseWriterDelegator http.Flusher http.CloseNotifier - }{d, &flusherDelegator{d}, &closeNotifierDelegator{d}} + }{d, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 
4 - return &hijackerDelegator{d} + return hijackerDelegator{d} } pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 return struct { *responseWriterDelegator http.Hijacker http.CloseNotifier - }{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 return struct { *responseWriterDelegator http.Hijacker http.Flusher - }{d, &hijackerDelegator{d}, &flusherDelegator{d}} + }{d, hijackerDelegator{d}, flusherDelegator{d}} } pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 return struct { @@ -137,7 +137,7 @@ func init() { http.Hijacker http.Flusher http.CloseNotifier - }{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 return readerFromDelegator{d} @@ -147,14 +147,14 @@ func init() { *responseWriterDelegator io.ReaderFrom http.CloseNotifier - }{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}} + }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 return struct { *responseWriterDelegator io.ReaderFrom http.Flusher - }{d, &readerFromDelegator{d}, &flusherDelegator{d}} + }{d, readerFromDelegator{d}, flusherDelegator{d}} } pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 return struct { @@ -162,14 +162,14 @@ func init() { io.ReaderFrom http.Flusher http.CloseNotifier - }{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 return struct { *responseWriterDelegator io.ReaderFrom http.Hijacker - }{d, &readerFromDelegator{d}, &hijackerDelegator{d}} + }{d, readerFromDelegator{d}, hijackerDelegator{d}} } pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 return struct { @@ -177,7 +177,7 @@ func init() { io.ReaderFrom http.Hijacker http.CloseNotifier - }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 return struct { @@ -185,7 +185,7 @@ func init() { io.ReaderFrom http.Hijacker http.Flusher - }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} } pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 return struct { @@ -194,6 +194,6 @@ func init() { http.Hijacker http.Flusher http.CloseNotifier - }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go index 75a905e2f..31a706956 100644 --- 
a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go @@ -22,27 +22,27 @@ import ( type pusherDelegator struct{ *responseWriterDelegator } -func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error { +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { return d.ResponseWriter.(http.Pusher).Push(target, opts) } func init() { pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 - return &pusherDelegator{d} + return pusherDelegator{d} } pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 return struct { *responseWriterDelegator http.Pusher http.CloseNotifier - }{d, &pusherDelegator{d}, &closeNotifierDelegator{d}} + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 return struct { *responseWriterDelegator http.Pusher http.Flusher - }{d, &pusherDelegator{d}, &flusherDelegator{d}} + }{d, pusherDelegator{d}, flusherDelegator{d}} } pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 return struct { @@ -50,14 +50,14 @@ func init() { http.Pusher http.Flusher http.CloseNotifier - }{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 return struct { *responseWriterDelegator http.Pusher http.Hijacker - }{d, &pusherDelegator{d}, &hijackerDelegator{d}} + }{d, pusherDelegator{d}, hijackerDelegator{d}} } pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 return struct { @@ -65,7 +65,7 @@ func init() { http.Pusher http.Hijacker http.CloseNotifier - }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 return struct { @@ -73,7 +73,7 @@ func init() { http.Pusher http.Hijacker http.Flusher - }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} } pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 return struct { @@ -82,14 +82,14 @@ func init() { http.Hijacker http.Flusher http.CloseNotifier - }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 return struct { *responseWriterDelegator http.Pusher io.ReaderFrom - }{d, &pusherDelegator{d}, &readerFromDelegator{d}} + }{d, pusherDelegator{d}, readerFromDelegator{d}} } pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 return struct { @@ -97,7 +97,7 @@ func init() { http.Pusher io.ReaderFrom http.CloseNotifier - }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}} + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 return struct { @@ -105,7 +105,7 @@ 
func init() { http.Pusher io.ReaderFrom http.Flusher - }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}} + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} } pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 return struct { @@ -114,7 +114,7 @@ func init() { io.ReaderFrom http.Flusher http.CloseNotifier - }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 return struct { @@ -122,7 +122,7 @@ func init() { http.Pusher io.ReaderFrom http.Hijacker - }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}} + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} } pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 return struct { @@ -131,7 +131,7 @@ func init() { io.ReaderFrom http.Hijacker http.CloseNotifier - }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 return struct { @@ -140,7 +140,7 @@ func init() { io.ReaderFrom http.Hijacker http.Flusher - }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} } pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 return struct { @@ -150,7 +150,7 @@ func init() { http.Hijacker http.Flusher http.CloseNotifier - }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 8dc260355..668eb6b3c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -32,7 +32,6 @@ package promhttp import ( - "bytes" "compress/gzip" "fmt" "io" @@ -53,19 +52,10 @@ const ( acceptEncodingHeader = "Accept-Encoding" ) -var bufPool sync.Pool - -func getBuf() *bytes.Buffer { - buf := bufPool.Get() - if buf == nil { - return &bytes.Buffer{} - } - return buf.(*bytes.Buffer) -} - -func giveBuf(buf *bytes.Buffer) { - buf.Reset() - bufPool.Put(buf) +var gzipPool = sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, } // Handler returns an http.Handler for the prometheus.DefaultGatherer, using @@ -100,19 +90,18 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) } - h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if inFlightSem != nil { select { case inFlightSem <- struct{}{}: // All good, carry on. 
defer func() { <-inFlightSem }() default: - http.Error(w, fmt.Sprintf( + http.Error(rsp, fmt.Sprintf( "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, ), http.StatusServiceUnavailable) return } } - mfs, err := reg.Gather() if err != nil { if opts.ErrorLog != nil { @@ -123,26 +112,40 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { panic(err) case ContinueOnError: if len(mfs) == 0 { - http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError) + // Still report the error if no metrics have been gathered. + httpError(rsp, err) return } case HTTPErrorOnError: - http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError) + httpError(rsp, err) return } } contentType := expfmt.Negotiate(req.Header) - buf := getBuf() - defer giveBuf(buf) - writer, encoding := decorateWriter(req, buf, opts.DisableCompression) - enc := expfmt.NewEncoder(writer, contentType) + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + var lastErr error for _, mf := range mfs { if err := enc.Encode(mf); err != nil { lastErr = err if opts.ErrorLog != nil { - opts.ErrorLog.Println("error encoding metric family:", err) + opts.ErrorLog.Println("error encoding and sending metric family:", err) } switch opts.ErrorHandling { case PanicOnError: @@ -150,28 +153,15 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { case ContinueOnError: // Handled later. case HTTPErrorOnError: - http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + httpError(rsp, err) return } } } - if closer, ok := writer.(io.Closer); ok { - closer.Close() + + if lastErr != nil { + httpError(rsp, lastErr) } - if lastErr != nil && buf.Len() == 0 { - http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) - return - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil { - opts.ErrorLog.Println("error while sending encoded metrics:", err) - } - // TODO(beorn7): Consider streaming serving of metrics. }) if opts.Timeout <= 0 { @@ -292,20 +282,30 @@ type HandlerOpts struct { Timeout time.Duration } -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). -func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { - if compressionDisabled { - return writer, "" - } - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") +// gzipAccepted returns whether the client will accept gzip-encoded content. 
+func gzipAccepted(header http.Header) bool { + a := header.Get(acceptEncodingHeader) + parts := strings.Split(a, ",") for _, part := range parts { - part := strings.TrimSpace(part) + part = strings.TrimSpace(part) if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" + return true } } - return writer, "" + return false +} + +// httpError removes any content-encoding header and then calls http.Error with +// the provided error and http.StatusInternalServerError. Error contents are +// supposed to be uncompressed plain text. However, same as with a plain +// http.Error, any header settings will be void if the header has already been +// sent. The error message will still be written to the writer, but it will +// probably be of limited use. +func httpError(rsp http.ResponseWriter, err error) { + rsp.Header().Del(contentEncodingHeader) + http.Error( + rsp, + "An error has occurred while serving metrics:\n\n"+err.Error(), + http.StatusInternalServerError, + ) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index bee370364..b5e70b93f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -15,17 +15,22 @@ package prometheus import ( "bytes" - "errors" "fmt" + "io/ioutil" "os" + "path/filepath" "runtime" "sort" + "strings" "sync" "unicode/utf8" "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" ) const ( @@ -38,12 +43,13 @@ const ( // Registerer and Gatherer interface a number of convenience functions in this // package act on. Initially, both variables point to the same Registry, which // has a process collector (currently on Linux only, see NewProcessCollector) -// and a Go collector (see NewGoCollector) already registered. This approach to -// keep default instances as global state mirrors the approach of other packages -// in the Go standard library. Note that there are caveats. Change the variables -// with caution and only if you understand the consequences. Users who want to -// avoid global state altogether should not use the convenience functions and -// act on custom instances instead. +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implications with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. Users who want to avoid global state altogether should not use +// the convenience functions and act on custom instances instead. var ( defaultRegistry = NewRegistry() DefaultRegisterer Registerer = defaultRegistry @@ -51,7 +57,7 @@ var ( ) func init() { - MustRegister(NewProcessCollector(os.Getpid(), "")) + MustRegister(NewProcessCollector(ProcessCollectorOpts{})) MustRegister(NewGoCollector()) } @@ -67,7 +73,8 @@ func NewRegistry() *Registry { // NewPedanticRegistry returns a registry that checks during collection if each // collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry.
Unchecked Collectors (those whose +// Describe method does not yield any descriptors) are excluded from the check. // // Usually, a Registry will be happy as long as the union of all collected // Metrics is consistent and valid even if some metrics are not consistent with @@ -97,8 +104,13 @@ type Registerer interface { // returned error is an instance of AlreadyRegisteredError, which // contains the previously registered Collector. // - // It is in general not safe to register the same Collector multiple - // times concurrently. + // A Collector whose Describe method does not yield any Desc is treated + // as unchecked. Registration will always succeed. No check for + // re-registering (see previous paragraph) is performed. Thus, the + // caller is responsible for not double-registering the same unchecked + // Collector, and for providing a Collector that will not cause + // inconsistent metrics on collection. (This would lead to scrape + // errors.) Register(Collector) error // MustRegister works like Register but registers any number of // Collectors and panics upon the first registration that causes an @@ -107,7 +119,9 @@ type Registerer interface { // Unregister unregisters the Collector that equals the Collector passed // in as an argument. (Two Collectors are considered equal if their // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. + // returns whether a Collector was unregistered. Note that an unchecked + // Collector cannot be unregistered (as its Describe method does not + // yield any descriptor). // // Note that even after unregistering, it will not be possible to // register a new Collector that is inconsistent with the unregistered @@ -125,15 +139,23 @@ type Registerer interface { type Gatherer interface { // Gather calls the Collect method of the registered Collectors and then // gathers the collected metrics into a lexicographically sorted slice - // of MetricFamily protobufs. Even if an error occurs, Gather attempts - // to gather as many metrics as possible. Hence, if a non-nil error is - // returned, the returned MetricFamily slice could be nil (in case of a - // fatal error that prevented any meaningful metric collection) or - // contain a number of MetricFamily protobufs, some of which might be - // incomplete, and some might be missing altogether. The returned error - // (which might be a MultiError) explains the details. In scenarios - // where complete collection is critical, the returned MetricFamily - // protobufs should be disregarded if the returned error is non-nil. + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes.
If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. Gather() ([]*dto.MetricFamily, error) } @@ -234,6 +256,7 @@ type Registry struct { collectorsByID map[uint64]Collector // ID is a hash of the descIDs. descIDs map[uint64]struct{} dimHashesByName map[string]uint64 + uncheckedCollectors []Collector pedanticChecksEnabled bool } @@ -251,7 +274,12 @@ func (r *Registry) Register(c Collector) error { close(descChan) }() r.mtx.Lock() - defer r.mtx.Unlock() + defer func() { + // Drain channel in case of premature return to not leak a goroutine. + for range descChan { + } + r.mtx.Unlock() + }() // Conduct various tests... for desc := range descChan { @@ -291,9 +319,10 @@ func (r *Registry) Register(c Collector) error { } } } - // Did anything happen at all? + // A Collector yielding no Desc at all is considered unchecked. if len(newDescIDs) == 0 { - return errors.New("collector has no descriptors") + r.uncheckedCollectors = append(r.uncheckedCollectors, c) + return nil } if existing, exists := r.collectorsByID[collectorID]; exists { return AlreadyRegisteredError{ @@ -367,20 +396,24 @@ func (r *Registry) MustRegister(cs ...Collector) { // Gather implements Gatherer. func (r *Registry) Gather() ([]*dto.MetricFamily, error) { var ( - metricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + checkedMetricChan = make(chan Metric, capMetricChan) + uncheckedMetricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks ) r.mtx.RLock() - goroutineBudget := len(r.collectorsByID) + goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - collectors := make(chan Collector, len(r.collectorsByID)) + checkedCollectors := make(chan Collector, len(r.collectorsByID)) + uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) for _, collector := range r.collectorsByID { - collectors <- collector + checkedCollectors <- collector + } + for _, collector := range r.uncheckedCollectors { + uncheckedCollectors <- collector } // In case pedantic checks are enabled, we have to copy the map before // giving up the RLock. @@ -397,12 +430,14 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { collectWorker := func() { for { select { - case collector := <-collectors: - collector.Collect(metricChan) - wg.Done() + case collector := <-checkedCollectors: + collector.Collect(checkedMetricChan) + case collector := <-uncheckedCollectors: + collector.Collect(uncheckedMetricChan) default: return } + wg.Done() } } @@ -410,53 +445,128 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { go collectWorker() goroutineBudget-- - // Close the metricChan once all collectors are collected. + // Close checkedMetricChan and uncheckedMetricChan once all collectors + // are collected. 
go func() { wg.Wait() - close(metricChan) + close(checkedMetricChan) + close(uncheckedMetricChan) }() - // Drain metricChan in case of premature return. + // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. defer func() { - for range metricChan { + if checkedMetricChan != nil { + for range checkedMetricChan { + } + } + if uncheckedMetricChan != nil { + for range uncheckedMetricChan { + } } }() -collectLoop: + // Copy the channel references so we can nil them out later to remove + // them from the select statements below. + cmc := checkedMetricChan + umc := uncheckedMetricChan + for { select { - case metric, ok := <-metricChan: + case metric, ok := <-cmc: if !ok { - // metricChan is closed, we are done. - break collectLoop + cmc = nil + break } errs.Append(processMetric( metric, metricFamiliesByName, - metricHashes, dimHashes, + metricHashes, registeredDescIDs, )) + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) default: - if goroutineBudget <= 0 || len(collectors) == 0 { - // All collectors are aleady being worked on or + if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { + // All collectors are already being worked on or // we have already as many goroutines started as - // there are collectors. Just process metrics - // from now on. - for metric := range metricChan { + // there are collectors. Do the same as above, + // just without the default. + select { + case metric, ok := <-cmc: + if !ok { + cmc = nil + break + } errs.Append(processMetric( metric, metricFamiliesByName, - metricHashes, dimHashes, + metricHashes, registeredDescIDs, )) + case metric, ok := <-umc: + if !ok { + umc = nil + break + } + errs.Append(processMetric( + metric, metricFamiliesByName, + metricHashes, + nil, + )) } - break collectLoop + break } // Start more workers. go collectWorker() goroutineBudget-- runtime.Gosched() } + // Once both checkedMetricChan and uncheckedMetricChan are closed + // and drained, the contraption above will nil out cmc and umc, + // and then we can leave the collect loop here. + if cmc == nil && umc == nil { + break + } } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the +// Prometheus text format, and writes it to a temporary file. Upon success, the +// temporary file is renamed to the provided filename. +// +// This is intended for use with the textfile collector of the node exporter. +// Note that the node exporter expects the filename to be suffixed with ".prom". +func WriteToTextfile(filename string, g Gatherer) error { + tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + if err != nil { + return err + } + defer os.Remove(tmp.Name()) + + mfs, err := g.Gather() + if err != nil { + return err + } + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + + if err := os.Chmod(tmp.Name(), 0644); err != nil { + return err + } + return os.Rename(tmp.Name(), filename) } // processMetric is an internal helper method only used by the Gather method.
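The new WriteToTextfile targets the node exporter's textfile collector. A minimal usage sketch, with a hypothetical registry and output path (illustration only, not part of this change):

	package main

	import (
		"log"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		reg := prometheus.NewRegistry()
		reg.MustRegister(prometheus.NewGoCollector())

		// Gathers from reg, writes the text format to a temp file, and
		// renames it over the target on success.
		if err := prometheus.WriteToTextfile("/var/lib/node_exporter/textfile/myjob.prom", reg); err != nil {
			log.Fatal(err)
		}
	}

Because the output is written to a temporary file first and renamed into place, the textfile collector never observes a half-written file.
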
@@ -464,16 +574,20 @@ func processMetric( metric Metric, metricFamiliesByName map[string]*dto.MetricFamily, metricHashes map[uint64]struct{}, - dimHashes map[string]uint64, registeredDescIDs map[uint64]struct{}, ) error { desc := metric.Desc() + // Wrapped metrics collected by an unchecked Collector can have an + // invalid Desc. + if desc.err != nil { + return desc.err + } dtoMetric := &dto.Metric{} if err := metric.Write(dtoMetric); err != nil { return fmt.Errorf("error collecting metric %v: %s", desc, err) } metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { + if ok { // Existing name. if metricFamily.GetHelp() != desc.help { return fmt.Errorf( "collected metric %s %s has help %q but should have %q", @@ -520,7 +634,7 @@ func processMetric( default: panic("encountered MetricFamily with invalid type") } - } else { + } else { // New name. metricFamily = &dto.MetricFamily{} metricFamily.Name = proto.String(desc.fqName) metricFamily.Help = proto.String(desc.help) @@ -539,9 +653,12 @@ func processMetric( default: return fmt.Errorf("empty metric collected: %s", dtoMetric) } + if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { + return err + } metricFamiliesByName[desc.fqName] = metricFamily } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { return err } if registeredDescIDs != nil { @@ -583,7 +700,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { var ( metricFamiliesByName = map[string]*dto.MetricFamily{} metricHashes = map[uint64]struct{}{} - dimHashes = map[string]uint64{} errs MultiError // The collected errors to return in the end. ) @@ -620,10 +736,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { existingMF.Name = mf.Name existingMF.Help = mf.Help existingMF.Type = mf.Type + if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { + errs = append(errs, err) + continue + } metricFamiliesByName[mf.GetName()] = existingMF } for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { + if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { errs = append(errs, err) continue } @@ -631,88 +751,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { } } } - return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) +// checkSuffixCollisions checks for collisions with the “magic” suffixes the +// Prometheus text format and the internal metric representation of the +// Prometheus server add while flattening Summaries and Histograms. 
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { + var ( + newName = mf.GetName() + newType = mf.GetType() + newNameWithoutSuffix = "" + ) + switch { + case strings.HasSuffix(newName, "_count"): + newNameWithoutSuffix = newName[:len(newName)-6] + case strings.HasSuffix(newName, "_sum"): + newNameWithoutSuffix = newName[:len(newName)-4] + case strings.HasSuffix(newName, "_bucket"): + newNameWithoutSuffix = newName[:len(newName)-7] } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj + if newNameWithoutSuffix != "" { + if existingMF, ok := mfs[newNameWithoutSuffix]; ok { + switch existingMF.GetType() { + case dto.MetricType_SUMMARY: + if !strings.HasSuffix(newName, "_bucket") { + return fmt.Errorf( + "collected metric named %q collides with previously collected summary named %q", + newName, newNameWithoutSuffix, + ) + } + case dto.MetricType_HISTOGRAM: + return fmt.Errorf( + "collected metric named %q collides with previously collected histogram named %q", + newName, newNameWithoutSuffix, + ) + } } } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// normalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. -func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) + if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_count"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_count", + ) + } + if _, ok := mfs[newName+"_sum"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_sum", + ) } } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) + if newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_bucket"]; ok { + return fmt.Errorf( + "collected histogram named %q collides with previously collected metric named %q", + newName, newName+"_bucket", + ) + } } - return result + return nil } // checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashed the Metric labels and the MetricFamily +// provided MetricFamily. It also hashes the Metric labels and the MetricFamily // name. If the resulting hash is already in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. 
The provided dimHashes maps -// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes -// doesn't yet contain a hash for the provided MetricFamily, it is -// added. Otherwise, an error is returned if the existing dimHashes in not equal -// the calculated dimHash. +// is returned. If not, it is added to metricHashes. func checkMetricConsistency( metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, metricHashes map[uint64]struct{}, - dimHashes map[string]uint64, ) error { + name := metricFamily.GetName() + // Type consistency with metric family. if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || @@ -720,47 +832,65 @@ func checkMetricConsistency( metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { return fmt.Errorf( - "collected metric %s %s is not a %s", - metricFamily.GetName(), dtoMetric, metricFamily.GetType(), + "collected metric %q { %s} is not a %s", + name, dtoMetric, metricFamily.GetType(), ) } + previousLabelName := "" for _, labelPair := range dtoMetric.GetLabel() { - if !utf8.ValidString(*labelPair.Value) { - return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value) + labelName := labelPair.GetName() + if labelName == previousLabelName { + return fmt.Errorf( + "collected metric %q { %s} has two or more labels with the same name: %s", + name, dtoMetric, labelName, + ) } + if !checkLabelName(labelName) { + return fmt.Errorf( + "collected metric %q { %s} has a label with an invalid name: %s", + name, dtoMetric, labelName, + ) + } + if dtoMetric.Summary != nil && labelName == quantileLabel { + return fmt.Errorf( + "collected metric %q { %s} must not have an explicit %q label", + name, dtoMetric, quantileLabel, + ) + } + if !utf8.ValidString(labelPair.GetValue()) { + return fmt.Errorf( + "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", + name, dtoMetric, labelName, labelPair.GetValue()) + } + previousLabelName = labelName } - // Is the metric unique (i.e. no other metric with the same name and the same label values)? + // Is the metric unique (i.e. no other metric with the same name and the same labels)? h := hashNew() - h = hashAdd(h, metricFamily.GetName()) + h = hashAdd(h, name) h = hashAddByte(h, separatorByte) - dh := hashNew() // Make sure label pairs are sorted. We depend on it for the consistency // check. - sort.Sort(LabelPairSorter(dtoMetric.Label)) + if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + // We cannot sort dtoMetric.Label in place as it is immutable by contract. 
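+		// Sorting a copy rather than the original slice keeps the
+		// caller's labels untouched while still giving the hashing
+		// loop below a deterministic label order.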
+ copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) + copy(copiedLabels, dtoMetric.Label) + sort.Sort(labelPairSorter(copiedLabels)) + dtoMetric.Label = copiedLabels + } for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetName()) + h = hashAddByte(h, separatorByte) h = hashAdd(h, lp.GetValue()) h = hashAddByte(h, separatorByte) - dh = hashAdd(dh, lp.GetName()) - dh = hashAddByte(dh, separatorByte) } if _, exists := metricHashes[h]; exists { return fmt.Errorf( - "collected metric %s %s was collected before with the same name and label values", - metricFamily.GetName(), dtoMetric, + "collected metric %q { %s} was collected before with the same name and label values", + name, dtoMetric, ) } - if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { - if dimHash != dh { - return fmt.Errorf( - "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", - metricFamily.GetName(), dtoMetric, - ) - } - } else { - dimHashes[metricFamily.GetName()] = dh - } metricHashes[h] = struct{}{} return nil } @@ -779,8 +909,8 @@ func checkDescConsistency( } // Is the desc consistent with the content of the metric? - lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) - lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) + copy(lpsFromDesc, desc.constLabelPairs) for _, l := range desc.variableLabels { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ Name: proto.String(l), @@ -792,7 +922,7 @@ func checkDescConsistency( metricFamily.GetName(), dtoMetric, desc, ) } - sort.Sort(LabelPairSorter(lpsFromDesc)) + sort.Sort(labelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index f7dc85b96..2980614df 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -37,7 +37,7 @@ const quantileLabel = "quantile" // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency // as rank estimations. However, the default behavior will change in the -// upcoming v0.10 of the library. There will be no rank estiamtions at all by +// upcoming v0.10 of the library. There will be no rank estimations at all by // default. For a sane transition, it is recommended to set the desired rank // estimations explicitly. // @@ -81,10 +81,10 @@ const ( ) // SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name and Help to a non-empty string. While all other fields -// are optional and can safely be left at their zero value, it is recommended to -// explicitly set the Objectives field to the desired value as the default value -// will change in the upcoming v0.10 of the library. +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v0.10 of the library. 
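+//
+// A minimal example with explicitly set Objectives (the metric name and the
+// chosen quantiles are illustrative only):
+//
+//	prometheus.NewSummary(prometheus.SummaryOpts{
+//		Name:       "request_duration_seconds",
+//		Help:       "Duration of requests.",
+//		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+//	})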
type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with @@ -95,7 +95,7 @@ type SummaryOpts struct { Subsystem string Name string - // Help provides information about this Summary. Mandatory! + // Help provides information about this Summary. // // Metrics with the same fully-qualified name must have the same Help // string. @@ -105,6 +105,11 @@ type SummaryOpts struct { // with the same fully-qualified name must have the same label names in // their ConstLabels. // + // Due to the way a Summary is represented in the Prometheus text format + // and how it is handled by the Prometheus server internally, “quantile” + // is an illegal label name. Construction of a Summary or SummaryVec + // will panic if this label name is used in ConstLabels. + // // ConstLabels are only used rarely. In particular, do not use them to // attach the same labels to all your metrics. Those use cases are // better covered by target labels set by the scraping Prometheus @@ -176,7 +181,7 @@ func NewSummary(opts SummaryOpts) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) } for _, n := range desc.variableLabels { @@ -402,7 +407,16 @@ type SummaryVec struct { // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and // partitioned by the given label names. +// +// Due to the way a Summary is represented in the Prometheus text format and how +// it is handled by the Prometheus server internally, “quantile” is an illegal +// label name. NewSummaryVec will panic if this label name is used. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + for _, ln := range labelNames { + if ln == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, @@ -572,7 +586,7 @@ func (s *constSummary) Write(out *dto.Metric) error { // map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. +// consistent with the variable labels in Desc or if Desc is invalid. func NewConstSummary( desc *Desc, count uint64, @@ -580,6 +594,9 @@ func NewConstSummary( quantiles map[float64]float64, labelValues ...string, ) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go index b8fc5f18c..8d5f10523 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -39,13 +39,16 @@ func NewTimer(o Observer) *Timer { // ObserveDuration records the duration passed since the Timer was created with // NewTimer. It calls the Observe method of the Observer provided during -// construction with the duration in seconds as an argument. ObserveDuration is -// usually called with a defer statement. +// construction with the duration in seconds as an argument. The observed +// duration is also returned. ObserveDuration is usually called with a defer +// statement. 
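+//
+// For example (requestDuration being any Observer, such as a Histogram):
+//
+//	timer := prometheus.NewTimer(requestDuration)
+//	defer timer.ObserveDuration()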
// // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. -func (t *Timer) ObserveDuration() { +func (t *Timer) ObserveDuration() time.Duration { + d := time.Since(t.begin) if t.observer != nil { - t.observer.Observe(time.Since(t.begin).Seconds()) + t.observer.Observe(d.Seconds()) } + return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 543b57c27..eb248f108 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -17,9 +17,9 @@ import ( "fmt" "sort" - dto "github.com/prometheus/client_model/go" - "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" ) // ValueType is an enumeration of metric types that represent a simple value. @@ -77,8 +77,12 @@ func (v *valueFunc) Write(out *dto.Metric) error { // operations. However, when implementing custom Collectors, it is useful as a // throw-away metric that is generated on the fly to send it to Prometheus in // the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc. +// labelValues is not consistent with the variable labels in Desc or if Desc is +// invalid. func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { return nil, err } @@ -152,9 +156,7 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { Value: proto.String(labelValues[i]), }) } - for _, lp := range desc.constLabelPairs { - labelPairs = append(labelPairs, lp) - } - sort.Sort(LabelPairSorter(labelPairs)) + labelPairs = append(labelPairs, desc.constLabelPairs...) + sort.Sort(labelPairSorter(labelPairs)) return labelPairs } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index cea158249..14ed9e856 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -277,6 +277,9 @@ func (m *metricMap) deleteByHashWithLabelValues( func (m *metricMap) deleteByHashWithLabels( h uint64, labels Labels, curry []curriedLabelValue, ) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + metrics, ok := m.metrics[h] if !ok { return false diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go new file mode 100644 index 000000000..49159bf3e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -0,0 +1,179 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "fmt" + "sort" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// WrapRegistererWith returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. +// +// WrapRegistererWith provides a way to add fixed labels to a subset of +// Collectors. It should not be used to add fixed labels to all metrics exposed. +// +// The Collector example demonstrates a use of WrapRegistererWith. +func WrapRegistererWith(labels Labels, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + labels: labels, + } +} + +// WrapRegistererWithPrefix returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided prefix to the name of all Metrics it collects. +// +// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of +// a sub-system. To make this work, register metrics of the sub-system with the +// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful +// to use the same prefix for all metrics exposed. In particular, do not prefix +// metric names that are standardized across applications, as that would break +// horizontal monitoring, for example the metrics provided by the Go collector +// (see NewGoCollector) and the process collector (see NewProcessCollector). (In +// fact, those metrics are already prefixed with “go_” or “process_”, +// respectively.) 
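+//
+// For example, a collector exposing "requests_total" and registered through
+// WrapRegistererWithPrefix("worker_", reg) ends up exposing
+// "worker_requests_total" (both names are hypothetical and purely
+// illustrative).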
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + prefix: prefix, + } +} + +type wrappingRegisterer struct { + wrappedRegisterer Registerer + prefix string + labels Labels +} + +func (r *wrappingRegisterer) Register(c Collector) error { + return r.wrappedRegisterer.Register(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +func (r *wrappingRegisterer) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +func (r *wrappingRegisterer) Unregister(c Collector) bool { + return r.wrappedRegisterer.Unregister(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +type wrappingCollector struct { + wrappedCollector Collector + prefix string + labels Labels +} + +func (c *wrappingCollector) Collect(ch chan<- Metric) { + wrappedCh := make(chan Metric) + go func() { + c.wrappedCollector.Collect(wrappedCh) + close(wrappedCh) + }() + for m := range wrappedCh { + ch <- &wrappingMetric{ + wrappedMetric: m, + prefix: c.prefix, + labels: c.labels, + } + } +} + +func (c *wrappingCollector) Describe(ch chan<- *Desc) { + wrappedCh := make(chan *Desc) + go func() { + c.wrappedCollector.Describe(wrappedCh) + close(wrappedCh) + }() + for desc := range wrappedCh { + ch <- wrapDesc(desc, c.prefix, c.labels) + } +} + +type wrappingMetric struct { + wrappedMetric Metric + prefix string + labels Labels +} + +func (m *wrappingMetric) Desc() *Desc { + return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) +} + +func (m *wrappingMetric) Write(out *dto.Metric) error { + if err := m.wrappedMetric.Write(out); err != nil { + return err + } + if len(m.labels) == 0 { + // No wrapping labels. + return nil + } + for ln, lv := range m.labels { + out.Label = append(out.Label, &dto.LabelPair{ + Name: proto.String(ln), + Value: proto.String(lv), + }) + } + sort.Sort(labelPairSorter(out.Label)) + return nil +} + +func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { + constLabels := Labels{} + for _, lp := range desc.constLabelPairs { + constLabels[*lp.Name] = *lp.Value + } + for ln, lv := range labels { + if _, alreadyUsed := constLabels[ln]; alreadyUsed { + return &Desc{ + fqName: desc.fqName, + help: desc.help, + variableLabels: desc.variableLabels, + constLabelPairs: desc.constLabelPairs, + err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), + } + } + constLabels[ln] = lv + } + // NewDesc will do remaining validations. + newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + // Propagate errors if there was any. This will override any errer + // created by NewDesc above, i.e. earlier errors get precedence. + if desc.err != nil { + newDesc.err = desc.err + } + return newDesc +} diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index b065f8683..9805432c2 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,34 +1,23 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: metrics.proto -// DO NOT EDIT! -/* -Package io_prometheus_client is a generated protocol buffer package. 
- -It is generated from these files: - metrics.proto - -It has these top-level messages: - LabelPair - Gauge - Counter - Quantile - Summary - Untyped - Histogram - Bucket - Metric - MetricFamily -*/ -package io_prometheus_client +package io_prometheus_client // import "github.com/prometheus/client_model/go" import proto "github.com/golang/protobuf/proto" +import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal +var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type MetricType int32 const ( @@ -70,16 +59,41 @@ func (x *MetricType) UnmarshalJSON(data []byte) error { *x = MetricType(value) return nil } +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *LabelPair) Reset() { *m = LabelPair{} } func (m *LabelPair) String() string { return proto.CompactTextString(m) } func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelPair.Unmarshal(m, b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) +} +func (dst *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(dst, src) +} +func (m *LabelPair) XXX_Size() int { + return xxx_messageInfo_LabelPair.Size(m) +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo func (m *LabelPair) GetName() string { if m != nil && m.Name != nil { @@ -96,13 +110,35 @@ func (m *LabelPair) GetValue() string { } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Gauge) Reset() { *m = Gauge{} } func (m *Gauge) String() string { return proto.CompactTextString(m) } func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (dst *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(dst, src) +} +func (m *Gauge) XXX_Size() int { + return 
xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo func (m *Gauge) GetValue() float64 { if m != nil && m.Value != nil { @@ -112,13 +148,35 @@ func (m *Gauge) GetValue() float64 { } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Counter) Reset() { *m = Counter{} } func (m *Counter) String() string { return proto.CompactTextString(m) } func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (dst *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(dst, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo func (m *Counter) GetValue() float64 { if m != nil && m.Value != nil { @@ -128,14 +186,36 @@ func (m *Counter) GetValue() float64 { } type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Quantile) Reset() { *m = Quantile{} } func (m *Quantile) String() string { return proto.CompactTextString(m) } func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} +} +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantile.Unmarshal(m, b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) +} +func (dst *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(dst, src) +} +func (m *Quantile) XXX_Size() int { + return xxx_messageInfo_Quantile.Size(m) +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo func (m *Quantile) GetQuantile() float64 { if m != nil && m.Quantile != nil { @@ -152,15 +232,37 @@ func (m *Quantile) GetValue() float64 { } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_unrecognized []byte `json:"-"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum 
*float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) +} +func (dst *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(dst, src) +} +func (m *Summary) XXX_Size() int { + return xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo func (m *Summary) GetSampleCount() uint64 { if m != nil && m.SampleCount != nil { @@ -184,13 +286,35 @@ func (m *Summary) GetQuantile() []*Quantile { } type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Untyped) Reset() { *m = Untyped{} } func (m *Untyped) String() string { return proto.CompactTextString(m) } func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} +} +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Untyped.Unmarshal(m, b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) +} +func (dst *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(dst, src) +} +func (m *Untyped) XXX_Size() int { + return xxx_messageInfo_Untyped.Size(m) +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo func (m *Untyped) GetValue() float64 { if m != nil && m.Value != nil { @@ -200,15 +324,37 @@ func (m *Untyped) GetValue() float64 { } type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_unrecognized []byte `json:"-"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Histogram) Reset() { *m = Histogram{} } func (m *Histogram) String() string { return proto.CompactTextString(m) } func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return 
fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Histogram.Unmarshal(m, b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) +} +func (dst *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(dst, src) +} +func (m *Histogram) XXX_Size() int { + return xxx_messageInfo_Histogram.Size(m) +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo func (m *Histogram) GetSampleCount() uint64 { if m != nil && m.SampleCount != nil { @@ -232,14 +378,36 @@ func (m *Histogram) GetBucket() []*Bucket { } type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` - XXX_unrecognized []byte `json:"-"` + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} +} +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (dst *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(dst, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo func (m *Bucket) GetCumulativeCount() uint64 { if m != nil && m.CumulativeCount != nil { @@ -256,19 +424,41 @@ func (m *Bucket) GetUpperBound() float64 { } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` - XXX_unrecognized []byte `json:"-"` + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 
`protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Metric) Reset() { *m = Metric{} } func (m *Metric) String() string { return proto.CompactTextString(m) } func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (dst *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(dst, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo func (m *Metric) GetLabel() []*LabelPair { if m != nil { @@ -320,16 +510,38 @@ func (m *Metric) GetTimestampMs() int64 { } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *MetricFamily) Reset() { *m = MetricFamily{} } func (m *MetricFamily) String() string { return proto.CompactTextString(m) } func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} +} +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) +} +func (dst *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(dst, src) +} +func (m *MetricFamily) XXX_Size() int { + return xxx_messageInfo_MetricFamily.Size(m) +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo func (m *MetricFamily) GetName() string { if m != nil && m.Name != nil { @@ -360,5 +572,58 @@ func (m *MetricFamily) GetMetric() []*Metric { } func init() { + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), 
"io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) } + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } + +var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ + // 591 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, + 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, + 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, + 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, + 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, + 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, + 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, + 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, + 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, + 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, + 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, + 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, + 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, + 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, + 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, + 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, + 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, + 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, + 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, + 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, + 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, + 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, + 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, + 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, + 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, + 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, + 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, + 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, + 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, + 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, + 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 
0x09, 0x79, 0xd0, 0x3b, 0xbd, + 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, + 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, + 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, + 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, + 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, + 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index a7a42d5ef..c092723e8 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -164,9 +164,9 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error { } // ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurs during sample extraction, it continues to +// families. If an error occurrs during sample extraction, it continues to // extract from the remaining metric families. The returned error is the last -// error that has occured. +// error that has occurred. func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { var ( all model.Vector diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f11321cd0..8e473d0fe 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -14,13 +14,45 @@ package expfmt import ( + "bytes" "fmt" "io" "math" + "strconv" "strings" + "sync" + + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" +) + +// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer +// implements it. +type enhancedWriter interface { + io.Writer + WriteRune(r rune) (n int, err error) + WriteString(s string) (n int, err error) + WriteByte(c byte) error +} + +const ( + initialBufSize = 512 + initialNumBufSize = 24 +) + +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, initialBufSize)) + }, + } + numBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, initialNumBufSize) + return &b + }, + } ) // MetricFamilyToText converts a MetricFamily proto message into text format and @@ -32,37 +64,92 @@ import ( // will result in invalid text format output. // // This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { - var written int - +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { // Fail-fast checks. if len(in.Metric) == 0 { - return written, fmt.Errorf("MetricFamily has no metrics: %s", in) + return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) } name := in.GetName() if name == "" { - return written, fmt.Errorf("MetricFamily has no name: %s", in) + return 0, fmt.Errorf("MetricFamily has no name: %s", in) } + // Try the interface upgrade. If it doesn't work, we'll use a + // bytes.Buffer from the sync.Pool and write out its content to out in a + // single go in the end. 
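+	// Either way, everything below writes through the byte-oriented
+	// enhancedWriter methods rather than fmt.Fprintf, avoiding the
+	// formatting and allocation overhead of the fmt package.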
+ w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bytes.Buffer) + b.Reset() + w = b + defer func() { + bWritten, bErr := out.Write(b.Bytes()) + written = bWritten + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var n int + // Comments, first HELP, then TYPE. if in.Help != nil { - n, err := fmt.Fprintf( - out, "# HELP %s %s\n", - name, escapeString(*in.Help, false), - ) + n, err = w.WriteString("# HELP ") written += n if err != nil { - return written, err + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, false) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return } } - metricType := in.GetType() - n, err := fmt.Fprintf( - out, "# TYPE %s %s\n", - name, strings.ToLower(metricType.String()), - ) + n, err = w.WriteString("# TYPE ") written += n if err != nil { - return written, err + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + metricType := in.GetType() + switch metricType { + case dto.MetricType_COUNTER: + n, err = w.WriteString(" counter\n") + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" untyped\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return } // Finally the samples, one line for each. @@ -75,9 +162,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Counter.GetValue(), - out, ) case dto.MetricType_GAUGE: if metric.Gauge == nil { @@ -86,9 +172,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Gauge.GetValue(), - out, ) case dto.MetricType_UNTYPED: if metric.Untyped == nil { @@ -97,9 +182,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } n, err = writeSample( - name, metric, "", "", + w, name, "", metric, "", 0, metric.Untyped.GetValue(), - out, ) case dto.MetricType_SUMMARY: if metric.Summary == nil { @@ -109,29 +193,26 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { } for _, q := range metric.Summary.Quantile { n, err = writeSample( - name, metric, - model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), q.GetValue(), - out, ) written += n if err != nil { - return written, err + return } } n, err = writeSample( - name+"_sum", metric, "", "", + w, name, "_sum", metric, "", 0, metric.Summary.GetSampleSum(), - out, ) - if err != nil { - return written, err - } written += n + if err != nil { + return + } n, err = writeSample( - name+"_count", metric, "", "", + w, name, "_count", metric, "", 0, float64(metric.Summary.GetSampleCount()), - out, ) case dto.MetricType_HISTOGRAM: if metric.Histogram == nil { @@ -140,46 +221,42 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { ) } infSeen := false - for _, q := range metric.Histogram.Bucket { + for _, b := range metric.Histogram.Bucket { n, err = writeSample( - name+"_bucket", 
metric, - model.BucketLabel, fmt.Sprint(q.GetUpperBound()), - float64(q.GetCumulativeCount()), - out, + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + float64(b.GetCumulativeCount()), ) written += n if err != nil { - return written, err + return } - if math.IsInf(q.GetUpperBound(), +1) { + if math.IsInf(b.GetUpperBound(), +1) { infSeen = true } } if !infSeen { n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, "+Inf", + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), float64(metric.Histogram.GetSampleCount()), - out, ) - if err != nil { - return written, err - } written += n + if err != nil { + return + } } n, err = writeSample( - name+"_sum", metric, "", "", + w, name, "_sum", metric, "", 0, metric.Histogram.GetSampleSum(), - out, ) - if err != nil { - return written, err - } written += n + if err != nil { + return + } n, err = writeSample( - name+"_count", metric, "", "", + w, name, "_count", metric, "", 0, float64(metric.Histogram.GetSampleCount()), - out, ) default: return written, fmt.Errorf( @@ -188,116 +265,204 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { } written += n if err != nil { - return written, err + return } } - return written, nil + return } -// writeSample writes a single sample in text format to out, given the metric +// writeSample writes a single sample in text format to w, given the metric // name, the metric proto message itself, optionally an additional label name -// and value (use empty strings if not required), and the value. The function -// returns the number of bytes written and any error encountered. +// with a float64 value (use empty string as label name if not required), and +// the value. The function returns the number of bytes written and any error +// encountered. 
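+//
+// For example, a (hypothetical) call like
+//
+//	writeSample(w, "rpc_duration_seconds", "_sum", metric, "", 0, 42)
+//
+// emits, for a metric without labels or timestamp, a text-format line such as
+//
+//	rpc_duration_seconds_sum 42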
func writeSample( - name string, + w enhancedWriter, + name, suffix string, metric *dto.Metric, - additionalLabelName, additionalLabelValue string, + additionalLabelName string, additionalLabelValue float64, value float64, - out io.Writer, ) (int, error) { var written int - n, err := fmt.Fprint(out, name) + n, err := w.WriteString(name) written += n if err != nil { return written, err } - n, err = labelPairsToText( - metric.Label, - additionalLabelName, additionalLabelValue, - out, - ) - written += n - if err != nil { - return written, err - } - n, err = fmt.Fprintf(out, " %v", value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + if suffix != "" { + n, err = w.WriteString(suffix) written += n if err != nil { return written, err } } - n, err = out.Write([]byte{'\n'}) + n, err = writeLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) written += n if err != nil { return written, err } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeFloat(w, value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeInt(w, *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } return written, nil } -// labelPairsToText converts a slice of LabelPair proto messages plus the +// writeLabelPairs converts a slice of LabelPair proto messages plus the // explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'out'. An empty slice in combination with an -// empty string 'additionalLabelName' results in nothing being -// written. Otherwise, the label pairs are written, escaped as required by the -// text format, and enclosed in '{...}'. The function returns the number of -// bytes written and any error encountered. -func labelPairsToText( +// text format and writes it to 'w'. An empty slice in combination with an empty +// string 'additionalLabelName' results in nothing being written. Otherwise, the +// label pairs are written, escaped as required by the text format, and enclosed +// in '{...}'. The function returns the number of bytes written and any error +// encountered. 
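+//
+// For example, one label pair method="GET" plus the additional label le with
+// value 0.5 is written as {method="GET",le="0.5"} (names chosen for
+// illustration only).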
+func writeLabelPairs( + w enhancedWriter, in []*dto.LabelPair, - additionalLabelName, additionalLabelValue string, - out io.Writer, + additionalLabelName string, additionalLabelValue float64, ) (int, error) { if len(in) == 0 && additionalLabelName == "" { return 0, nil } - var written int - separator := '{' + var ( + written int + separator byte = '{' + ) for _, lp := range in { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, lp.GetName(), escapeString(lp.GetValue(), true), - ) + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) written += n if err != nil { return written, err } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } separator = ',' } if additionalLabelName != "" { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, additionalLabelName, - escapeString(additionalLabelValue, true), - ) + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) written += n if err != nil { return written, err } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } } - n, err := out.Write([]byte{'}'}) - written += n + err := w.WriteByte('}') + written++ if err != nil { return written, err } return written, nil } +// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. var ( - escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) - escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) + escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) + quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) ) -// escapeString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. -func escapeString(v string, includeDoubleQuote bool) string { +func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { if includeDoubleQuote { - return escapeWithDoubleQuote.Replace(v) + return quotedEscaper.WriteString(w, v) + } else { + return escaper.WriteString(w, v) } - - return escape.Replace(v) +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. 
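+//
+// For example, writeFloat(w, 1) writes "1", writeFloat(w, math.Inf(+1))
+// writes "+Inf", and writeFloat(w, 0.25) falls through to
+// strconv.AppendFloat and writes "0.25".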
+func writeFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return 1, w.WriteByte('1') + case f == 0: + return 1, w.WriteByte('0') + case f == -1: + return w.WriteString("-1") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeInt is equivalent to fmt.Fprint with an int64 argument but uses +// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid +// allocations. +func writeInt(w enhancedWriter, i int64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendInt((*bp)[:0], i, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err } diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 54bcfde29..ec3d86ba7 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -359,7 +359,7 @@ func (p *TextParser) startLabelValue() stateFn { } return p.readingValue default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) return nil } } @@ -556,8 +556,8 @@ func (p *TextParser) readTokenUntilWhitespace() { // byte considered is the byte already read (now in p.currentByte). The first // newline byte encountered is still copied into p.currentByte, but not into // p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All -// other escape sequences are invalid and cause an error. +// recognized: '\\' translates into '\', and '\n' into a line-feed character. +// All other escape sequences are invalid and cause an error. func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.Reset() escaped := false diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index 7538e2997..bb99889d2 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -59,8 +59,8 @@ func (m *Matcher) Validate() error { return nil } -// Silence defines the representation of a silence definiton -// in the Prometheus eco-system. +// Silence defines the representation of a silence definition in the Prometheus +// eco-system. type Silence struct { ID uint64 `json:"id,omitempty"` diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 74ed5a9f7..46259b1f1 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -43,7 +43,7 @@ const ( // (1970-01-01 00:00 UTC) excluding leap seconds. type Time int64 -// Interval describes and interval between two timestamps. +// Interval describes an interval between two timestamps. 
type Interval struct { Start, End Time } diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9ed3ffd8..c9d8fb1a2 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -100,7 +100,7 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error { } // Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The sematics of Value equality is defined by SampleValue.Equal. +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. func (s *SamplePair) Equal(o *SamplePair) bool { return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) } @@ -117,7 +117,7 @@ type Sample struct { } // Equal compares first the metrics, then the timestamp, then the value. The -// sematics of value equality is defined by SampleValue.Equal. +// semantics of value equality is defined by SampleValue.Equal. func (s *Sample) Equal(o *Sample) bool { if s == o { return true diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml deleted file mode 100644 index 5416cf8a2..000000000 --- a/vendor/github.com/prometheus/procfs/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -sudo: false - -language: go - -go: -- 1.7.x -- 1.8.x -- 1.9.x -- 1.10.x -- 1.x - -go_import_path: github.com/prometheus/procfs - -script: -- make style check_license vet test staticcheck diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index 5c8f72625..947d7d8fa 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -11,61 +11,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck -pkgs = $(shell $(GO) list ./... | grep -v /vendor/) - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) - -ifdef DEBUG - bindata_flags = -debug -endif - -STATICCHECK_IGNORE = - -all: format staticcheck build test - -style: - @echo ">> checking code style" - @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' - -check_license: - @echo ">> checking license header" - @./scripts/check_license.sh - -test: fixtures/.unpacked sysfs/fixtures/.unpacked - @echo ">> running all tests" - @$(GO) test -race $(shell $(GO) list ./... | grep -v /vendor/ | grep -v examples) - -format: - @echo ">> formatting code" - @$(GO) fmt $(pkgs) - -vet: - @echo ">> vetting code" - @$(GO) vet $(pkgs) - -staticcheck: $(STATICCHECK) - @echo ">> running staticcheck" - @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) +include Makefile.common %/.unpacked: %.ttar ./ttar -C $(dir $*) -x -f $*.ttar touch $@ -$(FIRST_GOPATH)/bin/staticcheck: - @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck +update_fixtures: fixtures.ttar sysfs/fixtures.ttar -.PHONY: all style check_license format test vet staticcheck +%fixtures.ttar: %/fixtures + rm -v $(dir $*)fixtures/.unpacked + ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ -# Declaring the binaries at their default locations as PHONY targets is a hack -# to ensure the latest version is downloaded on every make execution. 
-# If this is not desired, copy/symlink these binaries to a different path and -# set the respective environment variables. -.PHONY: $(GOPATH)/bin/staticcheck +.PHONY: build +build: + +.PHONY: test +test: fixtures/.unpacked sysfs/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common new file mode 100644 index 000000000..741579e60 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -0,0 +1,223 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. +# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +unexport GOVENDOR +ifeq (, $(PRE_GO_111)) + ifneq (,$(wildcard go.mod)) + # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). + GO111MODULE := on + + ifneq (,$(wildcard vendor)) + # Always use the local vendor/ directory to satisfy the dependencies. + GOOPTS := $(GOOPTS) -mod=vendor + endif + endif +else + ifneq (,$(wildcard go.mod)) + ifneq (,$(wildcard vendor)) +$(warning This repository requires Go >= 1.11 because of Go modules) +$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') + endif + else + # This repository isn't using Go modules (yet). + GOVENDOR := $(FIRST_GOPATH)/bin/govendor + endif + + unexport GO111MODULE +endif +PROMU := $(FIRST_GOPATH)/bin/promu +STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck +pkgs = ./... + +GO_VERSION ?= $(shell $(GO) version) +GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION))) + +PROMU_VERSION ?= 0.2.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKER_REPO ?= prom + +.PHONY: all +all: precheck style staticcheck unused build test + +# This rule is used to forward a target like "build" to "common-build". This +# allows a new "build" target to be defined in a Makefile which includes this +# one and override "common-build" without override warnings. +%: common-% ; + +.PHONY: common-style +common-style: + @echo ">> checking code style" + @fmtRes=$$($(GOFMT) -d $$(find . 
-path ./vendor -prune -o -name '*.go' -print)); \ + if [ -n "$${fmtRes}" ]; then \ + echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ + echo "Please ensure you are using $$($(GO) version) for formatting code."; \ + exit 1; \ + fi + +.PHONY: common-check_license +common-check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: common-test-short +common-test-short: + @echo ">> running short tests" + GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs) + +.PHONY: common-test +common-test: + @echo ">> running all tests" + GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs) + +.PHONY: common-format +common-format: + @echo ">> formatting code" + GO111MODULE=$(GO111MODULE) $(GO) fmt $(GOOPTS) $(pkgs) + +.PHONY: common-vet +common-vet: + @echo ">> vetting code" + GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + +.PHONY: common-staticcheck +common-staticcheck: $(STATICCHECK) + @echo ">> running staticcheck" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs) +else + $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) +endif + +.PHONY: common-unused +common-unused: $(GOVENDOR) +ifdef GOVENDOR + @echo ">> running check for unused packages" + @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' +else +ifdef GO111MODULE + @echo ">> running check for unused/missing packages in go.mod" + GO111MODULE=$(GO111MODULE) $(GO) mod tidy + @git diff --exit-code -- go.sum go.mod +ifneq (,$(wildcard vendor)) + @echo ">> running check for unused packages in vendor/" + GO111MODULE=$(GO111MODULE) $(GO) mod vendor + @git diff --exit-code -- go.sum go.mod vendor/ +endif +endif +endif + +.PHONY: common-build +common-build: promu + @echo ">> building binaries" + GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) + +.PHONY: common-tarball +common-tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +.PHONY: common-docker +common-docker: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . + +.PHONY: common-docker-publish +common-docker-publish: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" + +.PHONY: common-docker-tag-latest +common-docker-tag-latest: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest" + +.PHONY: promu +promu: $(PROMU) + +$(PROMU): + curl -s -L $(PROMU_URL) | tar -xvz -C /tmp + mkdir -v -p $(FIRST_GOPATH)/bin + cp -v /tmp/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(PROMU) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh + +.PHONY: $(STATICCHECK) +$(STATICCHECK): +ifdef GO111MODULE +# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}. +# See https://github.com/golang/go/issues/27643. +# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules. 
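+# The throwaway module created below exists only so that 'go get' runs in
+# module mode; the staticcheck binary itself should still land in
+# $(FIRST_GOPATH)/bin (GOBIN is unexported at the top of this file), which is
+# the path $(STATICCHECK) expects.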
+ tmpModule=$$(mktemp -d 2>&1) && \ + mkdir -p $${tmpModule}/staticcheck && \ + cd "$${tmpModule}"/staticcheck && \ + GO111MODULE=on $(GO) mod init example.com/staticcheck && \ + GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \ + rm -rf $${tmpModule}; +else + GOOS= GOARCH= GO111MODULE=off $(GO) get -u honnef.co/go/tools/cmd/staticcheck +endif + +ifdef GOVENDOR +.PHONY: $(GOVENDOR) +$(GOVENDOR): + GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 3ee8291e8..13c831ef5 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -15,6 +15,9 @@ Lines: 1 vim Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/cwd +SymlinkTo: /usr/bin +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/exe SymlinkTo: /usr/bin/vim # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -111,6 +114,9 @@ SymlinkTo: mnt:[4026531840] Path: fixtures/26231/ns/net SymlinkTo: net:[4026531993] # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/root +SymlinkTo: / +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/stat Lines: 1 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 @@ -128,6 +134,9 @@ Lines: 1 ata_sff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/cwd +SymlinkTo: /does/not/exist +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/26232/fd Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -167,6 +176,9 @@ Max realtime priority 0 0 Max realtime timeout unlimited unlimited us Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/root +SymlinkTo: /does/not/exist +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26232/stat Lines: 1 33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 @@ -444,3 +456,7 @@ Path: fixtures/symlinktargets/xyz Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/.unpacked +Lines: 0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod new file mode 100644 index 000000000..e89ee6c90 --- /dev/null +++ 
b/vendor/github.com/prometheus/procfs/go.mod @@ -0,0 +1 @@ +module github.com/prometheus/procfs diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 1ad21c91a..2ff228e9d 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -13,7 +13,11 @@ package util -import "strconv" +import ( + "io/ioutil" + "strconv" + "strings" +) // ParseUint32s parses a slice of strings into a slice of uint32s. func ParseUint32s(ss []string) ([]uint32, error) { @@ -44,3 +48,12 @@ func ParseUint64s(ss []string) ([]uint64, error) { return us, nil } + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. +func ReadUintFromFile(path string) (uint64, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go new file mode 100644 index 000000000..df0d567b7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package util + +import ( + "bytes" + "os" + "syscall" +) + +// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// https://github.com/prometheus/node_exporter/pull/728/files +func SysReadFile(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + // On some machines, hwmon drivers are broken and return EAGAIN. This causes + // Go's ioutil.ReadFile implementation to poll forever. + // + // Since we either want to read data or bail immediately, do the simplest + // possible read using syscall directly. + b := make([]byte, 128) + n, err := syscall.Read(int(f.Fd()), b) + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(b[:n])), nil +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index e95ddbc67..7a8a1e099 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -39,8 +39,11 @@ const ( statVersion10 = "1.0" statVersion11 = "1.1" - fieldTransport10Len = 10 - fieldTransport11Len = 13 + fieldTransport10TCPLen = 10 + fieldTransport10UDPLen = 7 + + fieldTransport11TCPLen = 13 + fieldTransport11UDPLen = 10 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -186,6 +189,8 @@ type NFSOperationStats struct { // A NFSTransportStats contains statistics for the NFS mount RPC requests and // responses. type NFSTransportStats struct { + // The transport protocol used for the NFS mount. + Protocol string // The local port used for the NFS mount. 
Port uint64 // Number of times the client has had to establish a connection from scratch @@ -360,7 +365,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) } - tstats, err := parseNFSTransportStats(ss[2:], statVersion) + tstats, err := parseNFSTransportStats(ss[1:], statVersion) if err != nil { return nil, err } @@ -522,13 +527,33 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { // parseNFSTransportStats parses a NFSTransportStats line using an input set of // integer fields matched to a specific stats version. func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + // Extract the protocol field. It is the only string value in the line + protocol := ss[0] + ss = ss[1:] + switch statVersion { case statVersion10: - if len(ss) != fieldTransport10Len { + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport10TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport10UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) } case statVersion11: - if len(ss) != fieldTransport11Len { + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport11TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport11UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) } default: @@ -536,12 +561,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response. + // in a v1.0 response. Since the stat length is bigger for TCP stats, we use + // the TCP length here. // // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11Len) + ns := make([]uint64, fieldTransport11TCPLen) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -551,7 +577,18 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats ns[i] = n } + // The fields differ depending on the transport protocol (TCP or UDP) + // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt + // + // For the udp RPC transport there is no connection count, connect idle time, + // or idle time (fields #3, #4, and #5); all other fields are the same. So + // we set them to 0 here. + if protocol == "udp" { + ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } + return &NFSTransportStats{ + Protocol: protocol, Port: ns[0], Bind: ns[1], Connect: ns[2], diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 7cf5b8acf..06bed0ef4 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -156,6 +156,26 @@ func (p Proc) Executable() (string, error) { return exe, err } +// Cwd returns the absolute path to the current working directory of the process. 
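+// If /proc/[pid]/cwd does not exist (for example, because the process has
+// already terminated), Cwd returns an empty string and a nil error.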
+func (p Proc) Cwd() (string, error) {
+	wd, err := os.Readlink(p.path("cwd"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return wd, err
+}
+
+// RootDir returns the absolute path to the process's root directory (as set
+// by chroot).
+func (p Proc) RootDir() (string, error) {
+	rdir, err := os.Readlink(p.path("root"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return rdir, err
+}
+
 // FileDescriptors returns the currently open file descriptors of a process.
 func (p Proc) FileDescriptors() ([]uintptr, error) {
 	names, err := p.fileDescriptors()
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
index ffe9df50d..8f1508f0f 100644
--- a/vendor/github.com/prometheus/procfs/xfrm.go
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -113,7 +113,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
 
 		if len(fields) != 2 {
 			return XfrmStat{}, fmt.Errorf(
-				"couldnt parse %s line %s", file.Name(), s.Text())
+				"couldn't parse %s line %s", file.Name(), s.Text())
 		}
 
 		name := fields[0]
diff --git a/vendor/github.com/tv42/httpunix/.gitignore b/vendor/github.com/tv42/httpunix/.gitignore
new file mode 100644
index 000000000..9ed3b07ce
--- /dev/null
+++ b/vendor/github.com/tv42/httpunix/.gitignore
@@ -0,0 +1 @@
+*.test
diff --git a/vendor/github.com/tv42/httpunix/LICENSE b/vendor/github.com/tv42/httpunix/LICENSE
new file mode 100644
index 000000000..33aec1457
--- /dev/null
+++ b/vendor/github.com/tv42/httpunix/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013-2015 Tommi Virtanen.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/tv42/httpunix/httpunix.go b/vendor/github.com/tv42/httpunix/httpunix.go
new file mode 100644
index 000000000..95f5e95a8
--- /dev/null
+++ b/vendor/github.com/tv42/httpunix/httpunix.go
@@ -0,0 +1,95 @@
+// Package httpunix provides an HTTP transport (net/http.RoundTripper)
+// that uses Unix domain sockets instead of TCP.
+//
+// This is useful for non-browser connections within the same host, as
+// it allows using the file system for credentials of both client
+// and server, and guaranteeing unique names.
+//
+// The URLs look like this:
+//
+//     http+unix://LOCATION/PATH_ETC
+//
+// where LOCATION is translated to a file system path with
+// Transport.RegisterLocation, and PATH_ETC follow normal http: scheme
+// conventions.
+package httpunix
+
+import (
+	"bufio"
+	"errors"
+	"net"
+	"net/http"
+	"sync"
+	"time"
+)
+
+// Scheme is the URL scheme used for HTTP over UNIX domain sockets.
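+//
+// As a rough usage sketch (the location name and socket path below are
+// invented for illustration):
+//
+//	u := &httpunix.Transport{DialTimeout: 100 * time.Millisecond}
+//	u.RegisterLocation("myservice", "/var/run/myservice.sock")
+//	client := &http.Client{Transport: u}
+//	resp, err := client.Get("http+unix://myservice/status")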
+const Scheme = "http+unix"
+
+// Transport is an http.RoundTripper that connects to Unix domain
+// sockets.
+type Transport struct {
+	DialTimeout           time.Duration
+	RequestTimeout        time.Duration
+	ResponseHeaderTimeout time.Duration
+
+	mu sync.Mutex
+	// map a URL "hostname" to a UNIX domain socket path
+	loc map[string]string
+}
+
+// RegisterLocation registers a URL location and maps it to the given
+// file system path.
+//
+// Calling RegisterLocation twice for the same location is a
+// programmer error, and causes a panic.
+func (t *Transport) RegisterLocation(loc string, path string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.loc == nil {
+		t.loc = make(map[string]string)
+	}
+	if _, exists := t.loc[loc]; exists {
+		panic("location " + loc + " already registered")
+	}
+	t.loc[loc] = path
+}
+
+var _ http.RoundTripper = (*Transport)(nil)
+
+// RoundTrip executes a single HTTP transaction. See
+// net/http.RoundTripper.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if req.URL == nil {
+		return nil, errors.New("http+unix: nil Request.URL")
+	}
+	if req.URL.Scheme != Scheme {
+		return nil, errors.New("unsupported protocol scheme: " + req.URL.Scheme)
+	}
+	if req.URL.Host == "" {
+		return nil, errors.New("http+unix: no Host in request URL")
+	}
+	t.mu.Lock()
+	path, ok := t.loc[req.URL.Host]
+	t.mu.Unlock()
+	if !ok {
+		return nil, errors.New("unknown location: " + req.URL.Host)
+	}
+
+	c, err := net.DialTimeout("unix", path, t.DialTimeout)
+	if err != nil {
+		return nil, err
+	}
+	r := bufio.NewReader(c)
+	if t.RequestTimeout > 0 {
+		c.SetWriteDeadline(time.Now().Add(t.RequestTimeout))
+	}
+	if err := req.Write(c); err != nil {
+		return nil, err
+	}
+	if t.ResponseHeaderTimeout > 0 {
+		c.SetReadDeadline(time.Now().Add(t.ResponseHeaderTimeout))
+	}
+	resp, err := http.ReadResponse(r, req)
+	return resp, err
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 724274a62..416437b3b 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -13,7 +13,7 @@ github.com/Azure/go-autorest/autorest/validation
 github.com/Azure/go-autorest/logger
 github.com/Azure/go-autorest/version
 github.com/Azure/go-autorest/autorest/date
-# github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6
+# github.com/DataDog/datadog-go v2.2.0+incompatible
 github.com/DataDog/datadog-go/statsd
 # github.com/Jeffail/gabs v1.1.0
 github.com/Jeffail/gabs
@@ -37,7 +37,7 @@ github.com/SermoDigital/jose
 github.com/StackExchange/wmi
 # github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e
 github.com/armon/circbuf
-# github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da
+# github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878
 github.com/armon/go-metrics
 github.com/armon/go-metrics/circonus
 github.com/armon/go-metrics/datadog
@@ -82,11 +82,12 @@ github.com/beorn7/perks/quantile
 github.com/bgentry/speakeasy
 # github.com/boltdb/bolt v1.3.1
 github.com/boltdb/bolt
-# github.com/circonus-labs/circonus-gometrics v0.0.0-20161109192337-d17a8420c36e
+# github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible
 github.com/circonus-labs/circonus-gometrics
 github.com/circonus-labs/circonus-gometrics/api
 github.com/circonus-labs/circonus-gometrics/checkmgr
-# github.com/circonus-labs/circonusllhist v0.0.0-20161110002650-365d370cc145
+github.com/circonus-labs/circonus-gometrics/api/config
+# github.com/circonus-labs/circonusllhist v0.1.3
 github.com/circonus-labs/circonusllhist
 # github.com/coredns/coredns v1.1.2
 github.com/coredns/coredns/plugin/pkg/dnsutil
@@ -214,19 
+215,19 @@ github.com/hashicorp/go-discover/provider/scaleway github.com/hashicorp/go-discover/provider/softlayer github.com/hashicorp/go-discover/provider/triton github.com/hashicorp/go-discover/provider/vsphere -# github.com/hashicorp/go-hclog v0.0.0-20180402200405-69ff559dc25f +# github.com/hashicorp/go-hclog v0.9.1 github.com/hashicorp/go-hclog # github.com/hashicorp/go-immutable-radix v1.0.0 github.com/hashicorp/go-immutable-radix # github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 github.com/hashicorp/go-memdb -# github.com/hashicorp/go-msgpack v0.5.4 +# github.com/hashicorp/go-msgpack v0.5.5 github.com/hashicorp/go-msgpack/codec # github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/go-multierror # github.com/hashicorp/go-plugin v0.0.0-20180331002553-e8d22c780116 github.com/hashicorp/go-plugin -# github.com/hashicorp/go-retryablehttp v0.0.0-20180531211321-3b087ef2d313 +# github.com/hashicorp/go-retryablehttp v0.5.3 github.com/hashicorp/go-retryablehttp # github.com/hashicorp/go-rootcerts v1.0.0 github.com/hashicorp/go-rootcerts @@ -264,7 +265,7 @@ github.com/hashicorp/mdns github.com/hashicorp/memberlist # github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 github.com/hashicorp/net-rpc-msgpackrpc -# github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472 +# github.com/hashicorp/raft v1.1.0 github.com/hashicorp/raft # github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1 github.com/hashicorp/raft-boltdb @@ -390,7 +391,7 @@ github.com/nicolai86/scaleway-sdk github.com/oklog/run # github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c github.com/packethost/packngo -# github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c +# github.com/pascaldekloe/goe v0.1.0 github.com/pascaldekloe/goe/verify # github.com/patrickmn/go-cache v0.0.0-20180527043350-9f6ff22cfff8 github.com/patrickmn/go-cache @@ -405,16 +406,17 @@ github.com/posener/complete github.com/posener/complete/cmd/install github.com/posener/complete/cmd github.com/posener/complete/match -# github.com/prometheus/client_golang v0.0.0-20180328130430-f504d69affe1 +# github.com/prometheus/client_golang v0.9.2 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 +github.com/prometheus/client_golang/prometheus/internal +# github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.0.0-20180326160409-38c53a9f4bfc +# github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 github.com/prometheus/common/expfmt github.com/prometheus/common/model github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg -# github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d +# github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a github.com/prometheus/procfs github.com/prometheus/procfs/nfs github.com/prometheus/procfs/xfs @@ -454,6 +456,8 @@ github.com/stretchr/objx github.com/stretchr/testify/require github.com/stretchr/testify/mock github.com/stretchr/testify/assert +# github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 +github.com/tv42/httpunix # github.com/vmware/govmomi v0.18.0 github.com/vmware/govmomi github.com/vmware/govmomi/find