Merge branch 'master' of https://github.com/hashicorp/vault into request-uuid

This commit is contained in:
Laura Bennett 2016-07-23 21:47:08 -04:00
commit c63cdc23a1
146 changed files with 5650 additions and 815 deletions

View File

@ -6,10 +6,7 @@ services:
- docker
go:
- 1.6.2
# tip is currently using go1.4.1, which we're not compatible with, so always
# fails
# - tip
- 1.7rc3
matrix:
allow_failures:

View File

@ -28,12 +28,24 @@ FEATURES:
environment variable. See the [environment variable
documentation](https://www.vaultproject.io/docs/commands/environment.html)
for more details. [GH-1594]
* **Service Discovery in `vault init`**: The new `-auto` option on `vault init`
will perform service discovery using Consul. When only one node is discovered,
it will be initialized and when more than one node is discovered, they will
be output for easy selection. See `vault init --help` for more details. [GH-1642]
* **MongoDB Secret Backend**: Generate dynamic unique MongoDB database
credentials based on configured roles. Sponsored by
[CommerceHub](http://www.commercehub.com/). [GH-1414]
* **Circonus Metrics Integration**: Vault can now send metrics to
[Circonus](http://www.circonus.com/). See the [configuration
documentation](https://www.vaultproject.io/docs/config/index.html) for
details. [GH-1646]
IMPROVEMENTS:
* auth/aws-ec2: Added a new constraint, 'bound_account_id' to the role
[GH-1523]
* auth/ldap, secret/cassandra, physical/consul: Clients with `tls.Config`
will have `MinVersion` set to TLS 1.2 by default. This is configurable.
* cli: Output formatting in the presence of warnings in the response object
[GH-1533]
* cli: `vault auth` command supports a `-path` option to take in the path at
@ -48,11 +60,9 @@ IMPROVEMENTS:
* core: Response wrapping is now enabled for login endpoints [GH-1588]
* core: The duration of leadership is now exported via events through
telemetry [GH-1625]
* auth/aws-ec2: Added a new constraint, 'bound_account_id' to the role
[GH-1523]
* auth/ldap, secret/cassandra, physical/consul: Clients with `tls.Config`
will have `MinVersion` set to TLS 1.2 by default. This is configurable.
* physical/etcd: Support `ETCD_ADDR` env var for specifying addresses [GH-1576]
* physical/consul: Allowing additional tags to be added to Consul service
registration via `service-tags` option [GH-1643]
* secret/aws: Listing of roles is supported now [GH-1546]
* secret/cassandra: Add `connect_timeout` value for Cassandra connection
configuration [GH-1581]
@ -64,6 +74,8 @@ IMPROVEMENTS:
generated user names and allow the length to be controlled [GH-1604]
* secret/ssh: Added `allowed_roles` to vault-ssh-helper's config; the role
name is now returned as part of the response of the `verify` API.
* sys/health: Added version information to the response of health status
endpoint [GH-1647]
BUG FIXES:

View File

@ -45,7 +45,7 @@ type InitStatusResponse struct {
}
type InitResponse struct {
Keys []string
Keys []string `json:"keys"`
RecoveryKeys []string `json:"recovery_keys"`
RootToken string `json:"root_token"`
}

View File

@ -2,11 +2,16 @@ package command
import (
"fmt"
"net/url"
"os"
"runtime"
"strings"
consulapi "github.com/hashicorp/consul/api"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/pgpkeys"
"github.com/hashicorp/vault/meta"
"github.com/hashicorp/vault/physical"
)
// InitCommand is a Command that initializes a new Vault server.
@ -17,7 +22,8 @@ type InitCommand struct {
func (c *InitCommand) Run(args []string) int {
var threshold, shares, storedShares, recoveryThreshold, recoveryShares int
var pgpKeys, recoveryPgpKeys pgpkeys.PubKeyFilesFlag
var check bool
var auto, check bool
var consulServiceName string
flags := c.Meta.FlagSet("init", meta.FlagSetDefault)
flags.Usage = func() { c.Ui.Error(c.Help()) }
flags.IntVar(&shares, "key-shares", 5, "")
@ -28,10 +34,145 @@ func (c *InitCommand) Run(args []string) int {
flags.IntVar(&recoveryThreshold, "recovery-threshold", 3, "")
flags.Var(&recoveryPgpKeys, "recovery-pgp-keys", "")
flags.BoolVar(&check, "check", false, "")
flags.BoolVar(&auto, "auto", false, "")
flags.StringVar(&consulServiceName, "consul-service", physical.DefaultServiceName, "")
if err := flags.Parse(args); err != nil {
return 1
}
initRequest := &api.InitRequest{
SecretShares: shares,
SecretThreshold: threshold,
StoredShares: storedShares,
PGPKeys: pgpKeys,
RecoveryShares: recoveryShares,
RecoveryThreshold: recoveryThreshold,
RecoveryPGPKeys: recoveryPgpKeys,
}
// If running in 'auto' mode, run service discovery based on environment
// variables of Consul.
if auto {
// Create configuration for Consul
consulConfig := consulapi.DefaultConfig()
// Create a client to communicate with Consul
consulClient, err := consulapi.NewClient(consulConfig)
if err != nil {
c.Ui.Error(fmt.Sprintf("failed to create Consul client:%v", err))
return 1
}
var uninitializedVaults []string
var initializedVault string
// Query the nodes belonging to the cluster
if services, _, err := consulClient.Catalog().Service(consulServiceName, "", &consulapi.QueryOptions{AllowStale: true}); err == nil {
Loop:
for _, service := range services {
vaultAddress := &url.URL{
Scheme: consulConfig.Scheme,
Host: fmt.Sprintf("%s:%d", service.ServiceAddress, service.ServicePort),
}
// Set VAULT_ADDR to the discovered node
os.Setenv(api.EnvVaultAddress, vaultAddress.String())
// Create a client to communicate with the discovered node
client, err := c.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
return 1
}
// Check the initialization status of the discovered node
inited, err := client.Sys().InitStatus()
switch {
case err != nil:
c.Ui.Error(fmt.Sprintf("Error checking initialization status of discovered node: %+q. Err: %v", vaultAddress.String(), err))
return 1
case inited:
// One of the nodes in the cluster is initialized. Break out.
initializedVault = vaultAddress.String()
break Loop
default:
// Vault is uninitialized.
uninitializedVaults = append(uninitializedVaults, vaultAddress.String())
}
}
}
export := "export"
quote := "'"
if runtime.GOOS == "windows" {
export = "set"
quote = ""
}
if initializedVault != "" {
vaultURL, err := url.Parse(initializedVault)
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to parse Vault address: %+q. Err: %v", initializedVault, err))
return 1
}
c.Ui.Output(fmt.Sprintf("Discovered an initialized Vault node at %+q, using Consul service name %+q", vaultURL.String(), consulServiceName))
c.Ui.Output("\nSet the following environment variable to operate on the discovered Vault:\n")
c.Ui.Output(fmt.Sprintf("\t%s VAULT_ADDR=%s%s%s", export, quote, vaultURL.String(), quote))
return 0
}
switch len(uninitializedVaults) {
case 0:
c.Ui.Error(fmt.Sprintf("Failed to discover Vault nodes using Consul service name %+q", consulServiceName))
return 1
case 1:
// There was only one node found in the Vault cluster and it
// was uninitialized.
vaultURL, err := url.Parse(uninitializedVaults[0])
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to parse Vault address: %+q. Err: %v", uninitializedVaults[0], err))
return 1
}
// Set the VAULT_ADDR to the discovered node. This will ensure
// that the client created will operate on the discovered node.
os.Setenv(api.EnvVaultAddress, vaultURL.String())
// Let the client know that initialization is performed on the
// discovered node.
c.Ui.Output(fmt.Sprintf("Discovered Vault at %+q using Consul service name %+q\n", vaultURL.String(), consulServiceName))
// Attempt initializing it
ret := c.runInit(check, initRequest)
// Regardless of success or failure, instruct client to update VAULT_ADDR
c.Ui.Output("\nSet the following environment variable to operate on the discovered Vault:\n")
c.Ui.Output(fmt.Sprintf("\t%s VAULT_ADDR=%s%s%s", export, quote, vaultURL.String(), quote))
return ret
default:
// If more than one Vault node was discovered, print out all of them,
// requiring the client to update VAULT_ADDR and to run init again.
c.Ui.Output(fmt.Sprintf("Discovered more than one uninitialized Vaults using Consul service name %+q\n", consulServiceName))
c.Ui.Output("To initialize these Vaults, set any *one* of the following environment variables and run 'vault init':")
// Print valid commands to make setting the variables easier
for _, vaultNode := range uninitializedVaults {
vaultURL, err := url.Parse(vaultNode)
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to parse Vault address: %+q. Err: %v", vaultNode, err))
continue
}
c.Ui.Output(fmt.Sprintf("\t%s VAULT_ADDR=%s%s%s", export, quote, vaultURL.String(), quote))
}
return 0
}
}
return c.runInit(check, initRequest)
}
func (c *InitCommand) runInit(check bool, initRequest *api.InitRequest) int {
client, err := c.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf(
@ -43,15 +184,7 @@ func (c *InitCommand) Run(args []string) int {
return c.checkStatus(client)
}
resp, err := client.Sys().Init(&api.InitRequest{
SecretShares: shares,
SecretThreshold: threshold,
StoredShares: storedShares,
PGPKeys: pgpKeys,
RecoveryShares: recoveryShares,
RecoveryThreshold: recoveryThreshold,
RecoveryPGPKeys: recoveryPgpKeys,
})
resp, err := client.Sys().Init(initRequest)
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error initializing Vault: %s", err))
@ -67,7 +200,7 @@ func (c *InitCommand) Run(args []string) int {
c.Ui.Output(fmt.Sprintf("Initial Root Token: %s", resp.RootToken))
if storedShares < 1 {
if initRequest.StoredShares < 1 {
c.Ui.Output(fmt.Sprintf(
"\n"+
"Vault initialized with %d keys and a key threshold of %d. Please\n"+
@ -76,10 +209,10 @@ func (c *InitCommand) Run(args []string) int {
"to unseal it again.\n\n"+
"Vault does not store the master key. Without at least %d keys,\n"+
"your Vault will remain permanently sealed.",
shares,
threshold,
threshold,
threshold,
initRequest.SecretShares,
initRequest.SecretThreshold,
initRequest.SecretThreshold,
initRequest.SecretThreshold,
))
} else {
c.Ui.Output(
@ -92,8 +225,8 @@ func (c *InitCommand) Run(args []string) int {
"\n"+
"Recovery key initialized with %d keys and a key threshold of %d. Please\n"+
"securely distribute the above keys.",
recoveryShares,
recoveryThreshold,
initRequest.RecoveryShares,
initRequest.RecoveryThreshold,
))
}
@ -136,39 +269,62 @@ General Options:
` + meta.GeneralOptionsUsage() + `
Init Options:
-check Don't actually initialize, just check if Vault is
already initialized. A return code of 0 means Vault
is initialized; a return code of 2 means Vault is not
initialized; a return code of 1 means an error was
encountered.
-check Don't actually initialize, just check if Vault is
already initialized. A return code of 0 means Vault
is initialized; a return code of 2 means Vault is not
initialized; a return code of 1 means an error was
encountered.
-key-shares=5 The number of key shares to split the master key
into.
-key-shares=5 The number of key shares to split the master key
into.
-key-threshold=3 The number of key shares required to reconstruct
the master key.
-key-threshold=3 The number of key shares required to reconstruct
the master key.
-stored-shares=0 The number of unseal keys to store. This is not
normally available.
-stored-shares=0 The number of unseal keys to store. This is not
normally available.
-pgp-keys If provided, must be a comma-separated list of
files on disk containing binary- or base64-format
public PGP keys, or Keybase usernames specified as
"keybase:<username>". The number of given entries
must match 'key-shares'. The output unseal keys will
be encrypted and hex-encoded, in order, with the
given public keys. If you want to use them with the
'vault unseal' command, you will need to hex decode
and decrypt; this will be the plaintext unseal key.
-pgp-keys If provided, must be a comma-separated list of
files on disk containing binary- or base64-format
public PGP keys, or Keybase usernames specified as
"keybase:<username>". The number of given entries
must match 'key-shares'. The output unseal keys will
be encrypted and hex-encoded, in order, with the
given public keys. If you want to use them with the
'vault unseal' command, you will need to hex decode
and decrypt; this will be the plaintext unseal key.
-recovery-shares=5 The number of key shares to split the recovery key
into. This is not normally available.
-recovery-shares=5 The number of key shares to split the recovery key
into. This is not normally available.
-recovery-threshold=3 The number of key shares required to reconstruct
the recovery key. This is not normally available.
-recovery-threshold=3 The number of key shares required to reconstruct
the recovery key. This is not normally available.
-recovery-pgp-keys If provided, behaves like "pgp-keys" but for the
recovery key shares. This is not normally available.
-recovery-pgp-keys If provided, behaves like "pgp-keys" but for the
recovery key shares. This is not normally available.
-auto If set, performs service discovery using Consul. When
all the nodes of a Vault cluster are registered with
Consul, setting this flag will trigger service discovery
using the service name with which Vault nodes are
registered. This option works well when each Vault
cluster is registered under a unique service name.
Note that when Consul is serving as Vault's HA backend,
Vault nodes are registered with Consul by default. The
service name can be changed using the 'consul-service' flag.
Ensure that the environment variables required to communicate
with Consul (such as CONSUL_HTTP_ADDR, CONSUL_HTTP_TOKEN,
and CONSUL_HTTP_SSL) are properly set. When only one
Vault node is discovered, it will be initialized and
when more than one Vault node is discovered, they will
be output for easy selection.
-consul-service Service name under which all the nodes of a Vault cluster
are registered with Consul. Note that when Vault uses
Consul as its HA backend, it registers itself with Consul
under the service name "vault" by default.
This name can be modified in Vault's configuration file,
using the "service" option for the Consul backend.
`
return strings.TrimSpace(helpText)
}
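The `-auto` behavior described in the help text above reduces to a Consul catalog query followed by an init-status probe of each discovered node. A minimal, standalone sketch of just the discovery step (assuming a reachable Consul agent and the default "vault" service name; error handling trimmed):

package main

import (
	"fmt"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	cfg := consulapi.DefaultConfig() // honors CONSUL_HTTP_ADDR, CONSUL_HTTP_TOKEN, CONSUL_HTTP_SSL
	client, err := consulapi.NewClient(cfg)
	if err != nil {
		panic(err)
	}
	// List every node registered under the Vault service name.
	services, _, err := client.Catalog().Service("vault", "", &consulapi.QueryOptions{AllowStale: true})
	if err != nil {
		panic(err)
	}
	for _, s := range services {
		// Candidate VAULT_ADDR values; 'vault init -auto' probes each node's
		// init status before deciding whether to initialize it.
		fmt.Printf("%s://%s:%d\n", cfg.Scheme, s.ServiceAddress, s.ServicePort)
	}
}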

View File

@ -17,6 +17,7 @@ import (
"time"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/circonus"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/logutils"
@ -605,6 +606,37 @@ func (c *ServerCommand) setupTelemetry(config *server.Config) error {
fanout = append(fanout, sink)
}
// Configure the Circonus sink
if telConfig.CirconusAPIToken != "" || telConfig.CirconusCheckSubmissionURL != "" {
cfg := &circonus.Config{}
cfg.Interval = telConfig.CirconusSubmissionInterval
cfg.CheckManager.API.TokenKey = telConfig.CirconusAPIToken
cfg.CheckManager.API.TokenApp = telConfig.CirconusAPIApp
cfg.CheckManager.API.URL = telConfig.CirconusAPIURL
cfg.CheckManager.Check.SubmissionURL = telConfig.CirconusCheckSubmissionURL
cfg.CheckManager.Check.ID = telConfig.CirconusCheckID
cfg.CheckManager.Check.ForceMetricActivation = telConfig.CirconusCheckForceMetricActivation
cfg.CheckManager.Check.InstanceID = telConfig.CirconusCheckInstanceID
cfg.CheckManager.Check.SearchTag = telConfig.CirconusCheckSearchTag
cfg.CheckManager.Broker.ID = telConfig.CirconusBrokerID
cfg.CheckManager.Broker.SelectTag = telConfig.CirconusBrokerSelectTag
if cfg.CheckManager.API.TokenApp == "" {
cfg.CheckManager.API.TokenApp = "vault"
}
if cfg.CheckManager.Check.SearchTag == "" {
cfg.CheckManager.Check.SearchTag = "service:vault"
}
sink, err := circonus.NewCirconusSink(cfg)
if err != nil {
return err
}
sink.Start()
fanout = append(fanout, sink)
}
// Initialize the global sink
if len(fanout) > 0 {
fanout = append(fanout, inm)

View File

@ -89,6 +89,70 @@ type Telemetry struct {
StatsdAddr string `hcl:"statsd_address"`
DisableHostname bool `hcl:"disable_hostname"`
// Circonus: see https://github.com/circonus-labs/circonus-gometrics
// for more details on the various configuration options.
// Valid configuration combinations:
// - CirconusAPIToken
// metric management enabled (search for existing check or create a new one)
// - CirconusSubmissionUrl
// metric management disabled (use check with specified submission_url,
// broker must be using a public SSL certificate)
// - CirconusAPIToken + CirconusCheckSubmissionURL
// metric management enabled (use check with specified submission_url)
// - CirconusAPIToken + CirconusCheckID
// metric management enabled (use check with specified id)
// CirconusAPIToken is a valid API Token used to create/manage check. If provided,
// metric management is enabled.
// Default: none
CirconusAPIToken string `hcl:"circonus_api_token"`
// CirconusAPIApp is an app name associated with API token.
// Default: "consul"
CirconusAPIApp string `hcl:"circonus_api_app"`
// CirconusAPIURL is the base URL to use for contacting the Circonus API.
// Default: "https://api.circonus.com/v2"
CirconusAPIURL string `hcl:"circonus_api_url"`
// CirconusSubmissionInterval is the interval at which metrics are submitted to Circonus.
// Default: 10s
CirconusSubmissionInterval string `hcl:"circonus_submission_interval"`
// CirconusCheckSubmissionURL is the check.config.submission_url field from a
// previously created HTTPTRAP check.
// Default: none
CirconusCheckSubmissionURL string `hcl:"circonus_submission_url"`
// CirconusCheckID is the check id (not check bundle id) from a previously created
// HTTPTRAP check. The numeric portion of the check._cid field.
// Default: none
CirconusCheckID string `hcl:"circonus_check_id"`
// CirconusCheckForceMetricActivation will force enabling metrics, as they are encountered,
// if the metric already exists and is NOT active. If check management is enabled, the default
// behavior is to add new metrics as they are encountered. If the metric already exists in the
// check, it will *NOT* be activated. This setting overrides that behavior.
// Default: "false"
CirconusCheckForceMetricActivation string `hcl:"circonus_check_force_metric_activation"`
// CirconusCheckInstanceID serves to uniquely identify the metrics coming from this "instance".
// It can be used to maintain metric continuity with transient or ephemeral instances as
// they move around within an infrastructure.
// Default: hostname:app
CirconusCheckInstanceID string `hcl:"circonus_check_instance_id"`
// CirconusCheckSearchTag is a special tag which, when coupled with the instance id, helps to
// narrow down the search results when neither a Submission URL nor a Check ID is provided.
// Default: service:app (e.g. service:vault)
CirconusCheckSearchTag string `hcl:"circonus_check_search_tag"`
// CirconusBrokerID is an explicit broker to use when creating a new check. The numeric portion
// of broker._cid. If metric management is enabled and neither a Submission URL nor Check ID
// is provided, an attempt will be made to search for an existing check using Instance ID and
// Search Tag. If one is not found, a new HTTPTRAP check will be created.
// Default: use Select Tag if provided, otherwise, a random Enterprise Broker associated
// with the specified API token or the default Circonus Broker.
// Default: none
CirconusBrokerID string `hcl:"circonus_broker_id"`
// CirconusBrokerSelectTag is a special tag which will be used to select a broker when
// a Broker ID is not provided. The best use of this is as a hint for which broker
// should be used based on *where* this particular instance is running.
// (e.g. a specific geo location or datacenter, dc:sfo)
// Default: none
CirconusBrokerSelectTag string `hcl:"circonus_broker_select_tag"`
}
func (s *Telemetry) GoString() string {
@ -490,6 +554,17 @@ func parseTelemetry(result *Config, list *ast.ObjectList) error {
"statsite_address",
"statsd_address",
"disable_hostname",
"circonus_api_token",
"circonus_api_app",
"circonus_api_url",
"circonus_submission_interval",
"circonus_submission_url",
"circonus_check_id",
"circonus_check_force_metric_activation",
"circonus_check_instance_id",
"circonus_check_search_tag",
"circonus_broker_id",
"circonus_broker_select_tag",
}
if err := checkHCLKeys(item.Val, valid); err != nil {
return multierror.Prefix(err, "telemetry:")

View File

@ -100,9 +100,20 @@ func TestLoadConfigFile_json(t *testing.T) {
},
Telemetry: &Telemetry{
StatsiteAddr: "baz",
StatsdAddr: "",
DisableHostname: false,
StatsiteAddr: "baz",
StatsdAddr: "",
DisableHostname: false,
CirconusAPIToken: "",
CirconusAPIApp: "",
CirconusAPIURL: "",
CirconusSubmissionInterval: "",
CirconusCheckSubmissionURL: "",
CirconusCheckID: "",
CirconusCheckForceMetricActivation: "",
CirconusCheckInstanceID: "",
CirconusCheckSearchTag: "",
CirconusBrokerID: "",
CirconusBrokerSelectTag: "",
},
MaxLeaseTTL: 10 * time.Hour,
@ -152,9 +163,20 @@ func TestLoadConfigFile_json2(t *testing.T) {
},
Telemetry: &Telemetry{
StatsiteAddr: "foo",
StatsdAddr: "bar",
DisableHostname: true,
StatsiteAddr: "foo",
StatsdAddr: "bar",
DisableHostname: true,
CirconusAPIToken: "0",
CirconusAPIApp: "vault",
CirconusAPIURL: "http://api.circonus.com/v2",
CirconusSubmissionInterval: "10s",
CirconusCheckSubmissionURL: "https://someplace.com/metrics",
CirconusCheckID: "0",
CirconusCheckForceMetricActivation: "true",
CirconusCheckInstanceID: "node1:vault",
CirconusCheckSearchTag: "service:vault",
CirconusBrokerID: "0",
CirconusBrokerSelectTag: "dc:sfo",
},
}
if !reflect.DeepEqual(config, expected) {

View File

@ -24,6 +24,17 @@
"telemetry":{
"statsd_address":"bar",
"statsite_address":"foo",
"disable_hostname":true
"disable_hostname":true,
"circonus_api_token": "0",
"circonus_api_app": "vault",
"circonus_api_url": "http://api.circonus.com/v2",
"circonus_submission_interval": "10s",
"circonus_submission_url": "https://someplace.com/metrics",
"circonus_check_id": "0",
"circonus_check_force_metric_activation": "true",
"circonus_check_instance_id": "node1:vault",
"circonus_check_search_tag": "service:vault",
"circonus_broker_id": "0",
"circonus_broker_select_tag": "dc:sfo"
}
}

View File

@ -437,7 +437,6 @@ func (p *ParsedCertBundle) GetTLSConfig(usage TLSUsage) (*tls.Config, error) {
}
tlsConfig := &tls.Config{
NextProtos: []string{"h2", "http/1.1"},
MinVersion: tls.VersionTLS12,
}

View File

@ -59,7 +59,7 @@ func SanitizePolicies(policies []string, addDefault bool) []string {
return strutil.RemoveDuplicates(policies)
}
// ComparePolicies checks whether the given policy sets are equivalent, as in,
// EquivalentPolicies checks whether the given policy sets are equivalent, as in,
// they contain the same values. The benefit of this method is that it leaves
// the "default" policy out of its comparisons as it may be added later by core
// after a set of policies has been saved by a backend.
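A quick illustration of the contract described by that comment (hypothetical call site; it assumes the function keeps the `(a, b []string) bool` shape used by `strutil.EquivalentSlices` later in this commit):

// Hypothetical usage sketch, not taken from the diff.
saved := []string{"dev", "ops"}
current := []string{"ops", "default", "dev"}
// Ordering is ignored and the implicit "default" policy is excluded from
// the comparison, so the two sets are treated as equivalent.
same := EquivalentPolicies(saved, current) // true under the documented behavior
_ = same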

View File

@ -30,6 +30,7 @@ func StrListSubset(super, sub []string) bool {
// The return slice will be sorted and will not contain duplicate or
// empty items. The values will be converted to lower case.
func ParseStrings(input string) []string {
input = strings.TrimSpace(input)
var parsed []string
if input == "" {
// Don't return nil
@ -57,3 +58,49 @@ func RemoveDuplicates(items []string) []string {
sort.Strings(items)
return items
}
// EquivalentSlices checks whether the given string sets are equivalent, as in,
// they contain the same values.
func EquivalentSlices(a, b []string) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
// First we'll build maps to ensure unique values
mapA := map[string]bool{}
mapB := map[string]bool{}
for _, keyA := range a {
mapA[keyA] = true
}
for _, keyB := range b {
mapB[keyB] = true
}
// Now we'll build our checking slices
var sortedA, sortedB []string
for keyA, _ := range mapA {
sortedA = append(sortedA, keyA)
}
for keyB, _ := range mapB {
sortedB = append(sortedB, keyB)
}
sort.Strings(sortedA)
sort.Strings(sortedB)
// Finally, compare
if len(sortedA) != len(sortedB) {
return false
}
for i := range sortedA {
if sortedA[i] != sortedB[i] {
return false
}
}
return true
}

View File

@ -2,6 +2,19 @@ package strutil
import "testing"
func TestStrutil_EquivalentSlices(t *testing.T) {
slice1 := []string{"test2", "test1", "test3"}
slice2 := []string{"test3", "test2", "test1"}
if !EquivalentSlices(slice1, slice2) {
t.Fatalf("bad: expected a match")
}
slice2 = append(slice2, "test4")
if EquivalentSlices(slice1, slice2) {
t.Fatalf("bad: expected a mismatch")
}
}
func TestStrListContains(t *testing.T) {
haystack := []string{
"dev",

View File

@ -7,6 +7,7 @@ import (
"time"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/version"
)
func handleSysHealth(core *vault.Core) http.Handler {
@ -119,13 +120,15 @@ func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, erro
Sealed: sealed,
Standby: standby,
ServerTimeUTC: time.Now().UTC().Unix(),
Version: version.GetVersion().String(),
}
return code, body, nil
}
type HealthResponse struct {
Initialized bool `json:"initialized"`
Sealed bool `json:"sealed"`
Standby bool `json:"standby"`
ServerTimeUTC int64 `json:"server_time_utc"`
Initialized bool `json:"initialized"`
Sealed bool `json:"sealed"`
Standby bool `json:"standby"`
ServerTimeUTC int64 `json:"server_time_utc"`
Version string `json:"version"`
}

View File

@ -30,6 +30,7 @@ func TestSysHealth_get(t *testing.T) {
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
expected["server_time_utc"] = actual["server_time_utc"]
expected["version"] = actual["version"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
}
@ -50,6 +51,7 @@ func TestSysHealth_get(t *testing.T) {
testResponseStatus(t, resp, 500)
testResponseBody(t, resp, &actual)
expected["server_time_utc"] = actual["server_time_utc"]
expected["version"] = actual["version"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
}
@ -79,6 +81,7 @@ func TestSysHealth_customcodes(t *testing.T) {
testResponseBody(t, resp, &actual)
expected["server_time_utc"] = actual["server_time_utc"]
expected["version"] = actual["version"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
}
@ -103,6 +106,7 @@ func TestSysHealth_customcodes(t *testing.T) {
testResponseStatus(t, resp, 503)
testResponseBody(t, resp, &actual)
expected["server_time_utc"] = actual["server_time_utc"]
expected["version"] = actual["version"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
}

View File

@ -20,6 +20,7 @@ import (
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/helper/tlsutil"
)
@ -38,9 +39,9 @@ const (
// defaultCheckTimeout changes the timeout of TTL checks
defaultCheckTimeout = 5 * time.Second
// defaultServiceName is the default Consul service name used when
// DefaultServiceName is the default Consul service name used when
// advertising a Vault instance.
defaultServiceName = "vault"
DefaultServiceName = "vault"
// reconcileTimeout is how often Vault should query Consul to detect
// and fix any state drift.
@ -62,6 +63,7 @@ type ConsulBackend struct {
advertiseHost string
advertisePort int64
serviceName string
serviceTags []string
disableRegistration bool
checkTimeout time.Duration
@ -77,15 +79,15 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro
if !ok {
path = "vault/"
}
logger.Printf("[DEBUG]: consul: config path set to %v", path)
logger.Printf("[DEBUG]: physical/consul: config path set to %v", path)
// Ensure path is suffixed but not prefixed
if !strings.HasSuffix(path, "/") {
logger.Printf("[WARN]: consul: appending trailing forward slash to path")
logger.Printf("[WARN]: physical/consul: appending trailing forward slash to path")
path += "/"
}
if strings.HasPrefix(path, "/") {
logger.Printf("[WARN]: consul: trimming path of its forward slash")
logger.Printf("[WARN]: physical/consul: trimming path of its forward slash")
path = strings.TrimPrefix(path, "/")
}
@ -99,14 +101,19 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro
}
disableRegistration = b
}
logger.Printf("[DEBUG]: consul: config disable_registration set to %v", disableRegistration)
logger.Printf("[DEBUG]: physical/consul: config disable_registration set to %v", disableRegistration)
// Get the service name to advertise in Consul
service, ok := conf["service"]
if !ok {
service = defaultServiceName
service = DefaultServiceName
}
logger.Printf("[DEBUG]: consul: config service set to %s", service)
logger.Printf("[DEBUG]: physical/consul: config service set to %s", service)
// Get the additional tags to attach to the registered service name
tags := conf["service-tags"]
logger.Printf("[DEBUG]: physical/consul: config service-tags set to %s", tags)
checkTimeout := defaultCheckTimeout
checkTimeoutStr, ok := conf["check_timeout"]
@ -122,7 +129,7 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro
}
checkTimeout = d
logger.Printf("[DEBUG]: consul: config check_timeout set to %v", d)
logger.Printf("[DEBUG]: physical/consul: config check_timeout set to %v", d)
}
// Configure the client
@ -130,15 +137,15 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro
if addr, ok := conf["address"]; ok {
consulConf.Address = addr
logger.Printf("[DEBUG]: consul: config address set to %d", addr)
logger.Printf("[DEBUG]: physical/consul: config address set to %s", addr)
}
if scheme, ok := conf["scheme"]; ok {
consulConf.Scheme = scheme
logger.Printf("[DEBUG]: consul: config scheme set to %d", scheme)
logger.Printf("[DEBUG]: physical/consul: config scheme set to %s", scheme)
}
if token, ok := conf["token"]; ok {
consulConf.Token = token
logger.Printf("[DEBUG]: consul: config token set")
logger.Printf("[DEBUG]: physical/consul: config token set")
}
if consulConf.Scheme == "https" {
@ -151,7 +158,7 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro
transport.MaxIdleConnsPerHost = 4
transport.TLSClientConfig = tlsClientConfig
consulConf.HttpClient.Transport = transport
logger.Printf("[DEBUG]: consul: configured TLS")
logger.Printf("[DEBUG]: physical/consul: configured TLS")
}
client, err := api.NewClient(consulConf)
@ -166,7 +173,7 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro
if err != nil {
return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
}
logger.Printf("[DEBUG]: consul: max_parallel set to %d", maxParInt)
logger.Printf("[DEBUG]: physical/consul: max_parallel set to %d", maxParInt)
}
// Setup the backend
@ -177,6 +184,7 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro
kv: client.KV(),
permitPool: NewPermitPool(maxParInt),
serviceName: service,
serviceTags: strutil.ParseStrings(tags),
checkTimeout: checkTimeout,
disableRegistration: disableRegistration,
}
@ -386,7 +394,7 @@ func (c *ConsulBackend) NotifyActiveStateChange() error {
default:
// NOTE: If this occurs Vault's active status could be out of
// sync with Consul until reconcileTimer expires.
c.logger.Printf("[WARN]: consul: Concurrent state change notify dropped")
c.logger.Printf("[WARN]: physical/consul: Concurrent state change notify dropped")
}
return nil
@ -398,7 +406,7 @@ func (c *ConsulBackend) NotifySealedStateChange() error {
default:
// NOTE: If this occurs Vault's sealed status could be out of
// sync with Consul until checkTimer expires.
c.logger.Printf("[WARN]: consul: Concurrent sealed state change notify dropped")
c.logger.Printf("[WARN]: physical/consul: Concurrent sealed state change notify dropped")
}
return nil
@ -464,7 +472,7 @@ shutdown:
for !shutdown {
serviceID, err := c.reconcileConsul(registeredServiceID, activeFunc, sealedFunc)
if err != nil {
c.logger.Printf("[WARN]: consul: reconcile unable to talk with Consul backend: %v", err)
c.logger.Printf("[WARN]: physical/consul: reconcile unable to talk with Consul backend: %v", err)
time.Sleep(consulRetryInterval)
continue
}
@ -488,7 +496,7 @@ shutdown:
for !shutdown {
sealed := sealedFunc()
if err := c.runCheck(sealed); err != nil {
c.logger.Printf("[WARN]: consul: check unable to talk with Consul backend: %v", err)
c.logger.Printf("[WARN]: physical/consul: check unable to talk with Consul backend: %v", err)
time.Sleep(consulRetryInterval)
continue
}
@ -497,7 +505,7 @@ shutdown:
}()
}
case <-shutdownCh:
c.logger.Printf("[INFO]: consul: Shutting down consul backend")
c.logger.Printf("[INFO]: physical/consul: Shutting down consul backend")
shutdown = true
break shutdown
}
@ -506,7 +514,7 @@ shutdown:
c.serviceLock.RLock()
defer c.serviceLock.RUnlock()
if err := c.client.Agent().ServiceDeregister(registeredServiceID); err != nil {
c.logger.Printf("[WARN]: consul: service deregistration failed: %v", err)
c.logger.Printf("[WARN]: physical/consul: service deregistration failed: %v", err)
}
}
@ -548,17 +556,16 @@ func (c *ConsulBackend) reconcileConsul(registeredServiceID string, activeFunc a
}
}
tags := serviceTags(active)
tags := c.fetchServiceTags(active)
var reregister bool
switch {
case currentVaultService == nil,
registeredServiceID == "":
case currentVaultService == nil, registeredServiceID == "":
reregister = true
default:
switch {
case len(currentVaultService.ServiceTags) != 1,
currentVaultService.ServiceTags[0] != tags[0]:
case !strutil.EquivalentSlices(currentVaultService.ServiceTags, tags):
reregister = true
}
}
@ -616,13 +623,13 @@ func (c *ConsulBackend) runCheck(sealed bool) error {
}
}
// serviceTags returns all of the relevant tags for Consul.
func serviceTags(active bool) []string {
// fetchServiceTags returns all of the relevant tags for Consul.
func (c *ConsulBackend) fetchServiceTags(active bool) []string {
activeTag := "standby"
if active {
activeTag = "active"
}
return []string{activeTag}
return append(c.serviceTags, activeTag)
}
func (c *ConsulBackend) setAdvertiseAddr(addr string) (err error) {

View File

@ -10,6 +10,7 @@ import (
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/vault/helper/strutil"
)
type consulConf map[string]string
@ -72,6 +73,42 @@ func testSealedFunc(sealedPct float64) sealedFunction {
}
}
func TestConsul_ServiceTags(t *testing.T) {
consulConfig := map[string]string{
"path": "seaTech/",
"service": "astronomy",
"service-tags": "deadbeef, cafeefac, deadc0de, feedface",
"advertiseAddr": "http://127.0.0.2:8200",
"check_timeout": "6s",
"address": "127.0.0.2",
"scheme": "https",
"token": "deadbeef-cafeefac-deadc0de-feedface",
"max_parallel": "4",
"disable_registration": "false",
}
logger := log.New(os.Stderr, "", log.LstdFlags)
be, err := newConsulBackend(consulConfig, logger)
if err != nil {
t.Fatal(err)
}
c, ok := be.(*ConsulBackend)
if !ok {
t.Fatalf("failed to create physical Consul backend")
}
expected := []string{"deadbeef", "cafeefac", "deadc0de", "feedface"}
actual := c.fetchServiceTags(false)
if !strutil.EquivalentSlices(actual, append(expected, "standby")) {
t.Fatalf("bad: expected:%s actual:%s", append(expected, "standby"), actual)
}
actual = c.fetchServiceTags(true)
if !strutil.EquivalentSlices(actual, append(expected, "active")) {
t.Fatalf("bad: expected:%s actual:%s", append(expected, "active"), actual)
}
}
func TestConsul_newConsulBackend(t *testing.T) {
tests := []struct {
name string
@ -196,8 +233,10 @@ func TestConsul_serviceTags(t *testing.T) {
},
}
c := testConsulBackend(t)
for _, test := range tests {
tags := serviceTags(test.active)
tags := c.fetchServiceTags(test.active)
if !reflect.DeepEqual(tags[:], test.tags[:]) {
t.Errorf("Bad %v: %v %v", test.active, tags, test.tags)
}

View File

@ -74,9 +74,9 @@ func (c *Core) HandleRequest(req *logical.Request) (resp *logical.Response, err
}
// Create an audit trail of the response
if err := c.auditBroker.LogResponse(auth, req, resp, err); err != nil {
if auditErr := c.auditBroker.LogResponse(auth, req, resp, err); auditErr != nil {
c.logger.Printf("[ERR] core: failed to audit response (request path: %s): %v",
req.Path, err)
req.Path, auditErr)
return nil, ErrInternalError
}

View File

@ -28,7 +28,6 @@ import (
"net"
"os"
"os/exec"
"os/user"
"regexp"
"runtime"
"strconv"
@ -135,18 +134,6 @@ func (p maybeBrackets) String() string {
// Changed by tests.
var uidFromUsername = uidFromUsernameFn
func uidFromUsernameFn(username string) (uid int, err error) {
if uid := os.Getuid(); uid != 0 && username == os.Getenv("USER") {
return uid, nil
}
u, err := user.Lookup(username)
if err == nil {
uid, err := strconv.Atoi(u.Uid)
return uid, err
}
return 0, err
}
func uidFromLsof(lip net.IP, lport int, rip net.IP, rport int) (uid int, err error) {
seek := fmt.Sprintf("%s:%d->%s:%d", maybeBrackets(lip), lport, maybeBrackets(rip), rport)
seekb := []byte(seek)

vendor/camlistore.org/pkg/netutil/ident_cgo.go (new vendored file)
View File

@ -0,0 +1,39 @@
// +build !nocgo
/*
Copyright 2016 The Camlistore Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package netutil identifies the system userid responsible for
// localhost TCP connections.
package netutil // import "camlistore.org/pkg/netutil"
import (
"os"
"os/user"
"strconv"
)
func uidFromUsernameFn(username string) (uid int, err error) {
if uid := os.Getuid(); uid != 0 && username == os.Getenv("USER") {
return uid, nil
}
u, err := user.Lookup(username)
if err == nil {
uid, err := strconv.Atoi(u.Uid)
return uid, err
}
return 0, err
}

vendor/camlistore.org/pkg/netutil/ident_nocgo.go (new vendored file)
View File

@ -0,0 +1,32 @@
// +build nocgo
/*
Copyright 2016 The Camlistore Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package netutil identifies the system userid responsible for
// localhost TCP connections.
package netutil // import "camlistore.org/pkg/netutil"
import (
"os"
)
func uidFromUsernameFn(username string) (uid int, err error) {
if uid := os.Getuid(); uid != 0 && username == os.Getenv("USER") {
return uid, nil
}
return 0, err
}

View File

@ -139,3 +139,27 @@ func loopbackIP() net.IP {
}
return nil
}
// RandPort returns a random port to listen on.
func RandPort() (int, error) {
var port int
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
return port, err
}
listener, err := net.ListenTCP("tcp", addr)
if err != nil {
return port, fmt.Errorf("could not listen to find random port: %v", err)
}
randAddr := listener.Addr().(*net.TCPAddr)
if err := listener.Close(); err != nil {
return port, fmt.Errorf("could not close random listener: %v", err)
}
return randAddr.Port, nil
}
// HasPort, given a string of the form "host", "host:port", or
// "[ipv6::address]:port", returns true if the string includes a port.
func HasPort(s string) bool {
return strings.LastIndex(s, ":") > strings.LastIndex(s, "]")
}
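A short usage sketch for the two helpers above (results follow directly from their definitions; `netutil` refers to this camlistore.org/pkg/netutil package):

port, err := netutil.RandPort() // binds localhost:0, closes the listener, returns the free port
if err != nil {
	log.Fatal(err)
}
log.Printf("test server can listen on 127.0.0.1:%d", port)

_ = netutil.HasPort("localhost")      // false: no ":" at all
_ = netutil.HasPort("localhost:8200") // true
_ = netutil.HasPort("[::1]:8200")     // true: last ":" comes after the "]"
_ = netutil.HasPort("[::1]")          // false: last ":" is inside the brackets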

View File

@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 2016 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -0,0 +1,92 @@
// Circonus Metrics Sink
package circonus
import (
"strings"
cgm "github.com/circonus-labs/circonus-gometrics"
)
// CirconusSink provides an interface to forward metrics to Circonus with
// automatic check creation and metric management
type CirconusSink struct {
metrics *cgm.CirconusMetrics
}
// Config options for CirconusSink
// See https://github.com/circonus-labs/circonus-gometrics for configuration options
type Config cgm.Config
// NewCirconusSink - create new metric sink for circonus
//
// one of the following must be supplied:
// - API Token - search for an existing check or create a new check
// - API Token + Check Id - the check identified by check id will be used
// - API Token + Check Submission URL - the check identified by the submission url will be used
// - Check Submission URL - the check identified by the submission url will be used
// metric management will be *disabled*
//
// Note: If submission url is supplied w/o an api token, the public circonus ca cert will be used
// to verify the broker for metrics submission.
func NewCirconusSink(cc *Config) (*CirconusSink, error) {
cfg := cgm.Config{}
if cc != nil {
cfg = cgm.Config(*cc)
}
metrics, err := cgm.NewCirconusMetrics(&cfg)
if err != nil {
return nil, err
}
return &CirconusSink{
metrics: metrics,
}, nil
}
// Start submitting metrics to Circonus (flush every SubmitInterval)
func (s *CirconusSink) Start() {
s.metrics.Start()
}
// Flush manually triggers metric submission to Circonus
func (s *CirconusSink) Flush() {
s.metrics.Flush()
}
// SetGauge sets value for a gauge metric
func (s *CirconusSink) SetGauge(key []string, val float32) {
flatKey := s.flattenKey(key)
s.metrics.SetGauge(flatKey, int64(val))
}
// EmitKey is not implemented in circonus
func (s *CirconusSink) EmitKey(key []string, val float32) {
// NOP
}
// IncrCounter increments a counter metric
func (s *CirconusSink) IncrCounter(key []string, val float32) {
flatKey := s.flattenKey(key)
s.metrics.IncrementByValue(flatKey, uint64(val))
}
// AddSample adds a sample to a histogram metric
func (s *CirconusSink) AddSample(key []string, val float32) {
flatKey := s.flattenKey(key)
s.metrics.RecordValue(flatKey, float64(val))
}
// Flattens key to Circonus metric name
func (s *CirconusSink) flattenKey(parts []string) string {
joined := strings.Join(parts, "`")
return strings.Map(func(r rune) rune {
switch r {
case ' ':
return '_'
default:
return r
}
}, joined)
}
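A minimal usage sketch for the sink, assuming the API-token-only mode described in the NewCirconusSink comment (the token value is a placeholder):

cfg := &circonus.Config{}
cfg.CheckManager.API.TokenKey = "placeholder-token" // enables check search/creation
sink, err := circonus.NewCirconusSink(cfg)
if err != nil {
	log.Fatal(err)
}
sink.Start() // begin flushing metrics on the configured submission interval
// Keys are joined with "`" and spaces become "_", so this records the
// metric "vault`runtime`alloc_bytes".
sink.SetGauge([]string{"vault", "runtime", "alloc bytes"}, 42)
sink.IncrCounter([]string{"vault", "audit", "log_request"}, 1)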

View File

@ -47,16 +47,19 @@ Here's some code to explain it:
type exampleStruct struct {
Name string ``
Email string `valid:"email"`
}
// this, however, will only fail when Email is empty or an invalid email address:
type exampleStruct2 struct {
Name string `valid:"-"`
Email string `valid:"email"`
}
// lastly, this will only fail when Email is an invalid email address but not when it's empty:
type exampleStruct3 struct {
Name string `valid:"-"`
Email string `valid:"email,optional"`
}
```
#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))

View File

@ -30,7 +30,7 @@ const (
Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
DNSName string = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62})*$`
URL string = `^((ftp|https?):\/\/)?(\S+(:\S*)?@)?((([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(([a-zA-Z0-9]+([-\.][a-zA-Z0-9]+)*)|((www\.)?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))(:(\d{1,5}))?((\/|\?|#)[^\s]*)?$`
URL string = `^((ftp|https?):\/\/)?(\S+(:\S*)?@)?((([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(([a-zA-Z0-9]([a-zA-Z0-9-]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|((www\.)?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))(:(\d{1,5}))?((\/|\?|#)[^\s]*)?$`
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
UnixPath string = `^((?:\/[a-zA-Z0-9\.\:]+(?:_[a-zA-Z0-9\:\.]+)*(?:\-[\:a-zA-Z0-9\.]+)*)+\/?)$`

View File

@ -903,7 +903,10 @@ func ErrorsByField(e error) map[string]string {
m[e.(Error).Name] = e.(Error).Err.Error()
case Errors:
for _, item := range e.(Errors).Errors() {
m[item.(Error).Name] = item.(Error).Err.Error()
n := ErrorsByField(item)
for k, v := range n {
m[k] = v
}
}
}

View File

@ -139,6 +139,11 @@ type Config struct {
//
EC2MetadataDisableTimeoutOverride *bool
// SleepDelay is an override for the func the SDK will call when sleeping
// during the lifecycle of a request. Specifically this will be used for
// request delays. This value should only be used for testing. To adjust
// the delay of a request see the aws/client.DefaultRetryer and
// aws/request.Retryer.
SleepDelay func(time.Duration)
}

View File

@ -0,0 +1,191 @@
// Package endpointcreds provides support for retrieving credentials from an
// arbitrary HTTP endpoint.
//
// The credentials endpoint Provider can receive both static and refreshable
// credentials that will expire. Credentials are static when an "Expiration"
// value is not provided in the endpoint's response.
//
// Static credentials will never expire once they have been retrieved. The format
// of the static credentials response:
// {
// "AccessKeyId" : "MUA...",
// "SecretAccessKey" : "/7PC5om....",
// }
//
// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
// value in the response. The format of the refreshable credentials response:
// {
// "AccessKeyId" : "MUA...",
// "SecretAccessKey" : "/7PC5om....",
// "Token" : "AQoDY....=",
// "Expiration" : "2016-02-25T06:03:31Z"
// }
//
// Errors should be returned in the following format and only returned with 400
// or 500 HTTP status codes.
// {
// "code": "ErrorCode",
// "message": "Helpful error message."
// }
package endpointcreds
import (
"encoding/json"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
)
// ProviderName is the name of the credentials provider.
const ProviderName = `CredentialsEndpointProvider`
// Provider satisfies the credentials.Provider interface, and is a client to
// retrieve credentials from an arbitrary endpoint.
type Provider struct {
staticCreds bool
credentials.Expiry
// Requires an AWS Client to make HTTP requests to the endpoint with.
// The Endpoint the request will be made to is provided by the aws.Config's
// Endpoint value.
Client *client.Client
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause requests to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
}
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
// from arbitrary endpoint.
func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
p := &Provider{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: "CredentialsEndpoint",
Endpoint: endpoint,
},
handlers,
),
}
p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
p.Client.Handlers.Validate.Clear()
p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
for _, option := range options {
option(p)
}
return p
}
// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
// from an arbitrary endpoint concurrently. The client will request the
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
}
// IsExpired returns true if the credentials retrieved are expired, or not yet
// retrieved.
func (p *Provider) IsExpired() bool {
if p.staticCreds {
return false
}
return p.Expiry.IsExpired()
}
// Retrieve will attempt to request the credentials from the endpoint the Provider
// was configured for. An error will be returned if the retrieval fails.
func (p *Provider) Retrieve() (credentials.Value, error) {
resp, err := p.getCredentials()
if err != nil {
return credentials.Value{ProviderName: ProviderName},
awserr.New("CredentialsEndpointError", "failed to load credentials", err)
}
if resp.Expiration != nil {
p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
} else {
p.staticCreds = true
}
return credentials.Value{
AccessKeyID: resp.AccessKeyID,
SecretAccessKey: resp.SecretAccessKey,
SessionToken: resp.Token,
ProviderName: ProviderName,
}, nil
}
type getCredentialsOutput struct {
Expiration *time.Time
AccessKeyID string
SecretAccessKey string
Token string
}
type errorOutput struct {
Code string `json:"code"`
Message string `json:"message"`
}
func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
op := &request.Operation{
Name: "GetCredentials",
HTTPMethod: "GET",
}
out := &getCredentialsOutput{}
req := p.Client.NewRequest(op, nil, out)
req.HTTPRequest.Header.Set("Accept", "application/json")
return out, req.Send()
}
func validateEndpointHandler(r *request.Request) {
if len(r.ClientInfo.Endpoint) == 0 {
r.Error = aws.ErrMissingEndpoint
}
}
func unmarshalHandler(r *request.Request) {
defer r.HTTPResponse.Body.Close()
out := r.Data.(*getCredentialsOutput)
if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
r.Error = awserr.New("SerializationError",
"failed to decode endpoint credentials",
err,
)
}
}
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
var errOut errorOutput
if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
r.Error = awserr.New("SerializationError",
"failed to decode endpoint credentials",
err,
)
}
// Response body format is not consistent between metadata endpoints.
// Grab the error message as a string and include that as the source error
r.Error = awserr.New(errOut.Code, errOut.Message, nil)
}
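A hedged sketch of wiring this provider into a client manually (defaults.Config and defaults.Handlers are the SDK's stock helpers; the endpoint URL is a placeholder):

cfg := defaults.Config()
handlers := defaults.Handlers()
creds := endpointcreds.NewCredentialsClient(*cfg, handlers, "http://127.0.0.1:8080/creds",
	func(p *endpointcreds.Provider) {
		// Treat credentials as expired 5 minutes before the reported Expiration.
		p.ExpiryWindow = 5 * time.Minute
	},
)
v, err := creds.Get() // fetches and caches; refreshes once IsExpired() reports true
if err != nil {
	log.Fatal(err)
}
log.Println("access key:", v.AccessKeyID)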

View File

@ -8,6 +8,7 @@
package defaults
import (
"fmt"
"net/http"
"os"
"time"
@ -16,6 +17,7 @@ import (
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/endpoints"
@ -83,16 +85,43 @@ func Handlers() request.Handlers {
// is available if you need to reset the credentials of an
// existing service client or session's Config.
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
return credentials.NewCredentials(&credentials.ChainProvider{
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
Providers: []credentials.Provider{
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
&ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
ExpiryWindow: 5 * time.Minute,
},
}})
remoteCredProvider(*cfg, handlers),
},
})
}
func remoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
if len(ecsCredURI) > 0 {
return ecsCredProvider(cfg, handlers, ecsCredURI)
}
return ec2RoleProvider(cfg, handlers)
}
func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider {
const host = `169.254.170.2`
return endpointcreds.NewProviderClient(cfg, handlers,
fmt.Sprintf("http://%s%s", host, uri),
func(p *endpointcreds.Provider) {
p.ExpiryWindow = 5 * time.Minute
},
)
}
func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName,
aws.StringValue(cfg.Region), true)
return &ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion),
ExpiryWindow: 5 * time.Minute,
}
}

View File

@ -39,6 +39,7 @@ type Request struct {
RetryDelay time.Duration
NotHoist bool
SignedHeaderVals http.Header
LastSignedAt time.Time
built bool
}
@ -72,15 +73,11 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
if method == "" {
method = "POST"
}
p := operation.HTTPPath
if p == "" {
p = "/"
}
httpReq, _ := http.NewRequest(method, "", nil)
var err error
httpReq.URL, err = url.Parse(clientInfo.Endpoint + p)
httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
if err != nil {
httpReq.URL = &url.URL{}
err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)

View File

@ -118,6 +118,11 @@ type Signer struct {
// with pre-signed requests preventing headers from being added to the
// request's query string.
DisableHeaderHoisting bool
// currentTimeFn returns the time value which represents the current time.
// This value should only be used for testing. If it is nil the default
// time.Now will be used.
currentTimeFn func() time.Time
}
// NewSigner returns a Signer pointer configured with the credentials and optional
@ -150,6 +155,7 @@ type signingCtx struct {
formattedTime string
formattedShortTime string
bodyDigest string
signedHeaders string
canonicalHeaders string
canonicalString string
@ -217,6 +223,11 @@ func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region st
}
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
currentTimeFn := v4.currentTimeFn
if currentTimeFn == nil {
currentTimeFn = time.Now
}
ctx := &signingCtx{
Request: r,
Body: body,
@ -229,12 +240,12 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
}
if ctx.isRequestSigned() {
if !v4.Credentials.IsExpired() && time.Now().Before(ctx.Time.Add(10*time.Minute)) {
if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) {
// If the request is already signed, and the credentials have not
// expired, and the request is not too old ignore the signing request.
return ctx.SignedHeaderVals, nil
}
ctx.Time = time.Now()
ctx.Time = currentTimeFn()
ctx.handlePresignRemoval()
}
@ -303,6 +314,9 @@ var SignRequestHandler = request.NamedHandler{
// If the credentials of the request's config are set to
// credentials.AnonymousCredentials the request will not be signed.
func SignSDKRequest(req *request.Request) {
signSDKRequestWithCurrTime(req, time.Now)
}
func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) {
// If the request does not need to be signed ignore the signing of the
// request if the AnonymousCredentials object is used.
if req.Config.Credentials == credentials.AnonymousCredentials {
@ -323,11 +337,23 @@ func SignSDKRequest(req *request.Request) {
v4.Debug = req.Config.LogLevel.Value()
v4.Logger = req.Config.Logger
v4.DisableHeaderHoisting = req.NotHoist
v4.currentTimeFn = curTimeFn
})
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, req.Time)
signingTime := req.Time
if !req.LastSignedAt.IsZero() {
signingTime = req.LastSignedAt
}
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime)
if err != nil {
req.Error = err
req.SignedHeaderVals = nil
return
}
req.SignedHeaderVals = signedHeaders
req.Error = err
req.LastSignedAt = curTimeFn()
}
const logSignInfoMsg = `DEBUG: Request Signature:
@ -364,6 +390,7 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
}
}
ctx.buildBodyDigest()
ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
ctx.buildCanonicalString() // depends on canon headers / signed headers
ctx.buildStringToSign() // depends on canon string
@ -485,7 +512,7 @@ func (ctx *signingCtx) buildCanonicalString() {
ctx.Request.URL.RawQuery,
ctx.canonicalHeaders + "\n",
ctx.signedHeaders,
ctx.bodyDigest(),
ctx.bodyDigest,
}, "\n")
}
@ -508,7 +535,7 @@ func (ctx *signingCtx) buildSignature() {
ctx.signature = hex.EncodeToString(signature)
}
func (ctx *signingCtx) bodyDigest() string {
func (ctx *signingCtx) buildBodyDigest() {
hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
if hash == "" {
if ctx.isPresign && ctx.ServiceName == "s3" {
@ -518,9 +545,11 @@ func (ctx *signingCtx) bodyDigest() string {
} else {
hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
}
ctx.Request.Header.Add("X-Amz-Content-Sha256", hash)
if ctx.ServiceName == "s3" {
ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
}
}
return hash
ctx.bodyDigest = hash
}
// isRequestSigned returns if the request is currently signed or presigned

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.2.1"
const SDKVersion = "1.2.7"

View File

@ -185,14 +185,32 @@ func (d *Decoder) decodeBinary(b []byte, v reflect.Value) error {
return nil
}
switch v.Interface().(type) {
case []byte:
if v.Kind() != reflect.Slice {
return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
}
if v.Type() == byteSliceType {
// Optimization for []byte types
if v.IsNil() || v.Cap() < len(b) {
v.Set(reflect.MakeSlice(byteSliceType, len(b), len(b)))
} else if v.Len() != len(b) {
v.SetLen(len(b))
}
copy(v.Interface().([]byte), b)
return nil
}
switch v.Type().Elem().Kind() {
case reflect.Uint8:
// Fallback to reflection copy for types aliased to []byte
if v.IsNil() || v.Cap() < len(b) {
v.Set(reflect.MakeSlice(v.Type(), len(b), len(b)))
} else if v.Len() != len(b) {
v.SetLen(len(b))
}
for i := 0; i < len(b); i++ {
v.Index(i).SetUint(uint64(b[i]))
}
default:
if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 {
reflect.Copy(v, reflect.ValueOf(b))
@ -467,8 +485,11 @@ func (d *Decoder) decodeString(s *string, v reflect.Value, fieldTag tag) error {
}
switch v.Kind() {
case reflect.String, reflect.Interface:
v.Set(reflect.ValueOf(*s))
case reflect.String:
v.SetString(*s)
case reflect.Interface:
// Ensure type aliasing is handled properly
v.Set(reflect.ValueOf(*s).Convert(v.Type()))
default:
return &UnmarshalTypeError{Value: "string", Type: v.Type()}
}

View File

@ -57,4 +57,6 @@
// the json.Marshaler and json.Unmarshaler interfaces have been removed and
// replaced with the dynamodbattribute.Marshaler and
// dynamodbattribute.Unmarshaler interfaces.
//
// `time.Time` is marshaled as RFC3339 format.
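//
// For illustration only, a rough sketch (the struct, field name, and the
// package-level Marshal helper used here are assumptions, not part of this
// change):
//
//	type Record struct {
//		CreatedAt time.Time
//	}
//	av, err := dynamodbattribute.Marshal(Record{CreatedAt: time.Now()})
//	// on success, av.M["CreatedAt"].S holds an RFC3339 string such as
//	// "2016-07-23T21:47:08Z"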
package dynamodbattribute

View File

@ -183,6 +183,12 @@ func (e *Encoder) Encode(in interface{}) (*dynamodb.AttributeValue, error) {
}
func (e *Encoder) encode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
// We should check for omitted values first before dereferencing.
if fieldTag.OmitEmpty && emptyValue(v) {
encodeNull(av)
return nil
}
// Handle both pointers and interface conversion into types
v = valueElem(v)
@ -192,11 +198,6 @@ func (e *Encoder) encode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag
}
}
if fieldTag.OmitEmpty && emptyValue(v) {
encodeNull(av)
return nil
}
switch v.Kind() {
case reflect.Invalid:
encodeNull(av)
@ -235,6 +236,9 @@ func (e *Encoder) encodeStruct(av *dynamodb.AttributeValue, v reflect.Value) err
fv := v.FieldByIndex(f.Index)
elem := &dynamodb.AttributeValue{}
err := e.encode(elem, fv, f.tag)
if err != nil {
return err
}
skip, err := keepOrOmitEmpty(f.OmitEmpty, elem, err)
if err != nil {
return err
@ -279,13 +283,14 @@ func (e *Encoder) encodeMap(av *dynamodb.AttributeValue, v reflect.Value, fieldT
}
func (e *Encoder) encodeSlice(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
switch typed := v.Interface().(type) {
case []byte:
if len(typed) == 0 {
switch v.Type().Elem().Kind() {
case reflect.Uint8:
b := v.Bytes()
if len(b) == 0 {
encodeNull(av)
return nil
}
av.B = append([]byte{}, typed...)
av.B = append([]byte{}, b...)
default:
var elemFn func(dynamodb.AttributeValue) error
@ -356,21 +361,24 @@ func (e *Encoder) encodeList(v reflect.Value, fieldTag tag, elemFn func(dynamodb
}
func (e *Encoder) encodeScalar(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
switch typed := v.Interface().(type) {
case bool:
av.BOOL = new(bool)
*av.BOOL = typed
case string:
if err := e.encodeString(av, v); err != nil {
return err
}
case Number:
s := string(typed)
if v.Type() == numberType {
s := v.String()
if fieldTag.AsString {
av.S = &s
} else {
av.N = &s
}
return nil
}
switch v.Kind() {
case reflect.Bool:
av.BOOL = new(bool)
*av.BOOL = v.Bool()
case reflect.String:
if err := e.encodeString(av, v); err != nil {
return err
}
default:
// Fallback to encoding numbers, will return invalid type if not supported
if err := e.encodeNumber(av, v); err != nil {
@ -391,31 +399,13 @@ func (e *Encoder) encodeNumber(av *dynamodb.AttributeValue, v reflect.Value) err
}
var out string
switch typed := v.Interface().(type) {
case int:
out = encodeInt(int64(typed))
case int8:
out = encodeInt(int64(typed))
case int16:
out = encodeInt(int64(typed))
case int32:
out = encodeInt(int64(typed))
case int64:
out = encodeInt(typed)
case uint:
out = encodeUint(uint64(typed))
case uint8:
out = encodeUint(uint64(typed))
case uint16:
out = encodeUint(uint64(typed))
case uint32:
out = encodeUint(uint64(typed))
case uint64:
out = encodeUint(typed)
case float32:
out = encodeFloat(float64(typed))
case float64:
out = encodeFloat(typed)
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out = encodeInt(v.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
out = encodeUint(v.Uint())
case reflect.Float32, reflect.Float64:
out = encodeFloat(v.Float())
default:
return &unsupportedMarshalTypeError{Type: v.Type()}
}
@ -430,12 +420,13 @@ func (e *Encoder) encodeString(av *dynamodb.AttributeValue, v reflect.Value) err
return err
}
switch typed := v.Interface().(type) {
case string:
if len(typed) == 0 && e.NullEmptyString {
switch v.Kind() {
case reflect.String:
s := v.String()
if len(s) == 0 && e.NullEmptyString {
encodeNull(av)
} else {
av.S = &typed
av.S = &s
}
default:
return &unsupportedMarshalTypeError{Type: v.Type()}

View File

@ -3015,7 +3015,13 @@ func (c *IAM) GetGroupPolicyRequest(input *GetGroupPolicyInput) (req *request.Re
// Retrieves the specified inline policy document that is embedded in the specified
// IAM group.
//
// An IAM group can also have managed policies attached to it. To retrieve
// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986).
// You can use a URL decoding method to convert the policy back to plain JSON
// text. For example, if you use Java, you can use the decode method of the
// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs
// provide similar functionality.
//
// An IAM group can also have managed policies attached to it. To retrieve
// a managed policy document that is attached to a group, use GetPolicy to determine
// the policy's default version, then use GetPolicyVersion to retrieve the policy
// document.
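//
// A rough Go sketch of the same decoding using the standard library (the
// variable names are illustrative; only url.QueryUnescape from "net/url" is
// assumed):
//
//	out, err := svc.GetGroupPolicy(params)
//	if err != nil {
//		return err
//	}
//	doc, err := url.QueryUnescape(*out.PolicyDocument)
//	// doc now contains the plain-JSON policy text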
@ -3284,7 +3290,13 @@ func (c *IAM) GetPolicyVersionRequest(input *GetPolicyVersionInput) (req *reques
// Retrieves information about the specified version of the specified managed
// policy, including the policy document.
//
// To list the available versions for a policy, use ListPolicyVersions.
// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986).
// You can use a URL decoding method to convert the policy back to plain JSON
// text. For example, if you use Java, you can use the decode method of the
// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs
// provide similar functionality.
//
// To list the available versions for a policy, use ListPolicyVersions.
//
// This API retrieves information about managed policies. To retrieve information
// about an inline policy that is embedded in a user, group, or role, use the
@ -3347,6 +3359,12 @@ func (c *IAM) GetRoleRequest(input *GetRoleInput) (req *request.Request, output
// Retrieves information about the specified role, including the role's path,
// GUID, ARN, and the role's trust policy that grants permission to assume the
// role. For more information about roles, see Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html).
//
// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986).
// You can use a URL decoding method to convert the policy back to plain JSON
// text. For example, if you use Java, you can use the decode method of the
// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs
// provide similar functionality.
func (c *IAM) GetRole(input *GetRoleInput) (*GetRoleOutput, error) {
req, out := c.GetRoleRequest(input)
err := req.Send()
@ -3397,8 +3415,14 @@ func (c *IAM) GetRolePolicyRequest(input *GetRolePolicyInput) (req *request.Requ
// Retrieves the specified inline policy document that is embedded with the
// specified IAM role.
//
// An IAM role can also have managed policies attached to it. To retrieve a
// managed policy document that is attached to a role, use GetPolicy to determine
// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986).
// You can use a URL decoding method to convert the policy back to plain JSON
// text. For example, if you use Java, you can use the decode method of the
// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs
// provide similar functionality.
//
// An IAM role can also have managed policies attached to it. To retrieve
// a managed policy document that is attached to a role, use GetPolicy to determine
// the policy's default version, then use GetPolicyVersion to retrieve the policy
// document.
//
@ -3668,8 +3692,14 @@ func (c *IAM) GetUserPolicyRequest(input *GetUserPolicyInput) (req *request.Requ
// Retrieves the specified inline policy document that is embedded in the specified
// IAM user.
//
// An IAM user can also have managed policies attached to it. To retrieve a
// managed policy document that is attached to a user, use GetPolicy to determine
// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986).
// You can use a URL decoding method to convert the policy back to plain JSON
// text. For example, if you use Java, you can use the decode method of the
// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs
// provide similar functionality.
//
// An IAM user can also have managed policies attached to it. To retrieve
// a managed policy document that is attached to a user, use GetPolicy to determine
// the policy's default version, then use GetPolicyVersion to retrieve the policy
// document.
//
@ -5189,6 +5219,12 @@ func (c *IAM) ListSSHPublicKeysRequest(input *ListSSHPublicKeysInput) (req *requ
Name: opListSSHPublicKeys,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxItems",
TruncationToken: "IsTruncated",
},
}
if input == nil {
@ -5218,6 +5254,31 @@ func (c *IAM) ListSSHPublicKeys(input *ListSSHPublicKeysInput) (*ListSSHPublicKe
return out, err
}
// ListSSHPublicKeysPages iterates over the pages of a ListSSHPublicKeys operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListSSHPublicKeys method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListSSHPublicKeys operation.
// pageNum := 0
// err := client.ListSSHPublicKeysPages(params,
// func(page *ListSSHPublicKeysOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *IAM) ListSSHPublicKeysPages(input *ListSSHPublicKeysInput, fn func(p *ListSSHPublicKeysOutput, lastPage bool) (shouldContinue bool)) error {
page, _ := c.ListSSHPublicKeysRequest(input)
page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
return page.EachPage(func(p interface{}, lastPage bool) bool {
return fn(p.(*ListSSHPublicKeysOutput), lastPage)
})
}
const opListServerCertificates = "ListServerCertificates"
// ListServerCertificatesRequest generates a "aws/request.Request" representing the
@ -6161,6 +6222,12 @@ func (c *IAM) SimulateCustomPolicyRequest(input *SimulateCustomPolicyInput) (req
Name: opSimulateCustomPolicy,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxItems",
TruncationToken: "IsTruncated",
},
}
if input == nil {
@ -6196,6 +6263,31 @@ func (c *IAM) SimulateCustomPolicy(input *SimulateCustomPolicyInput) (*SimulateP
return out, err
}
// SimulateCustomPolicyPages iterates over the pages of a SimulateCustomPolicy operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See SimulateCustomPolicy method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a SimulateCustomPolicy operation.
// pageNum := 0
// err := client.SimulateCustomPolicyPages(params,
// func(page *SimulatePolicyResponse, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *IAM) SimulateCustomPolicyPages(input *SimulateCustomPolicyInput, fn func(p *SimulatePolicyResponse, lastPage bool) (shouldContinue bool)) error {
page, _ := c.SimulateCustomPolicyRequest(input)
page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
return page.EachPage(func(p interface{}, lastPage bool) bool {
return fn(p.(*SimulatePolicyResponse), lastPage)
})
}
const opSimulatePrincipalPolicy = "SimulatePrincipalPolicy"
// SimulatePrincipalPolicyRequest generates a "aws/request.Request" representing the
@ -6225,6 +6317,12 @@ func (c *IAM) SimulatePrincipalPolicyRequest(input *SimulatePrincipalPolicyInput
Name: opSimulatePrincipalPolicy,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxItems",
TruncationToken: "IsTruncated",
},
}
if input == nil {
@ -6270,6 +6368,31 @@ func (c *IAM) SimulatePrincipalPolicy(input *SimulatePrincipalPolicyInput) (*Sim
return out, err
}
// SimulatePrincipalPolicyPages iterates over the pages of a SimulatePrincipalPolicy operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See SimulatePrincipalPolicy method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a SimulatePrincipalPolicy operation.
// pageNum := 0
// err := client.SimulatePrincipalPolicyPages(params,
// func(page *SimulatePolicyResponse, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *IAM) SimulatePrincipalPolicyPages(input *SimulatePrincipalPolicyInput, fn func(p *SimulatePolicyResponse, lastPage bool) (shouldContinue bool)) error {
page, _ := c.SimulatePrincipalPolicyRequest(input)
page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
return page.EachPage(func(p interface{}, lastPage bool) bool {
return fn(p.(*SimulatePolicyResponse), lastPage)
})
}
const opUpdateAccessKey = "UpdateAccessKey"
// UpdateAccessKeyRequest generates a "aws/request.Request" representing the

View File

@ -23,6 +23,8 @@ func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
// Bucket exists in a different region, and request needs
// to be made to the correct region.
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
r.Error = awserr.NewRequestFailure(
awserr.New("BucketRegionError",
@ -35,25 +37,29 @@ func unmarshalError(r *request.Request) {
return
}
if r.HTTPResponse.ContentLength == 0 {
// No body, use status code to generate an awserr.Error
r.Error = awserr.NewRequestFailure(
awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
var errCode, errMsg string
// Attempt to parse error from body if it is known
resp := &xmlErrorResponse{}
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
if err != nil && err != io.EOF {
r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil)
errCode = "SerializationError"
errMsg = "failed to decode S3 XML error response"
} else {
r.Error = awserr.NewRequestFailure(
awserr.New(resp.Code, resp.Message, nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
errCode = resp.Code
errMsg = resp.Message
}
// Fallback to status code converted to message if still no error code
if len(errCode) == 0 {
statusText := http.StatusText(r.HTTPResponse.StatusCode)
errCode = strings.Replace(statusText, " ", "", -1)
errMsg = statusText
}
r.Error = awserr.NewRequestFailure(
awserr.New(errCode, errMsg, nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
}

View File

@ -0,0 +1,179 @@
# Circonus metrics tracking for Go applications
This library supports named counters, gauges and histograms.
It also provides convenience wrappers for registering latency-instrumented
functions with Go's built-in http server.
Initializing only requires setting an ApiToken.
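A minimal sketch along those lines (the environment variable is a placeholder;
all other settings fall back to the defaults shown in the larger example below):

```go
package main

import (
	"os"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

func main() {
	cmc := &cgm.Config{}
	cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")

	metrics, err := cgm.NewCirconusMetrics(cmc)
	if err != nil {
		panic(err)
	}

	metrics.Start()           // start the internal auto-flush timer
	metrics.Increment("hits") // named counter
	metrics.Flush()           // push anything outstanding
}
```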
## Example
**rough and simple**
```go
package main
import (
"log"
"math/rand"
"os"
"time"
cgm "github.com/circonus-labs/circonus-gometrics"
)
func main() {
log.Println("Configuring cgm")
cmc := &cgm.Config{}
// Interval at which metrics are submitted to Circonus, default: 10 seconds
cmc.Interval = "10s" // 10 seconds
// Enable debug messages, default: false
cmc.Debug = false
// Send debug messages to specific log.Logger instance
// default: stderr if debug, otherwise discard
//cmc.CheckManager.Log = ...
// Circonus API configuration options
//
// Token, no default (blank disables check manager)
cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")
// App name, default: circonus-gometrics
cmc.CheckManager.API.TokenApp = os.Getenv("CIRCONUS_API_APP")
// URL, default: https://api.circonus.com/v2
cmc.CheckManager.API.URL = os.Getenv("CIRCONUS_API_URL")
// Check configuration options
//
// precedence 1 - explicit submission_url
// precedence 2 - specific check id (note: not a check bundle id)
// precedence 3 - search using instanceId and searchTag
// otherwise: if an applicable check is NOT specified or found, an
// attempt will be made to automatically create one
//
// Pre-existing httptrap check submission_url
cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISSION_URL")
// Pre-existing httptrap check id (check not check bundle)
cmc.CheckManager.Check.ID = ""
// if neither a submission url nor check id is provided, an attempt will be made to find an existing
// httptrap check by using the circonus api to search for a check matching the following criteria:
// an active check,
// of type httptrap,
// where the target/host is equal to InstanceId - see below
// and the check has a tag equal to SearchTag - see below
// Instance ID - an identifier for the 'group of metrics emitted by this process or service'
// this is used as the value for check.target (aka host)
// default: 'hostname':'program name'
// note: for an instance that is ephemeral or transient, where metric continuity is
// desired, set this explicitly so that the current hostname will not be used.
cmc.CheckManager.Check.InstanceID = ""
// Search tag - a specific tag which, when coupled with the instanceId serves to identify the
// origin and/or grouping of the metrics
// default: service:application name (e.g. service:consul)
cmc.CheckManager.Check.SearchTag = ""
// Check secret, default: generated when a check needs to be created
cmc.CheckManager.Check.Secret = ""
// Check tags, array of strings, additional tags to add to a new check, default: none
//cmc.CheckManager.Check.Tags = []string{"category:tagname"}
// max amount of time to hold on to a submission url
// when a given submission fails (due to retries). if the
// time the url was last updated is greater than this, the trap
// url will be refreshed (e.g. if the broker is changed
// in the UI) default 5 minutes
cmc.CheckManager.Check.MaxURLAge = "5m"
// custom display name for check, default: "InstanceId /cgm"
cmc.CheckManager.Check.DisplayName = ""
// force metric activation - if a metric has been disabled via the UI
// the default behavior is to *not* re-activate the metric; this setting
// overrides the behavior and will re-activate the metric when it is
// encountered. "(true|false)", default "false"
cmc.CheckManager.Check.ForceMetricActivation = "false"
// Broker configuration options
//
// Broker ID of specific broker to use, default: random enterprise broker or
// Circonus default if no enterprise brokers are available.
// default: only used if set
cmc.CheckManager.Broker.ID = ""
// used to select a broker with the same tag (e.g. can be used to dictate that a broker
// serving a specific location should be used. "dc:sfo", "location:new_york", "zone:us-west")
// if more than one broker has the tag, one will be selected randomly from the resulting list
// default: not used unless != ""
cmc.CheckManager.Broker.SelectTag = ""
// longest time to wait for a broker connection (if latency is greater than this, the broker
// will be considered invalid and not available for selection), default: 500 milliseconds
cmc.CheckManager.Broker.MaxResponseTime = "500ms"
// if broker Id or SelectTag are not specified, a broker will be selected randomly
// from the list of brokers available to the api token. enterprise brokers take precedence
// viable brokers are "active", have the "httptrap" module enabled, are reachable and respond
// within MaxResponseTime.
log.Println("Creating new cgm instance")
metrics, err := cgm.NewCirconusMetrics(cmc)
if err != nil {
panic(err)
}
src := rand.NewSource(time.Now().UnixNano())
rnd := rand.New(src)
log.Println("Starting cgm internal auto-flush timer")
metrics.Start()
log.Println("Starting to send metrics")
// number of "sets" of metrics to send (a minute's worth)
max := 60
for i := 1; i < max; i++ {
log.Printf("\tmetric set %d of %d", i, max)
metrics.Timing("ding", rnd.Float64()*10)
metrics.Increment("dong")
metrics.Gauge("dang", 10)
time.Sleep(1000 * time.Millisecond)
}
log.Println("Flushing any outstanding metrics manually")
metrics.Flush()
}
```
### HTTP Handler wrapping
```go
http.HandleFunc("/", metrics.TrackHTTPLatency("/", handler_func))
```
### HTTP latency example
```go
package main
import (
"os"
"fmt"
"net/http"
cgm "github.com/circonus-labs/circonus-gometrics"
)
func main() {
cmc := &cgm.Config{}
cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")
metrics, err := cgm.NewCirconusMetrics(cmc)
if err != nil {
panic(err)
}
metrics.Start()
http.HandleFunc("/", metrics.TrackHTTPLatency("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello, %s!", r.URL.Path[1:])
}))
http.ListenAndServe(":8080", http.DefaultServeMux)
}
```

View File

@ -0,0 +1,201 @@
// Package api provides methods for interacting with the Circonus API
package api
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
"time"
"github.com/hashicorp/go-retryablehttp"
)
const (
// a few sensible defaults
defaultAPIURL = "https://api.circonus.com/v2"
defaultAPIApp = "circonus-gometrics"
minRetryWait = 10 * time.Millisecond
maxRetryWait = 50 * time.Millisecond
maxRetries = 3
)
// TokenKeyType - Circonus API Token key
type TokenKeyType string
// TokenAppType - Circonus API Token app name
type TokenAppType string
// IDType Circonus object id (numeric portion of cid)
type IDType int
// CIDType Circonus object cid
type CIDType string
// URLType submission url type
type URLType string
// SearchQueryType search query
type SearchQueryType string
// SearchTagType search/select tag type
type SearchTagType string
// Config options for Circonus API
type Config struct {
URL string
TokenKey string
TokenApp string
Log *log.Logger
Debug bool
}
// API Circonus API
type API struct {
apiURL *url.URL
key TokenKeyType
app TokenAppType
Debug bool
Log *log.Logger
}
// NewAPI returns a new Circonus API
func NewAPI(ac *Config) (*API, error) {
if ac == nil {
return nil, errors.New("Invalid API configuration (nil)")
}
key := TokenKeyType(ac.TokenKey)
if key == "" {
return nil, errors.New("API Token is required")
}
app := TokenAppType(ac.TokenApp)
if app == "" {
app = defaultAPIApp
}
au := string(ac.URL)
if au == "" {
au = defaultAPIURL
}
if !strings.Contains(au, "/") {
// if just a hostname is passed, assume "https" and a path prefix of "/v2"
au = fmt.Sprintf("https://%s/v2", ac.URL)
}
if last := len(au) - 1; last >= 0 && au[last] == '/' {
au = au[:last]
}
apiURL, err := url.Parse(au)
if err != nil {
return nil, err
}
a := &API{apiURL, key, app, ac.Debug, ac.Log}
if a.Log == nil {
if a.Debug {
a.Log = log.New(os.Stderr, "", log.LstdFlags)
} else {
a.Log = log.New(ioutil.Discard, "", log.LstdFlags)
}
}
return a, nil
}
// Get API request
func (a *API) Get(reqPath string) ([]byte, error) {
return a.apiCall("GET", reqPath, nil)
}
// Delete API request
func (a *API) Delete(reqPath string) ([]byte, error) {
return a.apiCall("DELETE", reqPath, nil)
}
// Post API request
func (a *API) Post(reqPath string, data []byte) ([]byte, error) {
return a.apiCall("POST", reqPath, data)
}
// Put API request
func (a *API) Put(reqPath string, data []byte) ([]byte, error) {
return a.apiCall("PUT", reqPath, data)
}
// apiCall call Circonus API
func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, error) {
dataReader := bytes.NewReader(data)
reqURL := a.apiURL.String()
if reqPath[:1] != "/" {
reqURL += "/"
}
if reqPath[:3] == "/v2" {
reqURL += reqPath[3:len(reqPath)]
} else {
reqURL += reqPath
}
req, err := retryablehttp.NewRequest(reqMethod, reqURL, dataReader)
if err != nil {
return nil, fmt.Errorf("[ERROR] creating API request: %s %+v", reqURL, err)
}
req.Header.Add("Accept", "application/json")
req.Header.Add("X-Circonus-Auth-Token", string(a.key))
req.Header.Add("X-Circonus-App-Name", string(a.app))
client := retryablehttp.NewClient()
client.RetryWaitMin = minRetryWait
client.RetryWaitMax = maxRetryWait
client.RetryMax = maxRetries
client.Logger = a.Log
resp, err := client.Do(req)
if err != nil {
stdClient := &http.Client{}
dataReader.Seek(0, 0)
stdRequest, _ := http.NewRequest(reqMethod, reqURL, dataReader)
stdRequest.Header.Add("Accept", "application/json")
stdRequest.Header.Add("X-Circonus-Auth-Token", string(a.key))
stdRequest.Header.Add("X-Circonus-App-Name", string(a.app))
res, errSC := stdClient.Do(stdRequest)
if errSC != nil {
return nil, fmt.Errorf("[ERROR] fetching %s: %s", reqURL, errSC)
}
if res != nil && res.Body != nil {
defer res.Body.Close()
body, _ := ioutil.ReadAll(res.Body)
if a.Debug {
a.Log.Printf("[DEBUG] %v\n", string(body))
}
return nil, fmt.Errorf("[ERROR] %s", string(body))
}
return nil, fmt.Errorf("[ERROR] fetching %s: %s", reqURL, err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("[ERROR] reading body %+v", err)
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
msg := fmt.Sprintf("API response code %d: %s", resp.StatusCode, string(body))
if a.Debug {
a.Log.Printf("[DEBUG] %s\n", msg)
}
return nil, fmt.Errorf("[ERROR] %s", msg)
}
return body, nil
}

View File

@ -0,0 +1,85 @@
package api
import (
"encoding/json"
"fmt"
)
// BrokerDetail instance attributes
type BrokerDetail struct {
CN string `json:"cn"`
IP string `json:"ipaddress"`
MinVer int `json:"minimum_version_required"`
Modules []string `json:"modules"`
Port int `json:"port"`
Skew string `json:"skew"`
Status string `json:"status"`
Version int `json:"version"`
}
// Broker definition
type Broker struct {
Cid string `json:"_cid"`
Details []BrokerDetail `json:"_details"`
Latitude string `json:"_latitude"`
Longitude string `json:"_longitude"`
Name string `json:"_name"`
Tags []string `json:"_tags"`
Type string `json:"_type"`
}
// FetchBrokerByID fetch a broker configuration by [group]id
func (a *API) FetchBrokerByID(id IDType) (*Broker, error) {
cid := CIDType(fmt.Sprintf("/broker/%d", id))
return a.FetchBrokerByCID(cid)
}
// FetchBrokerByCID fetch a broker configuration by cid
func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) {
result, err := a.Get(string(cid))
if err != nil {
return nil, err
}
response := new(Broker)
if err := json.Unmarshal(result, &response); err != nil {
return nil, err
}
return response, nil
}
// FetchBrokerListByTag return list of brokers with a specific tag
func (a *API) FetchBrokerListByTag(searchTag SearchTagType) ([]Broker, error) {
query := SearchQueryType(fmt.Sprintf("f__tags_has=%s", searchTag))
return a.BrokerSearch(query)
}
// BrokerSearch return a list of brokers matching a query/filter
func (a *API) BrokerSearch(query SearchQueryType) ([]Broker, error) {
queryURL := fmt.Sprintf("/broker?%s", string(query))
result, err := a.Get(queryURL)
if err != nil {
return nil, err
}
var brokers []Broker
json.Unmarshal(result, &brokers)
return brokers, nil
}
// FetchBrokerList return list of all brokers available to the api token/app
func (a *API) FetchBrokerList() ([]Broker, error) {
result, err := a.Get("/broker")
if err != nil {
return nil, err
}
var response []Broker
json.Unmarshal(result, &response)
return response, nil
}

View File

@ -0,0 +1,109 @@
package api
import (
"encoding/json"
"fmt"
"net/url"
"strings"
)
// CheckDetails is an arbitrary json structure; we only care about submission_url
type CheckDetails struct {
SubmissionURL string `json:"submission_url"`
}
// Check definition
type Check struct {
Cid string `json:"_cid"`
Active bool `json:"_active"`
BrokerCid string `json:"_broker"`
CheckBundleCid string `json:"_check_bundle"`
CheckUUID string `json:"_check_uuid"`
Details CheckDetails `json:"_details"`
}
// FetchCheckByID fetch a check configuration by id
func (a *API) FetchCheckByID(id IDType) (*Check, error) {
cid := CIDType(fmt.Sprintf("/check/%d", int(id)))
return a.FetchCheckByCID(cid)
}
// FetchCheckByCID fetch a check configuration by cid
func (a *API) FetchCheckByCID(cid CIDType) (*Check, error) {
result, err := a.Get(string(cid))
if err != nil {
return nil, err
}
check := new(Check)
json.Unmarshal(result, check)
return check, nil
}
// FetchCheckBySubmissionURL fetch a check configuration by submission_url
func (a *API) FetchCheckBySubmissionURL(submissionURL URLType) (*Check, error) {
u, err := url.Parse(string(submissionURL))
if err != nil {
return nil, err
}
// valid trap url: scheme://host[:port]/module/httptrap/UUID/secret
// does it smell like a valid trap url path
if u.Path[:17] != "/module/httptrap/" {
return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', unrecognized path", submissionURL)
}
// extract uuid/secret
pathParts := strings.Split(u.Path[17:len(u.Path)], "/")
if len(pathParts) != 2 {
return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', UUID not where expected", submissionURL)
}
uuid := pathParts[0]
query := SearchQueryType(fmt.Sprintf("f__check_uuid=%s", uuid))
checks, err := a.CheckSearch(query)
if err != nil {
return nil, err
}
if len(checks) == 0 {
return nil, fmt.Errorf("[ERROR] No checks found with UUID %s", uuid)
}
numActive := 0
checkID := -1
for idx, check := range checks {
if check.Active {
numActive++
checkID = idx
}
}
if numActive > 1 {
return nil, fmt.Errorf("[ERROR] Multiple checks with same UUID %s", uuid)
}
return &checks[checkID], nil
}
// CheckSearch returns a list of checks matching a query/filter
func (a *API) CheckSearch(query SearchQueryType) ([]Check, error) {
queryURL := fmt.Sprintf("/check?%s", string(query))
result, err := a.Get(queryURL)
if err != nil {
return nil, err
}
var checks []Check
json.Unmarshal(result, &checks)
return checks, nil
}

View File

@ -0,0 +1,128 @@
package api
import (
"encoding/json"
"fmt"
)
// CheckBundleConfig configuration specific to check type
type CheckBundleConfig struct {
AsyncMetrics bool `json:"async_metrics"`
Secret string `json:"secret"`
SubmissionURL string `json:"submission_url"`
}
// CheckBundleMetric individual metric configuration
type CheckBundleMetric struct {
Name string `json:"name"`
Type string `json:"type"`
Units string `json:"units"`
Status string `json:"status"`
}
// CheckBundle definition
type CheckBundle struct {
CheckUUIDs []string `json:"_check_uuids,omitempty"`
Checks []string `json:"_checks,omitempty"`
Cid string `json:"_cid,omitempty"`
Created int `json:"_created,omitempty"`
LastModified int `json:"_last_modified,omitempty"`
LastModifedBy string `json:"_last_modifed_by,omitempty"`
ReverseConnectUrls []string `json:"_reverse_connection_urls,omitempty"`
Brokers []string `json:"brokers"`
Config CheckBundleConfig `json:"config"`
DisplayName string `json:"display_name"`
Metrics []CheckBundleMetric `json:"metrics"`
MetricLimit int `json:"metric_limit"`
Notes string `json:"notes"`
Period int `json:"period"`
Status string `json:"status"`
Tags []string `json:"tags"`
Target string `json:"target"`
Timeout int `json:"timeout"`
Type string `json:"type"`
}
// FetchCheckBundleByID fetch a check bundle configuration by id
func (a *API) FetchCheckBundleByID(id IDType) (*CheckBundle, error) {
cid := CIDType(fmt.Sprintf("/check_bundle/%d", id))
return a.FetchCheckBundleByCID(cid)
}
// FetchCheckBundleByCID fetch a check bundle configuration by id
func (a *API) FetchCheckBundleByCID(cid CIDType) (*CheckBundle, error) {
result, err := a.Get(string(cid))
if err != nil {
return nil, err
}
checkBundle := &CheckBundle{}
json.Unmarshal(result, checkBundle)
return checkBundle, nil
}
// CheckBundleSearch returns list of check bundles matching a search query
// - a search query not a filter (see: https://login.circonus.com/resources/api#searching)
func (a *API) CheckBundleSearch(searchCriteria SearchQueryType) ([]CheckBundle, error) {
apiPath := fmt.Sprintf("/check_bundle?search=%s", searchCriteria)
response, err := a.Get(apiPath)
if err != nil {
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
}
var results []CheckBundle
err = json.Unmarshal(response, &results)
if err != nil {
return nil, fmt.Errorf("[ERROR] Parsing JSON response %+v", err)
}
return results, nil
}
// CreateCheckBundle create a new check bundle (check)
func (a *API) CreateCheckBundle(config CheckBundle) (*CheckBundle, error) {
cfgJSON, err := json.Marshal(config)
if err != nil {
return nil, err
}
response, err := a.Post("/check_bundle", cfgJSON)
if err != nil {
return nil, err
}
checkBundle := &CheckBundle{}
err = json.Unmarshal(response, checkBundle)
if err != nil {
return nil, err
}
return checkBundle, nil
}
// UpdateCheckBundle updates a check bundle configuration
func (a *API) UpdateCheckBundle(config *CheckBundle) (*CheckBundle, error) {
if a.Debug {
a.Log.Printf("[DEBUG] Updating check bundle with new metrics.")
}
cfgJSON, err := json.Marshal(config)
if err != nil {
return nil, err
}
response, err := a.Put(config.Cid, cfgJSON)
if err != nil {
return nil, err
}
checkBundle := &CheckBundle{}
err = json.Unmarshal(response, checkBundle)
if err != nil {
return nil, err
}
return checkBundle, nil
}

View File

@ -0,0 +1,195 @@
package checkmgr
import (
"fmt"
"math/rand"
"net"
"net/url"
"reflect"
"strings"
"time"
"github.com/circonus-labs/circonus-gometrics/api"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
// Get Broker to use when creating a check
func (cm *CheckManager) getBroker() (*api.Broker, error) {
if cm.brokerID != 0 {
broker, err := cm.apih.FetchBrokerByID(cm.brokerID)
if err != nil {
return nil, err
}
if !cm.isValidBroker(broker) {
return nil, fmt.Errorf(
"[ERROR] designated broker %d [%s] is invalid (not active, does not support required check type, or connectivity issue)",
cm.brokerID,
broker.Name)
}
return broker, nil
}
broker, err := cm.selectBroker()
if err != nil {
return nil, fmt.Errorf("[ERROR] Unable to fetch suitable broker %s", err)
}
return broker, nil
}
// Get CN of Broker associated with submission_url to satisfy no IP SANS in certs
func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLType) (string, error) {
u, err := url.Parse(string(submissionURL))
if err != nil {
return "", err
}
hostParts := strings.Split(u.Host, ":")
host := hostParts[0]
if net.ParseIP(host) == nil { // it's a non-ip string
return u.Host, nil
}
cn := ""
for _, detail := range broker.Details {
if detail.IP == host {
cn = detail.CN
break
}
}
if cn == "" {
return "", fmt.Errorf("[ERROR] Unable to match URL host (%s) to Broker", u.Host)
}
return cn, nil
}
// Select a broker for use when creating a check, if a specific broker
// was not specified.
func (cm *CheckManager) selectBroker() (*api.Broker, error) {
var brokerList []api.Broker
var err error
if cm.brokerSelectTag != "" {
brokerList, err = cm.apih.FetchBrokerListByTag(cm.brokerSelectTag)
if err != nil {
return nil, err
}
} else {
brokerList, err = cm.apih.FetchBrokerList()
if err != nil {
return nil, err
}
}
if len(brokerList) == 0 {
return nil, fmt.Errorf("zero brokers found")
}
validBrokers := make(map[string]api.Broker)
haveEnterprise := false
for _, broker := range brokerList {
if cm.isValidBroker(&broker) {
validBrokers[broker.Cid] = broker
if broker.Type == "enterprise" {
haveEnterprise = true
}
}
}
if haveEnterprise { // eliminate non-enterprise brokers from valid brokers
for k, v := range validBrokers {
if v.Type != "enterprise" {
delete(validBrokers, k)
}
}
}
if len(validBrokers) == 0 {
return nil, fmt.Errorf("found %d broker(s), zero are valid", len(brokerList))
}
validBrokerKeys := reflect.ValueOf(validBrokers).MapKeys()
selectedBroker := validBrokers[validBrokerKeys[rand.Intn(len(validBrokerKeys))].String()]
if cm.Debug {
cm.Log.Printf("[DEBUG] Selected broker '%s'\n", selectedBroker.Name)
}
return &selectedBroker, nil
}
// Verify broker supports the check type to be used
func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details *api.BrokerDetail) bool {
for _, module := range details.Modules {
if CheckTypeType(module) == checkType {
return true
}
}
return false
}
// Is the broker valid (active, supports check type, and reachable)
func (cm *CheckManager) isValidBroker(broker *api.Broker) bool {
brokerPort := 0
valid := false
for _, detail := range broker.Details {
brokerPort = 43191
// broker must be active
if detail.Status != statusActive {
if cm.Debug {
cm.Log.Printf("[DEBUG] Broker '%s' is not active.\n", broker.Name)
}
continue
}
// broker must have module loaded for the check type to be used
if !cm.brokerSupportsCheckType(cm.checkType, &detail) {
if cm.Debug {
cm.Log.Printf("[DEBUG] Broker '%s' does not support '%s' checks.\n", broker.Name, cm.checkType)
}
continue
}
// broker must be reachable and respond within designated time
conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", detail.IP, brokerPort), cm.brokerMaxResponseTime)
if err != nil {
if detail.CN != "trap.noit.circonus.net" {
if cm.Debug {
cm.Log.Printf("[DEBUG] Broker '%s' unable to connect, %v\n", broker.Name, err)
}
continue // not able to reach the broker (or response slow enough for it to be considered not usable)
}
// if circonus trap broker, try port 443
brokerPort = 443
conn, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", detail.CN, brokerPort), cm.brokerMaxResponseTime)
if err != nil {
if cm.Debug {
cm.Log.Printf("[DEBUG] Broker '%s' unable to connect %v\n", broker.Name, err)
}
continue // not able to reach the broker on 443 either (or response slow enough for it to be considered not usable)
}
}
conn.Close()
if cm.Debug {
cm.Log.Printf("[DEBUG] Broker '%s' is valid\n", broker.Name)
}
valid = true
break
}
return valid
}

View File

@ -0,0 +1,83 @@
package checkmgr
import (
"crypto/x509"
"encoding/json"
"fmt"
)
// Default Circonus CA certificate
var circonusCA = []byte(`-----BEGIN CERTIFICATE-----
MIID4zCCA0ygAwIBAgIJAMelf8skwVWPMA0GCSqGSIb3DQEBBQUAMIGoMQswCQYD
VQQGEwJVUzERMA8GA1UECBMITWFyeWxhbmQxETAPBgNVBAcTCENvbHVtYmlhMRcw
FQYDVQQKEw5DaXJjb251cywgSW5jLjERMA8GA1UECxMIQ2lyY29udXMxJzAlBgNV
BAMTHkNpcmNvbnVzIENlcnRpZmljYXRlIEF1dGhvcml0eTEeMBwGCSqGSIb3DQEJ
ARYPY2FAY2lyY29udXMubmV0MB4XDTA5MTIyMzE5MTcwNloXDTE5MTIyMTE5MTcw
NlowgagxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhNYXJ5bGFuZDERMA8GA1UEBxMI
Q29sdW1iaWExFzAVBgNVBAoTDkNpcmNvbnVzLCBJbmMuMREwDwYDVQQLEwhDaXJj
b251czEnMCUGA1UEAxMeQ2lyY29udXMgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR4w
HAYJKoZIhvcNAQkBFg9jYUBjaXJjb251cy5uZXQwgZ8wDQYJKoZIhvcNAQEBBQAD
gY0AMIGJAoGBAKz2X0/0vJJ4ad1roehFyxUXHdkjJA9msEKwT2ojummdUB3kK5z6
PDzDL9/c65eFYWqrQWVWZSLQK1D+v9xJThCe93v6QkSJa7GZkCq9dxClXVtBmZH3
hNIZZKVC6JMA9dpRjBmlFgNuIdN7q5aJsv8VZHH+QrAyr9aQmhDJAmk1AgMBAAGj
ggERMIIBDTAdBgNVHQ4EFgQUyNTsgZHSkhhDJ5i+6IFlPzKYxsUwgd0GA1UdIwSB
1TCB0oAUyNTsgZHSkhhDJ5i+6IFlPzKYxsWhga6kgaswgagxCzAJBgNVBAYTAlVT
MREwDwYDVQQIEwhNYXJ5bGFuZDERMA8GA1UEBxMIQ29sdW1iaWExFzAVBgNVBAoT
DkNpcmNvbnVzLCBJbmMuMREwDwYDVQQLEwhDaXJjb251czEnMCUGA1UEAxMeQ2ly
Y29udXMgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR4wHAYJKoZIhvcNAQkBFg9jYUBj
aXJjb251cy5uZXSCCQDHpX/LJMFVjzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEB
BQUAA4GBAAHBtl15BwbSyq0dMEBpEdQYhHianU/rvOMe57digBmox7ZkPEbB/baE
sYJysziA2raOtRxVRtcxuZSMij2RiJDsLxzIp1H60Xhr8lmf7qF6Y+sZl7V36KZb
n2ezaOoRtsQl9dhqEMe8zgL76p9YZ5E69Al0mgiifTteyNjjMuIW
-----END CERTIFICATE-----`)
// CACert contains cert returned from Circonus API
type CACert struct {
Contents string `json:"contents"`
}
// loadCACert loads the CA cert for the broker designated by the submission url
func (cm *CheckManager) loadCACert() {
if cm.certPool != nil {
return
}
cm.certPool = x509.NewCertPool()
cert, err := cm.fetchCert()
if err != nil {
if cm.Debug {
cm.Log.Printf("[DEBUG] Unable to fetch ca.crt, using default. %+v\n", err)
}
}
if cert == nil {
cert = circonusCA
}
cm.certPool.AppendCertsFromPEM(cert)
}
// fetchCert fetches CA certificate using Circonus API
func (cm *CheckManager) fetchCert() ([]byte, error) {
if !cm.enabled {
return circonusCA, nil
}
response, err := cm.apih.Get("/pki/ca.crt")
if err != nil {
return nil, err
}
cadata := new(CACert)
err = json.Unmarshal(response, cadata)
if err != nil {
return nil, err
}
if cadata.Contents == "" {
return nil, fmt.Errorf("[ERROR] Unable to find ca cert %+v", cadata)
}
return []byte(cadata.Contents), nil
}

View File

@ -0,0 +1,207 @@
package checkmgr
import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/circonus-labs/circonus-gometrics/api"
)
// Initialize CirconusMetrics instance. Attempt to find a check, otherwise create one.
// use cases:
//
// check [bundle] by submission url
// check [bundle] by *check* id (note, not check_bundle id)
// check [bundle] by search
// create check [bundle]
func (cm *CheckManager) initializeTrapURL() error {
if cm.trapURL != "" {
return nil
}
cm.trapmu.Lock()
defer cm.trapmu.Unlock()
if cm.checkSubmissionURL != "" {
if !cm.enabled {
cm.trapURL = cm.checkSubmissionURL
cm.trapLastUpdate = time.Now()
return nil
}
}
if !cm.enabled {
return errors.New("Unable to initialize trap, check manager is disabled.")
}
var err error
var check *api.Check
var checkBundle *api.CheckBundle
var broker *api.Broker
if cm.checkSubmissionURL != "" {
check, err = cm.apih.FetchCheckBySubmissionURL(cm.checkSubmissionURL)
if err != nil {
return err
}
// extract check id from check object returned from looking up using submission url
// set m.CheckId to the id
// set m.SubmissionUrl to "" to prevent trying to search on it going forward
// use case: if the broker is changed in the UI metrics would stop flowing
// unless the new submission url can be fetched with the API (which is no
// longer possible using the original submission url)
var id int
id, err = strconv.Atoi(strings.Replace(check.Cid, "/check/", "", -1))
if err == nil {
cm.checkID = api.IDType(id)
cm.checkSubmissionURL = ""
} else {
cm.Log.Printf(
"[WARN] SubmissionUrl check to Check ID: unable to convert %s to int %q\n",
check.Cid, err)
}
} else if cm.checkID > 0 {
check, err = cm.apih.FetchCheckByID(cm.checkID)
if err != nil {
return err
}
} else {
searchCriteria := fmt.Sprintf(
"(active:1)(host:\"%s\")(type:\"%s\")(tags:%s)",
cm.checkInstanceID, cm.checkType, cm.checkSearchTag)
checkBundle, err = cm.checkBundleSearch(searchCriteria)
if err != nil {
return err
}
if checkBundle == nil {
// err==nil && checkBundle==nil is "no check bundles matched"
// an error *should* be returned for any other invalid scenario
checkBundle, broker, err = cm.createNewCheck()
if err != nil {
return err
}
}
}
if checkBundle == nil {
if check != nil {
checkBundle, err = cm.apih.FetchCheckBundleByCID(api.CIDType(check.CheckBundleCid))
if err != nil {
return err
}
} else {
return fmt.Errorf("[ERROR] Unable to retrieve, find, or create check")
}
}
if broker == nil {
broker, err = cm.apih.FetchBrokerByCID(api.CIDType(checkBundle.Brokers[0]))
if err != nil {
return err
}
}
// retain to facilitate metric management (adding new metrics specifically)
cm.checkBundle = checkBundle
cm.inventoryMetrics()
// url to which metrics should be PUT
cm.trapURL = api.URLType(checkBundle.Config.SubmissionURL)
// used as "ServerName" when sending, to get around certs not having IP SANs
// (cert created with server name as CN but IP used in trap url)
cn, err := cm.getBrokerCN(broker, cm.trapURL)
if err != nil {
return err
}
cm.trapCN = BrokerCNType(cn)
cm.trapLastUpdate = time.Now()
return nil
}
// Search for a check bundle given a predetermined set of criteria
func (cm *CheckManager) checkBundleSearch(criteria string) (*api.CheckBundle, error) {
checkBundles, err := cm.apih.CheckBundleSearch(api.SearchQueryType(criteria))
if err != nil {
return nil, err
}
if len(checkBundles) == 0 {
return nil, nil // trigger creation of a new check
}
numActive := 0
checkID := -1
for idx, check := range checkBundles {
if check.Status == statusActive {
numActive++
checkID = idx
}
}
if numActive > 1 {
return nil, fmt.Errorf("[ERROR] Multiple possibilities multiple check bundles match criteria %s\n", criteria)
}
return &checkBundles[checkID], nil
}
// Create a new check to receive metrics
func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error) {
checkSecret := string(cm.checkSecret)
if checkSecret == "" {
secret, err := cm.makeSecret()
if err != nil {
secret = "myS3cr3t"
}
checkSecret = secret
}
broker, err := cm.getBroker()
if err != nil {
return nil, nil, err
}
config := api.CheckBundle{
Brokers: []string{broker.Cid},
Config: api.CheckBundleConfig{AsyncMetrics: true, Secret: checkSecret},
DisplayName: string(cm.checkDisplayName),
Metrics: []api.CheckBundleMetric{},
MetricLimit: 0,
Notes: "",
Period: 60,
Status: statusActive,
Tags: append([]string{string(cm.checkSearchTag)}, cm.checkTags...),
Target: string(cm.checkInstanceID),
Timeout: 10,
Type: string(cm.checkType),
}
checkBundle, err := cm.apih.CreateCheckBundle(config)
if err != nil {
return nil, nil, err
}
return checkBundle, broker, nil
}
// Create a dynamic secret to use with a new check
func (cm *CheckManager) makeSecret() (string, error) {
hash := sha256.New()
x := make([]byte, 2048)
if _, err := rand.Read(x); err != nil {
return "", err
}
hash.Write(x)
return hex.EncodeToString(hash.Sum(nil))[0:16], nil
}

View File

@ -0,0 +1,361 @@
// Package checkmgr provides a check management interface to circonus-gometrics
package checkmgr
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"path"
"strconv"
"sync"
"time"
"github.com/circonus-labs/circonus-gometrics/api"
)
// Check management offers:
//
// Create a check if one cannot be found matching specific criteria
// Manage metrics in the supplied check (enabling new metrics as they are submitted)
//
// To disable check management, leave Config.Api.Token.Key blank
//
// use cases:
// configure without api token - check management disabled
// - configuration parameters other than Check.SubmissionUrl, Debug and Log are ignored
// - note: SubmissionUrl is **required** in this case as there is no way to derive w/o api
// configure with api token - check management enabled
// - all other configuration parameters affect how the trap url is obtained
// 1. provided (Check.SubmissionUrl)
// 2. via check lookup (CheckConfig.Id)
// 3. via a search using CheckConfig.InstanceId + CheckConfig.SearchTag
// 4. a new check is created
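//
// A configuration sketch covering both cases (the url, token, and tag values
// below are placeholders, not real endpoints or credentials):
//
//	// without an api token - submission url is required
//	cfg := &checkmgr.Config{}
//	cfg.Check.SubmissionURL = "https://broker.example.net/module/httptrap/UUID/secret"
//
//	// with an api token - the trap url is derived as described above
//	cfg := &checkmgr.Config{}
//	cfg.API.TokenKey = "..."
//	cfg.Check.InstanceID = "myhost:myapp"
//	cfg.Check.SearchTag = "service:myapp"
//
//	cm, err := checkmgr.NewCheckManager(cfg)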
const (
defaultCheckType = "httptrap"
defaultTrapMaxURLAge = "60s" // 60 seconds
defaultBrokerMaxResponseTime = "500ms" // 500 milliseconds
defaultForceMetricActivation = "false"
statusActive = "active"
)
// CheckConfig options for check
type CheckConfig struct {
// a specific submission url
SubmissionURL string
// a specific check id (not check bundle id)
ID string
// unique instance id string
// used to search for a check to use
// used as check.target when creating a check
InstanceID string
// unique check searching tag
// used to search for a check to use (combined with instanceid)
// used as a regular tag when creating a check
SearchTag string
// a custom display name for the check (as viewed in UI Checks)
DisplayName string
// httptrap check secret (for creating a check)
Secret string
// additional tags to add to a check (when creating a check)
// these tags will not be added to an existing check
Tags []string
// max amount of time to hold on to a submission url
// when a given submission fails (due to retries). if the
// time the url was last updated is greater than this, the trap
// url will be refreshed (e.g. if the broker is changed
// in the UI) **only relevant when check management is enabled**
// e.g. 5m, 30m, 1h, etc.
MaxURLAge string
// force metric activation - if a metric has been disabled via the UI
// the default behavior is to *not* re-activate the metric; this setting
// overrides the behavior and will re-activate the metric when it is
// encountered. "(true|false)", default "false"
ForceMetricActivation string
}
// BrokerConfig options for broker
type BrokerConfig struct {
// a specific broker id (numeric portion of cid)
ID string
// a tag that can be used to select 1-n brokers from which to select
// when creating a new check (e.g. datacenter:abc)
SelectTag string
// for a broker to be considered viable it must respond to a
// connection attempt within this amount of time e.g. 200ms, 2s, 1m
MaxResponseTime string
}
// Config options
type Config struct {
Log *log.Logger
Debug bool
// Circonus API config
API api.Config
// Check specific configuration options
Check CheckConfig
// Broker specific configuration options
Broker BrokerConfig
}
// CheckTypeType check type
type CheckTypeType string
// CheckInstanceIDType check instance id
type CheckInstanceIDType string
// CheckSecretType check secret
type CheckSecretType string
// CheckTagsType check tags
type CheckTagsType []string
// CheckDisplayNameType check display name
type CheckDisplayNameType string
// BrokerCNType broker common name
type BrokerCNType string
// CheckManager settings
type CheckManager struct {
enabled bool
Log *log.Logger
Debug bool
apih *api.API
// check
checkType CheckTypeType
checkID api.IDType
checkInstanceID CheckInstanceIDType
checkSearchTag api.SearchTagType
checkSecret CheckSecretType
checkTags CheckTagsType
checkSubmissionURL api.URLType
checkDisplayName CheckDisplayNameType
forceMetricActivation bool
// broker
brokerID api.IDType
brokerSelectTag api.SearchTagType
brokerMaxResponseTime time.Duration
// state
checkBundle *api.CheckBundle
availableMetrics map[string]bool
trapURL api.URLType
trapCN BrokerCNType
trapLastUpdate time.Time
trapMaxURLAge time.Duration
trapmu sync.Mutex
certPool *x509.CertPool
}
// Trap config
type Trap struct {
URL *url.URL
TLS *tls.Config
}
// NewCheckManager returns a new check manager
func NewCheckManager(cfg *Config) (*CheckManager, error) {
if cfg == nil {
return nil, errors.New("Invalid Check Manager configuration (nil).")
}
cm := &CheckManager{
enabled: false,
}
cm.Debug = cfg.Debug
cm.Log = cfg.Log
if cm.Log == nil {
if cm.Debug {
cm.Log = log.New(os.Stderr, "", log.LstdFlags)
} else {
cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
}
}
if cfg.Check.SubmissionURL != "" {
cm.checkSubmissionURL = api.URLType(cfg.Check.SubmissionURL)
}
// Blank API Token *disables* check management
if cfg.API.TokenKey == "" {
if cm.checkSubmissionURL == "" {
return nil, errors.New("Invalid check manager configuration (no API token AND no submission url).")
}
if err := cm.initializeTrapURL(); err != nil {
return nil, err
}
return cm, nil
}
// enable check manager
cm.enabled = true
// initialize api handle
cfg.API.Debug = cm.Debug
cfg.API.Log = cm.Log
apih, err := api.NewAPI(&cfg.API)
if err != nil {
return nil, err
}
cm.apih = apih
// initialize check related data
cm.checkType = defaultCheckType
idSetting := "0"
if cfg.Check.ID != "" {
idSetting = cfg.Check.ID
}
id, err := strconv.Atoi(idSetting)
if err != nil {
return nil, err
}
cm.checkID = api.IDType(id)
cm.checkInstanceID = CheckInstanceIDType(cfg.Check.InstanceID)
cm.checkDisplayName = CheckDisplayNameType(cfg.Check.DisplayName)
cm.checkSearchTag = api.SearchTagType(cfg.Check.SearchTag)
cm.checkSecret = CheckSecretType(cfg.Check.Secret)
cm.checkTags = cfg.Check.Tags
fma := defaultForceMetricActivation
if cfg.Check.ForceMetricActivation != "" {
fma = cfg.Check.ForceMetricActivation
}
fm, err := strconv.ParseBool(fma)
if err != nil {
return nil, err
}
cm.forceMetricActivation = fm
_, an := path.Split(os.Args[0])
hn, err := os.Hostname()
if err != nil {
hn = "unknown"
}
if cm.checkInstanceID == "" {
cm.checkInstanceID = CheckInstanceIDType(fmt.Sprintf("%s:%s", hn, an))
}
if cm.checkSearchTag == "" {
cm.checkSearchTag = api.SearchTagType(fmt.Sprintf("service:%s", an))
}
if cm.checkDisplayName == "" {
cm.checkDisplayName = CheckDisplayNameType(fmt.Sprintf("%s /cgm", string(cm.checkInstanceID)))
}
dur := cfg.Check.MaxURLAge
if dur == "" {
dur = defaultTrapMaxURLAge
}
maxDur, err := time.ParseDuration(dur)
if err != nil {
return nil, err
}
cm.trapMaxURLAge = maxDur
// setup broker
idSetting = "0"
if cfg.Broker.ID != "" {
idSetting = cfg.Broker.ID
}
id, err = strconv.Atoi(idSetting)
if err != nil {
return nil, err
}
cm.brokerID = api.IDType(id)
cm.brokerSelectTag = api.SearchTagType(cfg.Broker.SelectTag)
dur = cfg.Broker.MaxResponseTime
if dur == "" {
dur = defaultBrokerMaxResponseTime
}
maxDur, err = time.ParseDuration(dur)
if err != nil {
return nil, err
}
cm.brokerMaxResponseTime = maxDur
// metrics
cm.availableMetrics = make(map[string]bool)
if err := cm.initializeTrapURL(); err != nil {
return nil, err
}
return cm, nil
}
// GetTrap return the trap url
func (cm *CheckManager) GetTrap() (*Trap, error) {
if cm.trapURL == "" {
if err := cm.initializeTrapURL(); err != nil {
return nil, err
}
}
trap := &Trap{}
u, err := url.Parse(string(cm.trapURL))
if err != nil {
return nil, err
}
trap.URL = u
if u.Scheme == "https" {
if cm.certPool == nil {
cm.loadCACert()
}
t := &tls.Config{
RootCAs: cm.certPool,
}
if cm.trapCN != "" {
t.ServerName = string(cm.trapCN)
}
trap.TLS = t
}
return trap, nil
}
// ResetTrap clears the trap URL, forcing a request to the API for the submission URL and broker ca cert
func (cm *CheckManager) ResetTrap() error {
if cm.trapURL == "" {
return nil
}
cm.trapURL = ""
cm.certPool = nil
err := cm.initializeTrapURL()
return err
}
// RefreshTrap checks when the URL was last reset and resets it if needed
func (cm *CheckManager) RefreshTrap() {
if cm.trapURL == "" {
return
}
if time.Since(cm.trapLastUpdate) >= cm.trapMaxURLAge {
cm.ResetTrap()
}
}

View File

@ -0,0 +1,75 @@
package checkmgr
import (
"github.com/circonus-labs/circonus-gometrics/api"
)
// IsMetricActive checks whether a given metric name is currently active (enabled)
func (cm *CheckManager) IsMetricActive(name string) bool {
active, _ := cm.availableMetrics[name]
return active
}
// ActivateMetric determines if a given metric should be activated
func (cm *CheckManager) ActivateMetric(name string) bool {
active, exists := cm.availableMetrics[name]
if !exists {
return true
}
if !active && cm.forceMetricActivation {
return true
}
return false
}
// AddNewMetrics updates a check bundle with new metrics
func (cm *CheckManager) AddNewMetrics(newMetrics map[string]*api.CheckBundleMetric) {
// only if check manager is enabled
if !cm.enabled {
return
}
// only if checkBundle has been populated
if cm.checkBundle == nil {
return
}
newCheckBundle := cm.checkBundle
numCurrMetrics := len(newCheckBundle.Metrics)
numNewMetrics := len(newMetrics)
if numCurrMetrics+numNewMetrics >= cap(newCheckBundle.Metrics) {
nm := make([]api.CheckBundleMetric, numCurrMetrics+numNewMetrics)
copy(nm, newCheckBundle.Metrics)
newCheckBundle.Metrics = nm
}
newCheckBundle.Metrics = newCheckBundle.Metrics[0 : numCurrMetrics+numNewMetrics]
i := 0
for _, metric := range newMetrics {
newCheckBundle.Metrics[numCurrMetrics+i] = *metric
i++
}
checkBundle, err := cm.apih.UpdateCheckBundle(newCheckBundle)
if err != nil {
cm.Log.Printf("[ERROR] updating check bundle with new metrics %v", err)
return
}
cm.checkBundle = checkBundle
cm.inventoryMetrics()
}
// inventoryMetrics creates list of active metrics in check bundle
func (cm *CheckManager) inventoryMetrics() {
availableMetrics := make(map[string]bool)
for _, metric := range cm.checkBundle.Metrics {
availableMetrics[metric.Name] = metric.Status == "active"
}
cm.availableMetrics = availableMetrics
}

View File

@ -0,0 +1,250 @@
// Package circonusgometrics provides instrumentation for your applications in the form
// of counters, gauges and histograms and allows you to publish them to
// Circonus
//
// Counters
//
// A counter is a monotonically-increasing, unsigned, 64-bit integer used to
// represent the number of times an event has occurred. By tracking the deltas
// between measurements of a counter over intervals of time, an aggregation
// layer can derive rates, acceleration, etc.
//
// Gauges
//
// A gauge returns instantaneous measurements of something using signed, 64-bit
// integers. This value does not need to be monotonic.
//
// Histograms
//
// A histogram tracks the distribution of a stream of values (e.g. the number of
// seconds it takes to handle requests). Circonus can calculate complex
// analytics on these.
//
// Reporting
//
// A periodic push to a Circonus httptrap is configurable.
package circonusgometrics
import (
"errors"
"io/ioutil"
"log"
"os"
"sync"
"time"
"github.com/circonus-labs/circonus-gometrics/api"
"github.com/circonus-labs/circonus-gometrics/checkmgr"
)
const (
defaultFlushInterval = "10s" // 10 * time.Second
)
// Config options for circonus-gometrics
type Config struct {
Log *log.Logger
Debug bool
// API, Check and Broker configuration options
CheckManager checkmgr.Config
// how frequently to submit metrics to Circonus, default 10 seconds
Interval string
}
// CirconusMetrics state
type CirconusMetrics struct {
Log *log.Logger
Debug bool
flushInterval time.Duration
flushing bool
flushmu sync.Mutex
check *checkmgr.CheckManager
counters map[string]uint64
cm sync.Mutex
counterFuncs map[string]func() uint64
cfm sync.Mutex
gauges map[string]string
gm sync.Mutex
gaugeFuncs map[string]func() int64
gfm sync.Mutex
histograms map[string]*Histogram
hm sync.Mutex
text map[string]string
tm sync.Mutex
textFuncs map[string]func() string
tfm sync.Mutex
}
// NewCirconusMetrics returns a CirconusMetrics instance
func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
if cfg == nil {
return nil, errors.New("Invalid configuration (nil).")
}
cm := &CirconusMetrics{
counters: make(map[string]uint64),
counterFuncs: make(map[string]func() uint64),
gauges: make(map[string]string),
gaugeFuncs: make(map[string]func() int64),
histograms: make(map[string]*Histogram),
text: make(map[string]string),
textFuncs: make(map[string]func() string),
}
cm.Debug = cfg.Debug
if cm.Debug {
if cfg.Log == nil {
cm.Log = log.New(os.Stderr, "", log.LstdFlags)
} else {
cm.Log = cfg.Log
}
}
if cm.Log == nil {
cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
}
fi := defaultFlushInterval
if cfg.Interval != "" {
fi = cfg.Interval
}
dur, err := time.ParseDuration(fi)
if err != nil {
return nil, err
}
cm.flushInterval = dur
cfg.CheckManager.Debug = cm.Debug
cfg.CheckManager.Log = cm.Log
check, err := checkmgr.NewCheckManager(&cfg.CheckManager)
if err != nil {
return nil, err
}
cm.check = check
if _, err := cm.check.GetTrap(); err != nil {
return nil, err
}
return cm, nil
}
// Start initializes the CirconusMetrics instance based on
// configuration settings and sets the httptrap check url to
// which metrics should be sent. It then starts a periodic
// submission process of all metrics collected.
func (m *CirconusMetrics) Start() {
go func() {
for _ = range time.NewTicker(m.flushInterval).C {
m.Flush()
}
}()
}
// Flush kicks off the process of sending metrics to Circonus
func (m *CirconusMetrics) Flush() {
if m.flushing {
return
}
m.flushmu.Lock()
m.flushing = true
m.flushmu.Unlock()
if m.Debug {
m.Log.Println("[DEBUG] Flushing metrics")
}
// check for new metrics and enable them automatically
newMetrics := make(map[string]*api.CheckBundleMetric)
counters, gauges, histograms, text := m.snapshot()
output := make(map[string]interface{})
for name, value := range counters {
send := m.check.IsMetricActive(name)
if !send && m.check.ActivateMetric(name) {
send = true
newMetrics[name] = &api.CheckBundleMetric{
Name: name,
Type: "numeric",
Status: "active",
}
}
if send {
output[name] = map[string]interface{}{
"_type": "n",
"_value": value,
}
}
}
for name, value := range gauges {
send := m.check.IsMetricActive(name)
if !send && m.check.ActivateMetric(name) {
send = true
newMetrics[name] = &api.CheckBundleMetric{
Name: name,
Type: "numeric",
Status: "active",
}
}
if send {
output[name] = map[string]interface{}{
"_type": "n",
"_value": value,
}
}
}
for name, value := range histograms {
send := m.check.IsMetricActive(name)
if !send && m.check.ActivateMetric(name) {
send = true
newMetrics[name] = &api.CheckBundleMetric{
Name: name,
Type: "histogram",
Status: "active",
}
}
if send {
output[name] = map[string]interface{}{
"_type": "n",
"_value": value.DecStrings(),
}
}
}
for name, value := range text {
send := m.check.IsMetricActive(name)
if !send && m.check.ActivateMetric(name) {
send = true
newMetrics[name] = &api.CheckBundleMetric{
Name: name,
Type: "text",
Status: "active",
}
}
if send {
output[name] = map[string]interface{}{
"_type": "s",
"_value": value,
}
}
}
m.submit(output, newMetrics)
m.flushmu.Lock()
m.flushing = false
m.flushmu.Unlock()
}
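
Taken together, NewCirconusMetrics, Start, and the metric setters above combine roughly as in the sketch below. This is not part of the vendored source: the CheckManager portion of the config (API token or submission URL) is assumed to be filled in elsewhere, and without it NewCirconusMetrics will fail when it tries to obtain a trap URL.

```go
package main

import (
	"log"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

func main() {
	// Minimal sketch: cfg.CheckManager would normally carry the Circonus API
	// token (or a direct submission URL) and check settings; those fields are
	// assumed to be configured elsewhere.
	cfg := &cgm.Config{Interval: "10s"} // how often Flush() runs once Start() is called

	metrics, err := cgm.NewCirconusMetrics(cfg)
	if err != nil {
		log.Fatal(err)
	}

	metrics.Start()                          // begins periodic Flush() on a ticker
	metrics.Increment("requests")            // counter, submitted as a numeric metric
	metrics.SetGauge("queue_depth", 42)      // gauge, stringified via gaugeValString
	metrics.Timing("handler_seconds", 0.042) // histogram sample
}
```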

View File

@ -0,0 +1,51 @@
package circonusgometrics
// A Counter is a monotonically increasing unsigned integer.
//
// Use a counter to derive rates (e.g., record total number of requests, derive
// requests per second).
// Increment counter by 1
func (m *CirconusMetrics) Increment(metric string) {
m.Add(metric, 1)
}
// IncrementByValue updates counter by supplied value
func (m *CirconusMetrics) IncrementByValue(metric string, val uint64) {
m.Add(metric, val)
}
// Set a counter to specific value
func (m *CirconusMetrics) Set(metric string, val uint64) {
m.cm.Lock()
defer m.cm.Unlock()
m.counters[metric] = val
}
// Add updates counter by supplied value
func (m *CirconusMetrics) Add(metric string, val uint64) {
m.cm.Lock()
defer m.cm.Unlock()
m.counters[metric] += val
}
// RemoveCounter removes the named counter
func (m *CirconusMetrics) RemoveCounter(metric string) {
m.cm.Lock()
defer m.cm.Unlock()
delete(m.counters, metric)
}
// SetCounterFunc sets a counter to a function [called at flush interval]
func (m *CirconusMetrics) SetCounterFunc(metric string, fn func() uint64) {
m.cfm.Lock()
defer m.cfm.Unlock()
m.counterFuncs[metric] = fn
}
// RemoveCounterFunc removes the named counter function
func (m *CirconusMetrics) RemoveCounterFunc(metric string) {
m.cfm.Lock()
defer m.cfm.Unlock()
delete(m.counterFuncs, metric)
}

View File

@ -0,0 +1,77 @@
package circonusgometrics
// A Gauge is an instantaneous measurement of a value.
//
// Use a gauge to track metrics which increase and decrease (e.g., amount of
// free memory).
import (
"fmt"
)
// Gauge sets a gauge to a value
func (m *CirconusMetrics) Gauge(metric string, val interface{}) {
m.SetGauge(metric, val)
}
// SetGauge sets a gauge to a value
func (m *CirconusMetrics) SetGauge(metric string, val interface{}) {
m.gm.Lock()
defer m.gm.Unlock()
m.gauges[metric] = m.gaugeValString(val)
}
// RemoveGauge removes a gauge
func (m *CirconusMetrics) RemoveGauge(metric string) {
m.gm.Lock()
defer m.gm.Unlock()
delete(m.gauges, metric)
}
// SetGaugeFunc sets a gauge to a function [called at flush interval]
func (m *CirconusMetrics) SetGaugeFunc(metric string, fn func() int64) {
m.gfm.Lock()
defer m.gfm.Unlock()
m.gaugeFuncs[metric] = fn
}
// RemoveGaugeFunc removes a gauge function
func (m *CirconusMetrics) RemoveGaugeFunc(metric string) {
m.gfm.Lock()
defer m.gfm.Unlock()
delete(m.gaugeFuncs, metric)
}
// gaugeValString converts an interface value (of a supported type) to a string
func (m *CirconusMetrics) gaugeValString(val interface{}) string {
vs := ""
switch v := val.(type) {
default:
// ignore it, unsupported type
case int:
vs = fmt.Sprintf("%d", v)
case int8:
vs = fmt.Sprintf("%d", v)
case int16:
vs = fmt.Sprintf("%d", v)
case int32:
vs = fmt.Sprintf("%d", v)
case int64:
vs = fmt.Sprintf("%d", v)
case uint:
vs = fmt.Sprintf("%d", v)
case uint8:
vs = fmt.Sprintf("%d", v)
case uint16:
vs = fmt.Sprintf("%d", v)
case uint32:
vs = fmt.Sprintf("%d", v)
case uint64:
vs = fmt.Sprintf("%d", v)
case float32:
vs = fmt.Sprintf("%f", v)
case float64:
vs = fmt.Sprintf("%f", v)
}
return vs
}

View File

@ -0,0 +1,73 @@
package circonusgometrics
import (
"sync"
"github.com/circonus-labs/circonusllhist"
)
// Histogram measures the distribution of a stream of values.
type Histogram struct {
name string
hist *circonusllhist.Histogram
rw sync.RWMutex
}
// Timing adds a value to a histogram
func (m *CirconusMetrics) Timing(metric string, val float64) {
m.SetHistogramValue(metric, val)
}
// RecordValue adds a value to a histogram
func (m *CirconusMetrics) RecordValue(metric string, val float64) {
m.SetHistogramValue(metric, val)
}
// SetHistogramValue adds a value to a histogram
func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) {
m.NewHistogram(metric)
m.histograms[metric].rw.Lock()
defer m.histograms[metric].rw.Unlock()
m.histograms[metric].hist.RecordValue(val)
}
// RemoveHistogram removes a histogram
func (m *CirconusMetrics) RemoveHistogram(metric string) {
m.hm.Lock()
defer m.hm.Unlock()
delete(m.histograms, metric)
}
// NewHistogram returns a histogram instance.
func (m *CirconusMetrics) NewHistogram(metric string) *Histogram {
m.hm.Lock()
defer m.hm.Unlock()
if hist, ok := m.histograms[metric]; ok {
return hist
}
hist := &Histogram{
name: metric,
hist: circonusllhist.New(),
}
m.histograms[metric] = hist
return hist
}
// Name returns the name from a histogram instance
func (h *Histogram) Name() string {
return h.name
}
// RecordValue records the given value to a histogram instance
func (h *Histogram) RecordValue(v float64) {
h.rw.Lock()
defer h.rw.Unlock()
h.hist.RecordValue(v)
}

View File

@ -0,0 +1,122 @@
package circonusgometrics
import (
"bytes"
"encoding/json"
"errors"
"io/ioutil"
"log"
"net"
"net/http"
"strconv"
"time"
"github.com/circonus-labs/circonus-gometrics/api"
"github.com/hashicorp/go-retryablehttp"
)
func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[string]*api.CheckBundleMetric) {
if len(newMetrics) > 0 {
m.check.AddNewMetrics(newMetrics)
}
str, err := json.Marshal(output)
if err != nil {
m.Log.Printf("[ERROR] marshling output %+v", err)
return
}
numStats, err := m.trapCall(str)
if err != nil {
m.Log.Printf("[ERROR] %+v\n", err)
return
}
if m.Debug {
m.Log.Printf("[DEBUG] %d stats sent\n", numStats)
}
}
func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
trap, err := m.check.GetTrap()
if err != nil {
return 0, err
}
dataReader := bytes.NewReader(payload)
req, err := retryablehttp.NewRequest("PUT", trap.URL.String(), dataReader)
if err != nil {
return 0, err
}
req.Header.Add("Accept", "application/json")
client := retryablehttp.NewClient()
if trap.URL.Scheme == "https" {
client.HTTPClient.Transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: trap.TLS,
DisableKeepAlives: true,
MaxIdleConnsPerHost: -1,
DisableCompression: true,
}
} else {
client.HTTPClient.Transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
DisableKeepAlives: true,
MaxIdleConnsPerHost: -1,
DisableCompression: true,
}
}
client.RetryWaitMin = 10 * time.Millisecond
client.RetryWaitMax = 50 * time.Millisecond
client.RetryMax = 3
client.Logger = m.Log
attempts := -1
client.RequestLogHook = func(logger *log.Logger, req *http.Request, retryNumber int) {
attempts = retryNumber
}
resp, err := client.Do(req)
if err != nil {
if attempts == client.RetryMax {
m.check.RefreshTrap()
}
return 0, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
m.Log.Printf("[ERROR] reading body, proceeding. %s\n", err)
}
var response map[string]interface{}
err = json.Unmarshal(body, &response)
if err != nil {
m.Log.Printf("[ERROR] parsing body, proceeding. %s\n", err)
}
if resp.StatusCode != 200 {
return 0, errors.New("[ERROR] bad response code: " + strconv.Itoa(resp.StatusCode))
}
switch v := response["stats"].(type) {
case float64:
return int(v), nil
case int:
return v, nil
default:
}
return 0, errors.New("[ERROR] bad response type")
}

View File

@ -0,0 +1,37 @@
package circonusgometrics
// A Text metric is an arbitrary string
//
// SetText sets a text metric
func (m *CirconusMetrics) SetText(metric string, val string) {
m.SetTextValue(metric, val)
}
// SetTextValue sets a text metric
func (m *CirconusMetrics) SetTextValue(metric string, val string) {
m.tm.Lock()
defer m.tm.Unlock()
m.text[metric] = val
}
// RemoveText removes a text metric
func (m *CirconusMetrics) RemoveText(metric string) {
m.tm.Lock()
defer m.tm.Unlock()
delete(m.text, metric)
}
// SetTextFunc sets a text metric to a function [called at flush interval]
func (m *CirconusMetrics) SetTextFunc(metric string, fn func() string) {
m.tfm.Lock()
defer m.tfm.Unlock()
m.textFuncs[metric] = fn
}
// RemoveTextFunc removes a text metric function
func (m *CirconusMetrics) RemoveTextFunc(metric string) {
m.tfm.Lock()
defer m.tfm.Unlock()
delete(m.textFuncs, metric)
}

View File

@ -0,0 +1,19 @@
package circonusgometrics
import (
"net/http"
"time"
)
// TrackHTTPLatency wraps Handler functions registered with an http.ServeMux, tracking their latencies.
// Metrics are of the form go`HTTP`<method>`<name>`latency and are tracked in a histogram in units
// of seconds (as a float64), providing nanosecond granularity.
func (m *CirconusMetrics) TrackHTTPLatency(name string, handler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
return func(rw http.ResponseWriter, req *http.Request) {
start := time.Now().UnixNano()
handler(rw, req)
elapsed := time.Now().UnixNano() - start
//hist := m.NewHistogram("go`HTTP`" + req.Method + "`" + name + "`latency")
m.RecordValue("go`HTTP`"+req.Method+"`"+name+"`latency", float64(elapsed)/float64(time.Second))
}
}
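
A short sketch of how this wrapper is meant to be used; the handler name, path, and the pre-configured metrics instance are placeholders rather than part of the vendored library.

```go
package metricsexample

import (
	"net/http"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

// RegisterHello wires a handler whose latency is recorded under
// go`HTTP`GET`hello`latency, following the naming scheme described above.
// The *cgm.CirconusMetrics value is assumed to be configured elsewhere.
func RegisterHello(mux *http.ServeMux, m *cgm.CirconusMetrics) {
	mux.HandleFunc("/hello", m.TrackHTTPLatency("hello", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	}))
}
```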

View File

@ -0,0 +1,96 @@
package circonusgometrics
import (
"github.com/circonus-labs/circonusllhist"
)
// Reset removes all existing counters, gauges, histograms, and text metrics (including their registered functions).
func (m *CirconusMetrics) Reset() {
m.cm.Lock()
defer m.cm.Unlock()
m.cfm.Lock()
defer m.cfm.Unlock()
m.gm.Lock()
defer m.gm.Unlock()
m.gfm.Lock()
defer m.gfm.Unlock()
m.hm.Lock()
defer m.hm.Unlock()
m.tm.Lock()
defer m.tm.Unlock()
m.tfm.Lock()
defer m.tfm.Unlock()
m.counters = make(map[string]uint64)
m.counterFuncs = make(map[string]func() uint64)
m.gauges = make(map[string]string)
m.gaugeFuncs = make(map[string]func() int64)
m.histograms = make(map[string]*Histogram)
m.text = make(map[string]string)
m.textFuncs = make(map[string]func() string)
}
// snapshot returns a copy of the values of all registered counters, gauges, histograms, and text metrics.
func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]string, h map[string]*circonusllhist.Histogram, t map[string]string) {
m.cm.Lock()
defer m.cm.Unlock()
m.cfm.Lock()
defer m.cfm.Unlock()
m.gm.Lock()
defer m.gm.Unlock()
m.gfm.Lock()
defer m.gfm.Unlock()
m.hm.Lock()
defer m.hm.Unlock()
m.tm.Lock()
defer m.tm.Unlock()
m.tfm.Lock()
defer m.tfm.Unlock()
c = make(map[string]uint64, len(m.counters)+len(m.counterFuncs))
for n, v := range m.counters {
c[n] = v
}
for n, f := range m.counterFuncs {
c[n] = f()
}
//g = make(map[string]int64, len(m.gauges)+len(m.gaugeFuncs))
g = make(map[string]string, len(m.gauges)+len(m.gaugeFuncs))
for n, v := range m.gauges {
g[n] = v
}
for n, f := range m.gaugeFuncs {
g[n] = m.gaugeValString(f())
}
h = make(map[string]*circonusllhist.Histogram, len(m.histograms))
for n, hist := range m.histograms {
h[n] = hist.hist.CopyAndReset()
}
t = make(map[string]string, len(m.text)+len(m.textFuncs))
for n, v := range m.text {
t[n] = v
}
for n, f := range m.textFuncs {
t[n] = f()
}
return
}

28
vendor/github.com/circonus-labs/circonusllhist/LICENSE generated vendored Normal file
View File

@ -0,0 +1,28 @@
Copyright (c) 2016 Circonus, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name Circonus, Inc. nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,555 @@
// Copyright 2016, Circonus, Inc. All rights reserved.
// See the LICENSE file.
// Package circonusllhist provides an implementation of Circonus' fixed log-linear
// histogram data structure. This allows tracking of histograms in a
// composable way such that accurate error can be reasoned about.
package circonusllhist
import (
"bytes"
"errors"
"fmt"
"math"
"sync"
)
const (
DEFAULT_HIST_SIZE = int16(100)
)
var power_of_ten = [...]float64{
1, 10, 100, 1000, 10000, 100000, 1e+06, 1e+07, 1e+08, 1e+09, 1e+10,
1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30,
1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40,
1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50,
1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60,
1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70,
1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80,
1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90,
1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100,
1e+101, 1e+102, 1e+103, 1e+104, 1e+105, 1e+106, 1e+107, 1e+108, 1e+109,
1e+110, 1e+111, 1e+112, 1e+113, 1e+114, 1e+115, 1e+116, 1e+117, 1e+118,
1e+119, 1e+120, 1e+121, 1e+122, 1e+123, 1e+124, 1e+125, 1e+126, 1e+127,
1e-128, 1e-127, 1e-126, 1e-125, 1e-124, 1e-123, 1e-122, 1e-121, 1e-120,
1e-119, 1e-118, 1e-117, 1e-116, 1e-115, 1e-114, 1e-113, 1e-112, 1e-111,
1e-110, 1e-109, 1e-108, 1e-107, 1e-106, 1e-105, 1e-104, 1e-103, 1e-102,
1e-101, 1e-100, 1e-99, 1e-98, 1e-97, 1e-96,
1e-95, 1e-94, 1e-93, 1e-92, 1e-91, 1e-90, 1e-89, 1e-88, 1e-87, 1e-86,
1e-85, 1e-84, 1e-83, 1e-82, 1e-81, 1e-80, 1e-79, 1e-78, 1e-77, 1e-76,
1e-75, 1e-74, 1e-73, 1e-72, 1e-71, 1e-70, 1e-69, 1e-68, 1e-67, 1e-66,
1e-65, 1e-64, 1e-63, 1e-62, 1e-61, 1e-60, 1e-59, 1e-58, 1e-57, 1e-56,
1e-55, 1e-54, 1e-53, 1e-52, 1e-51, 1e-50, 1e-49, 1e-48, 1e-47, 1e-46,
1e-45, 1e-44, 1e-43, 1e-42, 1e-41, 1e-40, 1e-39, 1e-38, 1e-37, 1e-36,
1e-35, 1e-34, 1e-33, 1e-32, 1e-31, 1e-30, 1e-29, 1e-28, 1e-27, 1e-26,
1e-25, 1e-24, 1e-23, 1e-22, 1e-21, 1e-20, 1e-19, 1e-18, 1e-17, 1e-16,
1e-15, 1e-14, 1e-13, 1e-12, 1e-11, 1e-10, 1e-09, 1e-08, 1e-07, 1e-06,
1e-05, 0.0001, 0.001, 0.01, 0.1,
}
// A Bin is a part of a cumulative distribution.
type Bin struct {
val int8
exp int8
count uint64
}
func NewBinRaw(val int8, exp int8, count uint64) *Bin {
return &Bin{
val: val,
exp: exp,
count: count,
}
}
func NewBin() *Bin {
return NewBinRaw(0, 0, 0)
}
func NewBinFromFloat64(d float64) *Bin {
hb := NewBinRaw(0, 0, 0)
hb.SetFromFloat64(d)
return hb
}
func (hb *Bin) SetFromFloat64(d float64) *Bin {
hb.val = -1
if math.IsInf(d, 0) || math.IsNaN(d) {
return hb
}
if d == 0.0 {
hb.val = 0
return hb
}
sign := 1
if math.Signbit(d) {
sign = -1
}
d = math.Abs(d)
big_exp := int(math.Floor(math.Log10(d)))
hb.exp = int8(big_exp)
if int(hb.exp) != big_exp { //rolled
hb.exp = 0
if big_exp < 0 {
hb.val = 0
}
return hb
}
d = d / hb.PowerOfTen()
d = d * 10
hb.val = int8(sign * int(math.Floor(d+1e-13)))
if hb.val == 100 || hb.val == -100 {
if hb.exp < 127 {
hb.val = hb.val / 10
hb.exp++
} else {
hb.val = 0
hb.exp = 0
}
}
if hb.val == 0 {
hb.exp = 0
return hb
}
if !((hb.val >= 10 && hb.val < 100) ||
(hb.val <= -10 && hb.val > -100)) {
hb.val = -1
hb.exp = 0
}
return hb
}
func (hb *Bin) PowerOfTen() float64 {
idx := int(hb.exp)
if idx < 0 {
idx = 256 + idx
}
return power_of_ten[idx]
}
func (hb *Bin) IsNaN() bool {
if hb.val > 99 || hb.val < -99 {
return true
}
return false
}
func (hb *Bin) Val() int8 {
return hb.val
}
func (hb *Bin) Exp() int8 {
return hb.exp
}
func (hb *Bin) Count() uint64 {
return hb.count
}
func (hb *Bin) Value() float64 {
if hb.IsNaN() {
return math.NaN()
}
if hb.val < 10 && hb.val > -10 {
return 0.0
}
return (float64(hb.val) / 10.0) * hb.PowerOfTen()
}
func (hb *Bin) BinWidth() float64 {
if hb.IsNaN() {
return math.NaN()
}
if hb.val < 10 && hb.val > -10 {
return 0.0
}
return hb.PowerOfTen() / 10.0
}
func (hb *Bin) Midpoint() float64 {
if hb.IsNaN() {
return math.NaN()
}
out := hb.Value()
if out == 0 {
return 0
}
interval := hb.BinWidth()
if out < 0 {
interval = interval * -1
}
return out + interval/2.0
}
func (hb *Bin) Left() float64 {
if hb.IsNaN() {
return math.NaN()
}
out := hb.Value()
if out >= 0 {
return out
}
return out - hb.BinWidth()
}
func (h1 *Bin) Compare(h2 *Bin) int {
if h1.val == h2.val && h1.exp == h2.exp {
return 0
}
if h1.val == -1 {
return 1
}
if h2.val == -1 {
return -1
}
if h1.val == 0 {
if h2.val > 0 {
return 1
}
return -1
}
if h2.val == 0 {
if h1.val < 0 {
return 1
}
return -1
}
if h1.val < 0 && h2.val > 0 {
return 1
}
if h1.val > 0 && h2.val < 0 {
return -1
}
if h1.exp == h2.exp {
if h1.val < h2.val {
return 1
}
return -1
}
if h1.exp > h2.exp {
if h1.val < 0 {
return 1
}
return -1
}
if h1.exp < h2.exp {
if h1.val < 0 {
return -1
}
return 1
}
return 0
}
// This histogram structure tracks values at two decimal digits of precision
// with a bounded error that remains bounded upon composition
type Histogram struct {
mutex sync.Mutex
bvs []Bin
used int16
allocd int16
}
// New returns a new Histogram
func New() *Histogram {
return &Histogram{
allocd: DEFAULT_HIST_SIZE,
used: 0,
bvs: make([]Bin, DEFAULT_HIST_SIZE),
}
}
// Max returns the approximate maximum recorded value.
func (h *Histogram) Max() float64 {
return h.ValueAtQuantile(1.0)
}
// Min returns the approximate minimum recorded value.
func (h *Histogram) Min() float64 {
return h.ValueAtQuantile(0.0)
}
// Mean returns the approximate arithmetic mean of the recorded values.
func (h *Histogram) Mean() float64 {
return h.ApproxMean()
}
// Reset forgets all bins in the histogram (they remain allocated)
func (h *Histogram) Reset() {
h.mutex.Lock()
h.used = 0
h.mutex.Unlock()
}
// RecordValue records the given value, returning an error if the value is out
// of range.
func (h *Histogram) RecordValue(v float64) error {
return h.RecordValues(v, 1)
}
// RecordCorrectedValue records the given value, correcting for stalls in the
// recording process. This only works for processes which are recording values
// at an expected interval (e.g., doing jitter analysis). Processes which are
// recording ad-hoc values (e.g., latency for incoming requests) can't take
// advantage of this.
// CH Compat
func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error {
if err := h.RecordValue(float64(v)); err != nil {
return err
}
if expectedInterval <= 0 || v <= expectedInterval {
return nil
}
missingValue := v - expectedInterval
for missingValue >= expectedInterval {
if err := h.RecordValue(float64(missingValue)); err != nil {
return err
}
missingValue -= expectedInterval
}
return nil
}
// find where a new bin should go
func (h *Histogram) InternalFind(hb *Bin) (bool, int16) {
if h.used == 0 {
return false, 0
}
rv := -1
idx := int16(0)
l := int16(0)
r := h.used - 1
for l < r {
check := (r + l) / 2
rv = h.bvs[check].Compare(hb)
if rv == 0 {
l = check
r = check
} else if rv > 0 {
l = check + 1
} else {
r = check - 1
}
}
if rv != 0 {
rv = h.bvs[l].Compare(hb)
}
idx = l
if rv == 0 {
return true, idx
}
if rv < 0 {
return false, idx
}
idx++
return false, idx
}
func (h *Histogram) InsertBin(hb *Bin, count int64) uint64 {
h.mutex.Lock()
defer h.mutex.Unlock()
if count == 0 {
return 0
}
found, idx := h.InternalFind(hb)
if !found {
if h.used == h.allocd {
new_bvs := make([]Bin, h.allocd+DEFAULT_HIST_SIZE)
if idx > 0 {
copy(new_bvs[0:], h.bvs[0:idx])
}
if idx < h.used {
copy(new_bvs[idx+1:], h.bvs[idx:])
}
h.allocd = h.allocd + DEFAULT_HIST_SIZE
h.bvs = new_bvs
} else {
copy(h.bvs[idx+1:], h.bvs[idx:h.used])
}
h.bvs[idx].val = hb.val
h.bvs[idx].exp = hb.exp
h.bvs[idx].count = uint64(count)
h.used++
return h.bvs[idx].count
}
var newval uint64
if count < 0 {
newval = h.bvs[idx].count - uint64(-count)
} else {
newval = h.bvs[idx].count + uint64(count)
}
if newval < h.bvs[idx].count { //rolled
newval = ^uint64(0)
}
h.bvs[idx].count = newval
return newval - h.bvs[idx].count
}
// RecordValues records n occurrences of the given value, returning an error if
// the value is out of range.
func (h *Histogram) RecordValues(v float64, n int64) error {
var hb Bin
hb.SetFromFloat64(v)
h.InsertBin(&hb, n)
return nil
}
// Approximate mean
func (h *Histogram) ApproxMean() float64 {
h.mutex.Lock()
defer h.mutex.Unlock()
divisor := 0.0
sum := 0.0
for i := int16(0); i < h.used; i++ {
midpoint := h.bvs[i].Midpoint()
cardinality := float64(h.bvs[i].count)
divisor += cardinality
sum += midpoint * cardinality
}
if divisor == 0.0 {
return math.NaN()
}
return sum / divisor
}
// Approximate sum
func (h *Histogram) ApproxSum() float64 {
h.mutex.Lock()
defer h.mutex.Unlock()
sum := 0.0
for i := int16(0); i < h.used; i++ {
midpoint := h.bvs[i].Midpoint()
cardinality := float64(h.bvs[i].count)
sum += midpoint * cardinality
}
return sum
}
func (h *Histogram) ApproxQuantile(q_in []float64) ([]float64, error) {
h.mutex.Lock()
defer h.mutex.Unlock()
q_out := make([]float64, len(q_in))
i_q, i_b := 0, int16(0)
total_cnt, bin_width, bin_left, lower_cnt, upper_cnt := 0.0, 0.0, 0.0, 0.0, 0.0
if len(q_in) == 0 {
return q_out, nil
}
// Make sure the requested quantiles are in order
for i_q = 1; i_q < len(q_in); i_q++ {
if q_in[i_q-1] > q_in[i_q] {
return nil, errors.New("out of order")
}
}
// Add up the bins
for i_b = 0; i_b < h.used; i_b++ {
if !h.bvs[i_b].IsNaN() {
total_cnt += float64(h.bvs[i_b].count)
}
}
if total_cnt == 0.0 {
return nil, errors.New("empty_histogram")
}
for i_q = 0; i_q < len(q_in); i_q++ {
if q_in[i_q] < 0.0 || q_in[i_q] > 1.0 {
return nil, errors.New("out of bound quantile")
}
q_out[i_q] = total_cnt * q_in[i_q]
}
for i_b = 0; i_b < h.used; i_b++ {
if h.bvs[i_b].IsNaN() {
continue
}
bin_width = h.bvs[i_b].BinWidth()
bin_left = h.bvs[i_b].Left()
lower_cnt = upper_cnt
upper_cnt = lower_cnt + float64(h.bvs[i_b].count)
break
}
for i_q = 0; i_q < len(q_in); i_q++ {
for i_b < (h.used-1) && upper_cnt < q_out[i_q] {
i_b++
bin_width = h.bvs[i_b].BinWidth()
bin_left = h.bvs[i_b].Left()
lower_cnt = upper_cnt
upper_cnt = lower_cnt + float64(h.bvs[i_b].count)
}
if lower_cnt == q_out[i_q] {
q_out[i_q] = bin_left
} else if upper_cnt == q_out[i_q] {
q_out[i_q] = bin_left + bin_width
} else {
if bin_width == 0 {
q_out[i_q] = bin_left
} else {
q_out[i_q] = bin_left + (q_out[i_q]-lower_cnt)/(upper_cnt-lower_cnt)*bin_width
}
}
}
return q_out, nil
}
// ValueAtQuantile returns the recorded value at the given quantile (0..1).
func (h *Histogram) ValueAtQuantile(q float64) float64 {
h.mutex.Lock()
defer h.mutex.Unlock()
q_in := make([]float64, 1)
q_in[0] = q
q_out, err := h.ApproxQuantile(q_in)
if err == nil && len(q_out) == 1 {
return q_out[0]
}
return math.NaN()
}
// SignificantFigures returns the significant figures used to create the
// histogram
// CH Compat
func (h *Histogram) SignificantFigures() int64 {
return 2
}
// Equals returns true if the two Histograms are equivalent, false if not.
func (h *Histogram) Equals(other *Histogram) bool {
h.mutex.Lock()
other.mutex.Lock()
defer h.mutex.Unlock()
defer other.mutex.Unlock()
switch {
case
h.used != other.used:
return false
default:
for i := int16(0); i < h.used; i++ {
if h.bvs[i].Compare(&other.bvs[i]) != 0 {
return false
}
if h.bvs[i].count != other.bvs[i].count {
return false
}
}
}
return true
}
func (h *Histogram) CopyAndReset() *Histogram {
h.mutex.Lock()
defer h.mutex.Unlock()
newhist := &Histogram{
allocd: h.allocd,
used: h.used,
bvs: h.bvs,
}
h.allocd = DEFAULT_HIST_SIZE
h.bvs = make([]Bin, DEFAULT_HIST_SIZE)
h.used = 0
return newhist
}
func (h *Histogram) DecStrings() []string {
h.mutex.Lock()
defer h.mutex.Unlock()
out := make([]string, h.used)
for i, bin := range h.bvs[0:h.used] {
var buffer bytes.Buffer
buffer.WriteString("H[")
buffer.WriteString(fmt.Sprintf("%3.1e", bin.Value()))
buffer.WriteString("]=")
buffer.WriteString(fmt.Sprintf("%v", bin.count))
out[i] = buffer.String()
}
return out
}
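
The exported surface above (New, RecordValue, ApproxMean, ValueAtQuantile, DecStrings) composes as in this small sketch; the sample values are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/circonus-labs/circonusllhist"
)

func main() {
	h := circonusllhist.New()
	for _, v := range []float64{0.12, 0.34, 0.34, 0.56, 1.2} {
		_ = h.RecordValue(v) // bins each value into a log-linear bucket
	}
	fmt.Println("mean:", h.ApproxMean())
	fmt.Println("p95:", h.ValueAtQuantile(0.95))
	fmt.Println(h.DecStrings()) // e.g. [H[1.2e-01]=1 H[3.4e-01]=2 H[5.6e-01]=1 H[1.2e+00]=1]
}
```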

View File

@ -37,6 +37,10 @@ var (
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
// oneShotCtxValue is set on a context using WithValue(&oneShotValue) so
// that Do() will not retry a request
oneShotCtxValue interface{}
)
var DefaultRequestTimeout = 5 * time.Second
@ -335,6 +339,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
var body []byte
var err error
cerr := &ClusterError{}
isOneShot := ctx.Value(&oneShotCtxValue) != nil
for i := pinned; i < leps+pinned; i++ {
k := i % leps
@ -348,6 +353,9 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
if isOneShot {
return nil, nil, err
}
continue
}
if resp.StatusCode/100 == 5 {
@ -358,6 +366,9 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
}
if isOneShot {
return nil, nil, cerr.Errors[0]
}
continue
}
if k != pinned {

View File

@ -337,7 +337,11 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions
act.Dir = opts.Dir
}
resp, body, err := k.client.Do(ctx, act)
doCtx := ctx
if act.PrevExist == PrevNoExist {
doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
}
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}
@ -385,7 +389,8 @@ func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOption
act.Recursive = opts.Recursive
}
resp, body, err := k.client.Do(ctx, act)
doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}

View File

@ -23,6 +23,11 @@ import (
)
func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
return purgeFile(dirname, suffix, max, interval, stop, nil)
}
// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil.
func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error {
errC := make(chan error, 1)
go func() {
for {
@ -38,6 +43,7 @@ func PurgeFile(dirname string, suffix string, max uint, interval time.Duration,
}
}
sort.Strings(newfnames)
fnames = newfnames
for len(newfnames) > int(max) {
f := path.Join(dirname, newfnames[0])
l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
@ -56,6 +62,11 @@ func PurgeFile(dirname string, suffix string, max uint, interval time.Duration,
plog.Infof("purged file %s successfully", f)
newfnames = newfnames[1:]
}
if purgec != nil {
for i := 0; i < len(fnames)-len(newfnames); i++ {
purgec <- fnames[i]
}
}
select {
case <-time.After(interval):
case <-stop:

View File

@ -34,27 +34,30 @@ import (
"github.com/coreos/etcd/pkg/tlsutil"
)
func NewListener(addr string, scheme string, tlscfg *tls.Config) (l net.Listener, err error) {
if scheme == "unix" || scheme == "unixs" {
// unix sockets via unix://laddr
l, err = NewUnixListener(addr)
} else {
l, err = net.Listen("tcp", addr)
}
if err != nil {
func NewListener(addr, scheme string, tlscfg *tls.Config) (l net.Listener, err error) {
if l, err = newListener(addr, scheme); err != nil {
return nil, err
}
return wrapTLS(addr, scheme, tlscfg, l)
}
if scheme == "https" || scheme == "unixs" {
if tlscfg == nil {
return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr)
}
l = tls.NewListener(l, tlscfg)
func newListener(addr string, scheme string) (net.Listener, error) {
if scheme == "unix" || scheme == "unixs" {
// unix sockets via unix://laddr
return NewUnixListener(addr)
}
return net.Listen("tcp", addr)
}
return l, nil
func wrapTLS(addr, scheme string, tlscfg *tls.Config, l net.Listener) (net.Listener, error) {
if scheme != "https" && scheme != "unixs" {
return l, nil
}
if tlscfg == nil {
l.Close()
return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr)
}
return tls.NewListener(l, tlscfg), nil
}
type TLSInfo struct {

View File

@ -24,15 +24,19 @@ import (
// If read/write on the accepted connection blocks longer than its time limit,
// it will return timeout error.
func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) {
ln, err := NewListener(addr, scheme, tlscfg)
ln, err := newListener(addr, scheme)
if err != nil {
return nil, err
}
return &rwTimeoutListener{
ln = &rwTimeoutListener{
Listener: ln,
rdtimeoutd: rdtimeoutd,
wtimeoutd: wtimeoutd,
}, nil
}
if ln, err = wrapTLS(addr, scheme, tlscfg, ln); err != nil {
return nil, err
}
return ln, nil
}
type rwTimeoutListener struct {

View File

@ -34,6 +34,9 @@ func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.
// the timed out connection will timeout soon after it is idle.
// it should not be put back to http transport as an idle connection for future usage.
tr.MaxIdleConnsPerHost = -1
} else {
// allow more idle connections between peers to avoid unnecessary port allocation.
tr.MaxIdleConnsPerHost = 1024
}
tr.Dial = (&rwTimeoutDialer{

View File

@ -431,7 +431,7 @@ func strctVal(s interface{}) reflect.Value {
v := reflect.ValueOf(s)
// if pointer get the underlying element
if v.Kind() == reflect.Ptr {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}

View File

@ -303,7 +303,7 @@ func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
}
// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed
var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3 and AES-256-CBC supported")
var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported")
// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data
var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type")
@ -333,10 +333,14 @@ func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pk crypto.PrivateKey) ([]byte,
var oidEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7}
var oidEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7}
var oidEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
var oidEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6}
func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) {
alg := eci.ContentEncryptionAlgorithm.Algorithm
if !alg.Equal(oidEncryptionAlgorithmDESCBC) && !alg.Equal(oidEncryptionAlgorithmDESEDE3CBC) && !alg.Equal(oidEncryptionAlgorithmAES256CBC) {
if !alg.Equal(oidEncryptionAlgorithmDESCBC) &&
!alg.Equal(oidEncryptionAlgorithmDESEDE3CBC) &&
!alg.Equal(oidEncryptionAlgorithmAES256CBC) &&
!alg.Equal(oidEncryptionAlgorithmAES128GCM) {
fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg)
return nil, ErrUnsupportedAlgorithm
}
@ -371,12 +375,44 @@ func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) {
case alg.Equal(oidEncryptionAlgorithmDESEDE3CBC):
block, err = des.NewTripleDESCipher(key)
case alg.Equal(oidEncryptionAlgorithmAES256CBC):
fallthrough
case alg.Equal(oidEncryptionAlgorithmAES128GCM):
block, err = aes.NewCipher(key)
}
if err != nil {
return nil, err
}
if alg.Equal(oidEncryptionAlgorithmAES128GCM) {
params := aesGCMParameters{}
paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes
_, err := asn1.Unmarshal(paramBytes, &params)
if err != nil {
return nil, err
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return nil, err
}
if len(params.Nonce) != gcm.NonceSize() {
return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
}
if params.ICVLen != gcm.Overhead() {
return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
}
plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil)
if err != nil {
return nil, err
}
return plaintext, nil
}
iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes
if len(iv) != block.BlockSize() {
return nil, errors.New("pkcs7: encryption algorithm parameters are malformed")
@ -696,27 +732,98 @@ func DegenerateCertificate(cert []byte) ([]byte, error) {
return asn1.Marshal(signedContent)
}
// Encrypt creates and returns an envelope data PKCS7 structure with encrypted
// recipient keys for each recipient public key
// TODO(fullsailor): Add support for encrypting content with other algorithms
func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
const (
EncryptionAlgorithmDESCBC = iota
EncryptionAlgorithmAES128GCM
)
// ContentEncryptionAlgorithm determines the algorithm used to encrypt the
// plaintext message. Change the value of this variable to change which
// algorithm is used in the Encrypt() function.
var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC
// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt
// content with an unsupported algorithm.
var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC and AES-128-GCM supported")
const nonceSize = 12
type aesGCMParameters struct {
Nonce []byte `asn1:"tag:4"`
ICVLen int
}
func encryptAES128GCM(content []byte) ([]byte, *encryptedContentInfo, error) {
// Create AES key and nonce
key := make([]byte, 16)
nonce := make([]byte, nonceSize)
_, err := rand.Read(key)
if err != nil {
return nil, nil, err
}
_, err = rand.Read(nonce)
if err != nil {
return nil, nil, err
}
// Encrypt content
block, err := aes.NewCipher(key)
if err != nil {
return nil, nil, err
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return nil, nil, err
}
ciphertext := gcm.Seal(nil, nonce, content, nil)
// Prepare ASN.1 Encrypted Content Info
paramSeq := aesGCMParameters{
Nonce: nonce,
ICVLen: gcm.Overhead(),
}
paramBytes, err := asn1.Marshal(paramSeq)
if err != nil {
return nil, nil, err
}
eci := encryptedContentInfo{
ContentType: oidData,
ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
Algorithm: oidEncryptionAlgorithmAES128GCM,
Parameters: asn1.RawValue{
Tag: asn1.TagSequence,
Bytes: paramBytes,
},
},
EncryptedContent: marshalEncryptedContent(ciphertext),
}
return key, &eci, nil
}
func encryptDESCBC(content []byte) ([]byte, *encryptedContentInfo, error) {
// Create DES key & CBC IV
key := make([]byte, 8)
iv := make([]byte, des.BlockSize)
_, err := rand.Read(key)
if err != nil {
return nil, err
return nil, nil, err
}
_, err = rand.Read(iv)
if err != nil {
return nil, err
return nil, nil, err
}
// Encrypt padded content
block, err := des.NewCipher(key)
if err != nil {
return nil, err
return nil, nil, err
}
mode := cipher.NewCBCEncrypter(block, iv)
plaintext, err := pad(content, mode.BlockSize())
@ -733,6 +840,41 @@ func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
EncryptedContent: marshalEncryptedContent(cyphertext),
}
return key, &eci, nil
}
// Encrypt creates and returns an envelope data PKCS7 structure with encrypted
// recipient keys for each recipient public key.
//
// The algorithm used to perform encryption is determined by the current value
// of the global ContentEncryptionAlgorithm package variable. By default, the
// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the
// value before calling Encrypt(). For example:
//
// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM
//
// TODO(fullsailor): Add support for encrypting content with other algorithms
func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
var eci *encryptedContentInfo
var key []byte
var err error
// Apply chosen symmetric encryption method
switch ContentEncryptionAlgorithm {
case EncryptionAlgorithmDESCBC:
key, eci, err = encryptDESCBC(content)
case EncryptionAlgorithmAES128GCM:
key, eci, err = encryptAES128GCM(content)
default:
return nil, ErrUnsupportedEncryptionAlgorithm
}
if err != nil {
return nil, err
}
// Prepare each recipient's encrypted cipher key
recipientInfos := make([]recipientInfo, len(recipients))
for i, recipient := range recipients {
@ -757,7 +899,7 @@ func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
// Prepare envelope content
envelope := envelopedData{
EncryptedContentInfo: eci,
EncryptedContentInfo: *eci,
Version: 0,
RecipientInfos: recipientInfos,
}
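
The package-level selector documented above is meant to be flipped by calling code before Encrypt. A minimal sketch follows; the pkcs7 import path and the recipient certificate are assumptions supplied by the application, not part of the diff.

```go
package envelope

import (
	"crypto/x509"

	"github.com/fullsailor/pkcs7" // import path assumed for this vendored copy
)

// encryptForRecipient returns DER-encoded enveloped data whose content is
// AES-128-GCM encrypted; each recipient receives an encrypted copy of the
// content key, as prepared by Encrypt above.
func encryptForRecipient(plaintext []byte, recipientCert *x509.Certificate) ([]byte, error) {
	pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM
	return pkcs7.Encrypt(plaintext, []*x509.Certificate{recipientCert})
}
```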

View File

@ -44,7 +44,7 @@ Please add `-u` flag to update in the future.
### Loading from data sources
A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error.
A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many data sources as you want**. Passing other types will simply return an error.
```go
cfg, err := ini.Load([]byte("raw data"), "filename")
@ -56,7 +56,7 @@ Or start with an empty object:
cfg := ini.Empty()
```
When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later.
When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later.
```go
err := cfg.Append("other file", []byte("other raw data"))
@ -68,6 +68,8 @@ If you have a list of files with possibilities that some of them may not availab
cfg, err := ini.LooseLoad("filename", "filename_404")
```
The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual.
When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
```go
@ -83,7 +85,7 @@ key1, err := cfg.GetKey("Key")
key2, err := cfg.GetKey("KeY")
```
The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual.
If you want to give more advanced load options, use `LoadSources` and take a look at [`LoadOptions`](https://github.com/go-ini/ini/blob/v1.16.1/ini.go#L156).
### Working with sections
@ -269,6 +271,16 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
```
Well, I hate continuation lines, how do I disable that?
```go
cfg, err := ini.LoadSources(ini.LoadOptions{
IgnoreContinuation: true,
}, "filename")
```
Holy crap!
Note that single quotes around values will be stripped:
```ini

View File

@ -61,6 +61,8 @@ err := cfg.Append("other file", []byte("other raw data"))
cfg, err := ini.LooseLoad("filename", "filename_404")
```
更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。
有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写:
```go
@ -76,7 +78,7 @@ key1, err := cfg.GetKey("Key")
key2, err := cfg.GetKey("KeY")
```
更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载
如果您想要更加自定义的加载选项,可以使用 `LoadSources` 方法并参见 [`LoadOptions`](https://github.com/go-ini/ini/blob/v1.16.1/ini.go#L156)
### 操作分区Section
@ -262,6 +264,16 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
```
可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢?
```go
cfg, err := ini.LoadSources(ini.LoadOptions{
IgnoreContinuation: true,
}, "filename")
```
哇靠给力啊!
需要注意的是,值两侧的单引号会被自动剔除:
```ini

View File

@ -36,7 +36,7 @@ const (
// Maximum allowed depth when recursively substituting variable names.
_DEPTH_VALUES = 99
_VERSION = "1.16.1"
_VERSION = "1.18.0"
)
// Version returns current package version literal.
@ -154,10 +154,12 @@ func parseDataSource(source interface{}) (dataSource, error) {
}
type LoadOptions struct {
// Loose indicats whether the parser should ignore nonexistent files or return error.
// Loose indicates whether the parser should ignore nonexistent files or return error.
Loose bool
// Insensitive indicates whether the parser forces all section and key names to lowercase.
Insensitive bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
}
func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {

View File

@ -178,7 +178,7 @@ func hasSurroundedQuote(in string, quote byte) bool {
strings.IndexByte(in[1:], quote) == len(in)-2
}
func (p *parser) readValue(in []byte) (string, error) {
func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
return "", nil
@ -205,8 +205,8 @@ func (p *parser) readValue(in []byte) (string, error) {
// Won't be able to reach here if value only contains whitespace.
line = strings.TrimSpace(line)
// Check continuation lines
if line[len(line)-1] == '\\' {
// Check continuation lines when desired.
if !ignoreContinuation && line[len(line)-1] == '\\' {
return p.readContinuationLines(line[:len(line)-1])
}
@ -302,7 +302,7 @@ func (f *File) parse(reader io.Reader) (err error) {
}
key.isAutoIncr = isAutoIncr
value, err := p.readValue(line[offset:])
value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
if err != nil {
return err
}

View File

@ -76,6 +76,59 @@ func parseDelim(actual string) string {
var reflectTime = reflect.TypeOf(time.Now()).Kind()
// setSliceWithProperType sets proper values to slice based on its type.
func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
strs := key.Strings(delim)
numVals := len(strs)
if numVals == 0 {
return nil
}
var vals interface{}
sliceOf := field.Type().Elem().Kind()
switch sliceOf {
case reflect.String:
vals = strs
case reflect.Int:
vals = key.Ints(delim)
case reflect.Int64:
vals = key.Int64s(delim)
case reflect.Uint:
vals = key.Uints(delim)
case reflect.Uint64:
vals = key.Uint64s(delim)
case reflect.Float64:
vals = key.Float64s(delim)
case reflectTime:
vals = key.Times(delim)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
for i := 0; i < numVals; i++ {
switch sliceOf {
case reflect.String:
slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
case reflect.Int:
slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
case reflect.Int64:
slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
case reflect.Uint:
slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
case reflect.Uint64:
slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
case reflect.Float64:
slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
case reflectTime:
slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
}
}
field.Set(slice)
return nil
}
// setWithProperType sets the proper value to the field based on its type,
// but it does not return an error for a failed parse,
// because we want to use the default value that is already assigned to the struct.
@ -132,29 +185,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
}
field.Set(reflect.ValueOf(timeVal))
case reflect.Slice:
vals := key.Strings(delim)
numVals := len(vals)
if numVals == 0 {
return nil
}
sliceOf := field.Type().Elem().Kind()
var times []time.Time
if sliceOf == reflectTime {
times = key.Times(delim)
}
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
for i := 0; i < numVals; i++ {
switch sliceOf {
case reflectTime:
slice.Index(i).Set(reflect.ValueOf(times[i]))
default:
slice.Index(i).Set(reflect.ValueOf(vals[i]))
}
}
field.Set(slice)
return setSliceWithProperType(key, field, delim)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
@ -239,34 +270,53 @@ func MapTo(v, source interface{}, others ...interface{}) error {
return MapToWithMapper(v, nil, source, others...)
}
// reflectWithProperType does the opposite thing with setWithProperType.
// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
slice := field.Slice(0, field.Len())
if field.Len() == 0 {
return nil
}
var buf bytes.Buffer
sliceOf := field.Type().Elem().Kind()
for i := 0; i < field.Len(); i++ {
switch sliceOf {
case reflect.String:
buf.WriteString(slice.Index(i).String())
case reflect.Int, reflect.Int64:
buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
case reflect.Uint, reflect.Uint64:
buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
case reflect.Float64:
buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
case reflectTime:
buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
buf.WriteString(delim)
}
key.SetValue(buf.String()[:buf.Len()-1])
return nil
}
// reflectWithProperType does the opposite thing as setWithProperType.
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
switch t.Kind() {
case reflect.String:
key.SetValue(field.String())
case reflect.Bool,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Float64,
reflectTime:
key.SetValue(fmt.Sprint(field))
case reflect.Bool:
key.SetValue(fmt.Sprint(field.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
key.SetValue(fmt.Sprint(field.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
key.SetValue(fmt.Sprint(field.Uint()))
case reflect.Float32, reflect.Float64:
key.SetValue(fmt.Sprint(field.Float()))
case reflectTime:
key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
case reflect.Slice:
vals := field.Slice(0, field.Len())
if field.Len() == 0 {
return nil
}
var buf bytes.Buffer
isTime := fmt.Sprint(field.Type()) == "[]time.Time"
for i := 0; i < field.Len(); i++ {
if isTime {
buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339))
} else {
buf.WriteString(fmt.Sprint(vals.Index(i)))
}
buf.WriteString(delim)
}
key.SetValue(buf.String()[:buf.Len()-1])
return reflectSliceWithProperType(key, field, delim)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
@ -294,7 +344,7 @@ func (s *Section) reflectFrom(val reflect.Value) error {
}
if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
(tpField.Type.Kind() == reflect.Struct) {
(tpField.Type.Kind() == reflect.Struct && tpField.Type.Kind() != reflectTime) {
// Note: The only error here is section doesn't exist.
sec, err := s.f.GetSection(fieldName)
if err != nil {
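
The slice handling added above is what makes struct mapping of delimited values work end to end; a small sketch (the comma delimiter is the assumed default, adjustable via the `delim` struct tag):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

type Settings struct {
	Ports []int     `ini:"ports"`
	Rates []float64 `ini:"rates"`
}

func main() {
	var s Settings
	// Keys in the default section map onto the struct fields; delimited
	// values are split and converted by setSliceWithProperType above.
	if err := ini.MapTo(&s, []byte("ports = 8080,8081,8082\nrates = 0.5,0.75")); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.Ports, s.Rates) // [8080 8081 8082] [0.5 0.75]
}
```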

42
vendor/github.com/go-ldap/ldap/Makefile generated vendored Normal file
View File

@ -0,0 +1,42 @@
.PHONY: default install build test quicktest fmt vet lint
default: fmt vet lint build quicktest
install:
go get -t -v ./...
build:
go build -v ./...
test:
go test -v -cover ./...
quicktest:
go test ./...
# Capture output and force failure when there is non-empty output
fmt:
@echo gofmt -l .
@OUTPUT=`gofmt -l . 2>&1`; \
if [ "$$OUTPUT" ]; then \
echo "gofmt must be run on the following files:"; \
echo "$$OUTPUT"; \
exit 1; \
fi
# Only run on go1.5+
vet:
go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult .
# https://github.com/golang/lint
# go get github.com/golang/lint/golint
# Capture output and force failure when there is non-empty output
# Only run on go1.5+
lint:
@echo golint ./...
@OUTPUT=`golint ./... 2>&1`; \
if [ "$$OUTPUT" ]; then \
echo "golint errors:"; \
echo "$$OUTPUT"; \
exit 1; \
fi

View File

@ -13,41 +13,39 @@ Import the latest version with:
import "gopkg.in/ldap.v2"
## Required Libraries:
- gopkg.in/asn1-ber.v1
## Working:
## Features:
- Connecting to LDAP server
- Connecting to LDAP server (non-TLS, TLS, STARTTLS)
- Binding to LDAP server
- Searching for entries
- Compiling string filters to LDAP filters
- Filter Compile / Decompile
- Paging Search Results
- Modify Requests / Responses
- Add Requests / Responses
- Delete Requests / Responses
- Better Unicode support
## Examples:
- search
- modify
## Tests Implemented:
## Contributing:
- Filter Compile / Decompile
## TODO:
- [x] Add Requests / Responses
- [x] Delete Requests / Responses
- [x] Modify DN Requests / Responses
- [ ] Compare Requests / Responses
- [ ] Implement Tests / Benchmarks
Bug reports and pull requests are welcome!
Before submitting a pull request, please make sure tests and verification scripts pass:
```
make all
```
To set up a pre-push hook to run the tests and verify scripts before pushing:
```
ln -s ../../.githooks/pre-push .git/hooks/pre-push
```
---
The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/)

View File

@ -16,8 +16,11 @@ import (
"gopkg.in/asn1-ber.v1"
)
// Attribute represents an LDAP attribute
type Attribute struct {
// Type is the name of the LDAP attribute
Type string
// Vals are the LDAP attribute values
Vals []string
}
@ -32,8 +35,11 @@ func (a *Attribute) encode() *ber.Packet {
return seq
}
// AddRequest represents an LDAP AddRequest operation
type AddRequest struct {
DN string
// DN identifies the entry being added
DN string
// Attributes list the attributes of the new entry
Attributes []Attribute
}
@ -48,10 +54,12 @@ func (a AddRequest) encode() *ber.Packet {
return request
}
// Attribute adds an attribute with the given type and values
func (a *AddRequest) Attribute(attrType string, attrVals []string) {
a.Attributes = append(a.Attributes, Attribute{Type: attrType, Vals: attrVals})
}
// NewAddRequest returns an AddRequest for the given DN, with no attributes
func NewAddRequest(dn string) *AddRequest {
return &AddRequest{
DN: dn,
@ -59,6 +67,7 @@ func NewAddRequest(dn string) *AddRequest {
}
// Add performs the given AddRequest
func (l *Conn) Add(addRequest *AddRequest) error {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
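
Combined with the bind support shown below, the add plumbing above is driven from client code roughly as in this sketch; the server address, credentials, and DNs are placeholders, and ldap.Dial is assumed to be available in this package version.

```go
package main

import (
	"log"

	"gopkg.in/ldap.v2" // import path per the README above
)

func main() {
	// Placeholder address; Dial is assumed from this package version.
	l, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// Bind with placeholder credentials before modifying the directory.
	if err := l.Bind("cn=admin,dc=example,dc=com", "password"); err != nil {
		log.Fatal(err)
	}

	// Build and send an AddRequest using the API shown above.
	add := ldap.NewAddRequest("cn=app,ou=services,dc=example,dc=com")
	add.Attribute("objectClass", []string{"top", "organizationalRole"})
	add.Attribute("cn", []string{"app"})
	if err := l.Add(add); err != nil {
		log.Fatal(err)
	}
}
```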

View File

@ -10,16 +10,22 @@ import (
"gopkg.in/asn1-ber.v1"
)
// SimpleBindRequest represents a username/password bind operation
type SimpleBindRequest struct {
// Username is the name of the Directory object that the client wishes to bind as
Username string
// Password is the credentials to bind with
Password string
// Controls are optional controls to send with the bind request
Controls []Control
}
// SimpleBindResult contains the response from the server
type SimpleBindResult struct {
Controls []Control
}
// NewSimpleBindRequest returns a bind request
func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest {
return &SimpleBindRequest{
Username: username,
@ -39,6 +45,7 @@ func (bindRequest *SimpleBindRequest) encode() *ber.Packet {
return request
}
// SimpleBind performs the simple bind operation defined in the given request
func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
@ -90,6 +97,7 @@ func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResu
return result, nil
}
// Bind performs a bind with the given username and password
func (l *Conn) Bind(username, password string) error {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
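
For callers that want the controls returned by the server, SimpleBind can be used instead of the plain Bind helper; a small sketch with placeholder credentials and an assumed *ldap.Conn:
```
func bindAndCollectControls(conn *ldap.Conn) ([]ldap.Control, error) {
	req := ldap.NewSimpleBindRequest("cn=admin,dc=example,dc=com", "password", nil)
	res, err := conn.SimpleBind(req)
	if err != nil {
		return nil, err
	}
	// res.Controls carries any controls the server attached to the bind response.
	return res.Controls, nil
}
```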

View File

@ -17,18 +17,27 @@ import (
)
const (
MessageQuit = 0
MessageRequest = 1
// MessageQuit causes the processMessages loop to exit
MessageQuit = 0
// MessageRequest sends a request to the server
MessageRequest = 1
// MessageResponse receives a response from the server
MessageResponse = 2
MessageFinish = 3
MessageTimeout = 4
// MessageFinish indicates the client considers a particular message ID to be finished
MessageFinish = 3
// MessageTimeout indicates the client-specified timeout for a particular message ID has been reached
MessageTimeout = 4
)
// PacketResponse contains the packet or error encountered reading a response
type PacketResponse struct {
// Packet is the packet read from the server
Packet *ber.Packet
Error error
// Error is an error encountered while reading
Error error
}
// ReadPacket returns the packet or an error
func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) {
if (pr == nil) || (pr.Packet == nil && pr.Error == nil) {
return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response"))
@ -37,8 +46,10 @@ func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) {
}
type messageContext struct {
id int64
done chan struct{}
id int64
// close(done) should only be called from finishMessage()
done chan struct{}
// close(responses) should only be called from processMessages(), and only sent to from sendResponse()
responses chan *PacketResponse
}
@ -140,6 +151,7 @@ func NewConn(conn net.Conn, isTLS bool) *Conn {
}
}
// Start initializes goroutines to read responses and process messages
func (l *Conn) Start() {
go l.reader()
go l.processMessages()
@ -167,7 +179,7 @@ func (l *Conn) Close() {
l.wgClose.Wait()
}
// Sets the time after a request is sent that a MessageTimeout triggers
// SetTimeout sets the time after a request is sent that a MessageTimeout triggers
func (l *Conn) SetTimeout(timeout time.Duration) {
if timeout > 0 {
l.requestTimeout = timeout
@ -253,15 +265,14 @@ func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags)
l.Debug.Printf("flags&startTLS = %d", flags&startTLS)
if l.isStartingTLS {
l.messageMutex.Unlock()
return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase."))
return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase"))
}
if flags&startTLS != 0 {
if l.outstandingRequests != 0 {
l.messageMutex.Unlock()
return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests"))
} else {
l.isStartingTLS = true
}
l.isStartingTLS = true
}
l.outstandingRequests++
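
The client-side timeout documented above is opt-in; a sketch of enabling it right after dialing (the address is a placeholder and the time package is assumed to be imported):
```
func dialWithTimeout(addr string) (*ldap.Conn, error) {
	conn, err := ldap.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	// Requests that see no response within 10 seconds are failed by the
	// client instead of blocking indefinitely.
	conn.SetTimeout(10 * time.Second)
	return conn, nil
}
```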

View File

@ -12,35 +12,48 @@ import (
)
const (
ControlTypePaging = "1.2.840.113556.1.4.319"
ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1"
// ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt
ControlTypePaging = "1.2.840.113556.1.4.319"
// ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1"
// ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4"
ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5"
ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2"
// ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5"
// ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296
ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2"
)
// ControlTypeMap maps controls to text descriptions
var ControlTypeMap = map[string]string{
ControlTypePaging: "Paging",
ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft",
ControlTypeManageDsaIT: "Manage DSA IT",
}
// Control defines an interface controls provide to encode and describe themselves
type Control interface {
// GetControlType returns the OID
GetControlType() string
// Encode returns the ber packet representation
Encode() *ber.Packet
// String returns a human-readable description
String() string
}
// ControlString implements the Control interface for simple controls
type ControlString struct {
ControlType string
Criticality bool
ControlValue string
}
// GetControlType returns the OID
func (c *ControlString) GetControlType() string {
return c.ControlType
}
// Encode returns the ber packet representation
func (c *ControlString) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")"))
@ -51,19 +64,25 @@ func (c *ControlString) Encode() *ber.Packet {
return packet
}
// String returns a human-readable description
func (c *ControlString) String() string {
return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue)
}
// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt
type ControlPaging struct {
// PagingSize indicates the page size
PagingSize uint32
Cookie []byte
// Cookie is an opaque value returned by the server to track a paging cursor
Cookie []byte
}
// GetControlType returns the OID
func (c *ControlPaging) GetControlType() string {
return ControlTypePaging
}
// Encode returns the ber packet representation
func (c *ControlPaging) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")"))
@ -81,6 +100,7 @@ func (c *ControlPaging) Encode() *ber.Packet {
return packet
}
// String returns a human-readable description
func (c *ControlPaging) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q",
@ -91,21 +111,29 @@ func (c *ControlPaging) String() string {
c.Cookie)
}
// SetCookie stores the given cookie in the paging control
func (c *ControlPaging) SetCookie(cookie []byte) {
c.Cookie = cookie
}
// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10
type ControlBeheraPasswordPolicy struct {
Expire int64
Grace int64
Error int8
// Expire contains the number of seconds before a password will expire
Expire int64
// Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password
Grace int64
// Error indicates the error code
Error int8
// ErrorString is a human readable error
ErrorString string
}
// GetControlType returns the OID
func (c *ControlBeheraPasswordPolicy) GetControlType() string {
return ControlTypeBeheraPasswordPolicy
}
// Encode returns the ber packet representation
func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")"))
@ -113,6 +141,7 @@ func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet {
return packet
}
// String returns a human-readable description
func (c *ControlBeheraPasswordPolicy) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s",
@ -125,39 +154,49 @@ func (c *ControlBeheraPasswordPolicy) String() string {
c.ErrorString)
}
// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
type ControlVChuPasswordMustChange struct {
// MustChange indicates if the password is required to be changed
MustChange bool
}
// GetControlType returns the OID
func (c *ControlVChuPasswordMustChange) GetControlType() string {
return ControlTypeVChuPasswordMustChange
}
// Encode returns the ber packet representation
func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet {
return nil
}
// String returns a human-readable description
func (c *ControlVChuPasswordMustChange) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t MustChange: %b",
"Control Type: %s (%q) Criticality: %t MustChange: %v",
ControlTypeMap[ControlTypeVChuPasswordMustChange],
ControlTypeVChuPasswordMustChange,
false,
c.MustChange)
}
// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00
type ControlVChuPasswordWarning struct {
// Expire indicates the time in seconds until the password expires
Expire int64
}
// GetControlType returns the OID
func (c *ControlVChuPasswordWarning) GetControlType() string {
return ControlTypeVChuPasswordWarning
}
// Encode returns the ber packet representation
func (c *ControlVChuPasswordWarning) Encode() *ber.Packet {
return nil
}
// String returns a human-readable description
func (c *ControlVChuPasswordWarning) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t Expire: %b",
@ -167,14 +206,18 @@ func (c *ControlVChuPasswordWarning) String() string {
c.Expire)
}
// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296
type ControlManageDsaIT struct {
// Criticality indicates if this control is required
Criticality bool
}
// GetControlType returns the OID
func (c *ControlManageDsaIT) GetControlType() string {
return ControlTypeManageDsaIT
}
// Encode returns the ber packet representation
func (c *ControlManageDsaIT) Encode() *ber.Packet {
//FIXME
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control")
@ -185,6 +228,7 @@ func (c *ControlManageDsaIT) Encode() *ber.Packet {
return packet
}
// String returns a human-readable description
func (c *ControlManageDsaIT) String() string {
return fmt.Sprintf(
"Control Type: %s (%q) Criticality: %t",
@ -193,10 +237,12 @@ func (c *ControlManageDsaIT) String() string {
c.Criticality)
}
// NewControlManageDsaIT returns a ControlManageDsaIT control
func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT {
return &ControlManageDsaIT{Criticality: Criticality}
}
// FindControl returns the first control of the given type in the list, or nil
func FindControl(controls []Control, controlType string) Control {
for _, c := range controls {
if c.GetControlType() == controlType {
@ -206,6 +252,7 @@ func FindControl(controls []Control, controlType string) Control {
return nil
}
// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made
func DecodeControl(packet *ber.Packet) Control {
ControlType := packet.Children[0].Value.(string)
Criticality := false
@ -303,6 +350,7 @@ func DecodeControl(packet *ber.Packet) Control {
return c
}
// NewControlString returns a generic control
func NewControlString(controlType string, criticality bool, controlValue string) *ControlString {
return &ControlString{
ControlType: controlType,
@ -311,10 +359,12 @@ func NewControlString(controlType string, criticality bool, controlValue string)
}
}
// NewControlPaging returns a paging control
func NewControlPaging(pagingSize uint32) *ControlPaging {
return &ControlPaging{PagingSize: pagingSize}
}
// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy
func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy {
return &ControlBeheraPasswordPolicy{
Expire: -1,
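
One way the Behera password-policy control documented above might be used is to attach it to a simple bind and read it back out of the result with FindControl; a sketch with placeholder credentials:
```
func bindWithPasswordPolicy(conn *ldap.Conn, dn, password string) error {
	ctrl := ldap.NewControlBeheraPasswordPolicy()
	req := ldap.NewSimpleBindRequest(dn, password, []ldap.Control{ctrl})

	res, err := conn.SimpleBind(req)
	if err != nil {
		return err
	}

	// If the server answered with a password-policy control, report its contents.
	if c := ldap.FindControl(res.Controls, ldap.ControlTypeBeheraPasswordPolicy); c != nil {
		if ppolicy, ok := c.(*ldap.ControlBeheraPasswordPolicy); ok {
			log.Printf("password expires in %d seconds, %d grace binds left",
				ppolicy.Expire, ppolicy.Grace)
		}
	}
	return nil
}
```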

View File

@ -12,8 +12,11 @@ import (
"gopkg.in/asn1-ber.v1"
)
// DelRequest implements an LDAP deletion request
type DelRequest struct {
DN string
// DN is the name of the directory entry to delete
DN string
// Controls hold optional controls to send with the request
Controls []Control
}
@ -23,6 +26,7 @@ func (d DelRequest) encode() *ber.Packet {
return request
}
// NewDelRequest creates a delete request for the given DN and controls
func NewDelRequest(DN string,
Controls []Control) *DelRequest {
return &DelRequest{
@ -31,6 +35,7 @@ func NewDelRequest(DN string,
}
}
// Del executes the given delete request
func (l *Conn) Del(delRequest *DelRequest) error {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
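
The delete request and Del call above combine into a short helper; a sketch with an assumed bound connection:
```
func removeEntry(conn *ldap.Conn, dn string) error {
	// nil controls; NewDelRequest accepts optional controls as its second argument.
	return conn.Del(ldap.NewDelRequest(dn, nil))
}
```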

vendor/github.com/go-ldap/ldap/dn.go (generated, vendored)
View File

@ -55,19 +55,25 @@ import (
ber "gopkg.in/asn1-ber.v1"
)
// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514
type AttributeTypeAndValue struct {
Type string
// Type is the attribute type
Type string
// Value is the attribute value
Value string
}
// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514
type RelativeDN struct {
Attributes []*AttributeTypeAndValue
}
// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514
type DN struct {
RDNs []*RelativeDN
}
// ParseDN returns a distinguishedName or an error
func ParseDN(str string) (*DN, error) {
dn := new(DN)
dn.RDNs = make([]*RelativeDN, 0)
@ -94,11 +100,9 @@ func ParseDN(str string) (*DN, error) {
dst := []byte{0}
n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2]))
if err != nil {
return nil, errors.New(
fmt.Sprintf("Failed to decode escaped character: %s", err))
return nil, fmt.Errorf("Failed to decode escaped character: %s", err)
} else if n != 1 {
return nil, errors.New(
fmt.Sprintf("Expected 1 byte when un-escaping, got %d", n))
return nil, fmt.Errorf("Expected 1 byte when un-escaping, got %d", n)
}
buffer.WriteByte(dst[0])
i++
@ -119,12 +123,11 @@ func ParseDN(str string) (*DN, error) {
} else {
data = str[i:]
}
raw_ber, err := enchex.DecodeString(data)
rawBER, err := enchex.DecodeString(data)
if err != nil {
return nil, errors.New(
fmt.Sprintf("Failed to decode BER encoding: %s", err))
return nil, fmt.Errorf("Failed to decode BER encoding: %s", err)
}
packet := ber.DecodePacket(raw_ber)
packet := ber.DecodePacket(rawBER)
buffer.WriteString(packet.Data.String())
i += len(data) - 1
}
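
ParseDN returns the nested structures documented above; a small sketch of walking them:
```
func printDN(raw string) error {
	dn, err := ldap.ParseDN(raw)
	if err != nil {
		return err
	}
	for _, rdn := range dn.RDNs {
		for _, av := range rdn.Attributes {
			fmt.Printf("%s = %s\n", av.Type, av.Value)
		}
	}
	return nil
}
```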

View File

@ -56,6 +56,7 @@ const (
ErrorUnexpectedResponse = 205
)
// LDAPResultCodeMap contains string descriptions for LDAP error codes
var LDAPResultCodeMap = map[uint8]string{
LDAPResultSuccess: "Success",
LDAPResultOperationsError: "Operations Error",
@ -115,8 +116,11 @@ func getLDAPResultCode(packet *ber.Packet) (code uint8, description string) {
return ErrorNetwork, "Invalid packet format"
}
// Error holds LDAP error information
type Error struct {
Err error
// Err is the underlying error
Err error
// ResultCode is the LDAP error code
ResultCode uint8
}
@ -124,10 +128,12 @@ func (e *Error) Error() string {
return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error())
}
// NewError creates an LDAP error with the given code and underlying error
func NewError(resultCode uint8, err error) error {
return &Error{ResultCode: resultCode, Err: err}
}
// IsErrorWithCode returns true if the given error is an LDAP error with the given result code
func IsErrorWithCode(err error, desiredResultCode uint8) bool {
if err == nil {
return false
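
IsErrorWithCode is the intended way to branch on a specific LDAP result code; a sketch that treats a missing search base as an empty result (the LDAPResultNoSuchObject constant is assumed from the same package and is not shown in this hunk):
```
func searchIgnoringMissingBase(conn *ldap.Conn, req *ldap.SearchRequest) (*ldap.SearchResult, error) {
	res, err := conn.Search(req)
	if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) {
		// Treat a missing base object as an empty result rather than a hard failure.
		return &ldap.SearchResult{}, nil
	}
	return res, err
}
```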

View File

@ -15,6 +15,7 @@ import (
"gopkg.in/asn1-ber.v1"
)
// Filter choices
const (
FilterAnd = 0
FilterOr = 1
@ -28,6 +29,7 @@ const (
FilterExtensibleMatch = 9
)
// FilterMap contains human readable descriptions of Filter choices
var FilterMap = map[uint64]string{
FilterAnd: "And",
FilterOr: "Or",
@ -41,18 +43,21 @@ var FilterMap = map[uint64]string{
FilterExtensibleMatch: "Extensible Match",
}
// SubstringFilter options
const (
FilterSubstringsInitial = 0
FilterSubstringsAny = 1
FilterSubstringsFinal = 2
)
// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices
var FilterSubstringsMap = map[uint64]string{
FilterSubstringsInitial: "Substrings Initial",
FilterSubstringsAny: "Substrings Any",
FilterSubstringsFinal: "Substrings Final",
}
// MatchingRuleAssertion choices
const (
MatchingRuleAssertionMatchingRule = 1
MatchingRuleAssertionType = 2
@ -60,6 +65,7 @@ const (
MatchingRuleAssertionDNAttributes = 4
)
// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices
var MatchingRuleAssertionMap = map[uint64]string{
MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule",
MatchingRuleAssertionType: "Matching Rule Assertion Type",
@ -67,6 +73,7 @@ var MatchingRuleAssertionMap = map[uint64]string{
MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes",
}
// CompileFilter converts a string representation of a filter into a BER-encoded packet
func CompileFilter(filter string) (*ber.Packet, error) {
if len(filter) == 0 || filter[0] != '(' {
return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('"))
@ -81,6 +88,7 @@ func CompileFilter(filter string) (*ber.Packet, error) {
return packet, nil
}
// DecompileFilter converts a packet representation of a filter into a string representation
func DecompileFilter(packet *ber.Packet) (ret string, err error) {
defer func() {
if r := recover(); r != nil {
@ -239,11 +247,13 @@ func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
packet.AppendChild(child)
return packet, newPos, err
default:
READING_ATTR := 0
READING_EXTENSIBLE_MATCHING_RULE := 1
READING_CONDITION := 2
const (
stateReadingAttr = 0
stateReadingExtensibleMatchingRule = 1
stateReadingCondition = 2
)
state := READING_ATTR
state := stateReadingAttr
attribute := ""
extensibleDNAttributes := false
@ -261,56 +271,56 @@ func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
}
switch state {
case READING_ATTR:
case stateReadingAttr:
switch {
// Extensible rule, with only DN-matching
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
extensibleDNAttributes = true
state = READING_CONDITION
state = stateReadingCondition
newPos += 5
// Extensible rule, with DN-matching and a matching OID
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
extensibleDNAttributes = true
state = READING_EXTENSIBLE_MATCHING_RULE
state = stateReadingExtensibleMatchingRule
newPos += 4
// Extensible rule, with attr only
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
state = READING_CONDITION
state = stateReadingCondition
newPos += 2
// Extensible rule, with no DN attribute matching
case currentRune == ':':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch])
state = READING_EXTENSIBLE_MATCHING_RULE
newPos += 1
state = stateReadingExtensibleMatchingRule
newPos++
// Equality condition
case currentRune == '=':
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch])
state = READING_CONDITION
newPos += 1
state = stateReadingCondition
newPos++
// Greater-than or equal
case currentRune == '>' && strings.HasPrefix(remainingFilter, ">="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual])
state = READING_CONDITION
state = stateReadingCondition
newPos += 2
// Less-than or equal
case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual])
state = READING_CONDITION
state = stateReadingCondition
newPos += 2
// Approx
case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="):
packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch])
state = READING_CONDITION
state = stateReadingCondition
newPos += 2
// Still reading the attribute name
@ -319,12 +329,12 @@ func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
newPos += currentWidth
}
case READING_EXTENSIBLE_MATCHING_RULE:
case stateReadingExtensibleMatchingRule:
switch {
// Matching rule OID is done
case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="):
state = READING_CONDITION
state = stateReadingCondition
newPos += 2
// Still reading the matching rule oid
@ -333,7 +343,7 @@ func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
newPos += currentWidth
}
case READING_CONDITION:
case stateReadingCondition:
// append to the condition
condition += fmt.Sprintf("%c", currentRune)
newPos += currentWidth
@ -369,9 +379,9 @@ func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
}
// Add the value (only required child)
encodedString, err := escapedStringToEncodedBytes(condition)
if err != nil {
return packet, newPos, err
encodedString, encodeErr := escapedStringToEncodedBytes(condition)
if encodeErr != nil {
return packet, newPos, encodeErr
}
packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue]))
@ -401,17 +411,17 @@ func compileFilter(filter string, pos int) (*ber.Packet, int, error) {
default:
tag = FilterSubstringsAny
}
encodedString, err := escapedStringToEncodedBytes(part)
if err != nil {
return packet, newPos, err
encodedString, encodeErr := escapedStringToEncodedBytes(part)
if encodeErr != nil {
return packet, newPos, encodeErr
}
seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)]))
}
packet.AppendChild(seq)
default:
encodedString, err := escapedStringToEncodedBytes(condition)
if err != nil {
return packet, newPos, err
encodedString, encodeErr := escapedStringToEncodedBytes(condition)
if encodeErr != nil {
return packet, newPos, encodeErr
}
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute"))
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition"))
@ -440,12 +450,12 @@ func escapedStringToEncodedBytes(escapedString string) (string, error) {
if i+2 > len(escapedString) {
return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter"))
}
if escByte, decodeErr := hexpac.DecodeString(escapedString[i+1 : i+3]); decodeErr != nil {
escByte, decodeErr := hexpac.DecodeString(escapedString[i+1 : i+3])
if decodeErr != nil {
return "", NewError(ErrorFilterCompile, errors.New("ldap: invalid characters for escape in filter"))
} else {
buffer.WriteByte(escByte[0])
i += 2 // +1 from end of loop, so 3 total for \xx.
}
buffer.WriteByte(escByte[0])
i += 2 // +1 from end of loop, so 3 total for \xx.
} else {
buffer.WriteRune(currentRune)
}
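
A filter string and its packet form can be round-tripped through CompileFilter and DecompileFilter; a quick sketch:
```
func roundTripFilter(filter string) (string, error) {
	packet, err := ldap.CompileFilter(filter)
	if err != nil {
		return "", err
	}
	// DecompileFilter should yield an equivalent string representation.
	return ldap.DecompileFilter(packet)
}
```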

View File

@ -36,6 +36,7 @@ const (
ApplicationExtendedResponse = 24
)
// ApplicationMap contains human readable descriptions of LDAP Application Codes
var ApplicationMap = map[uint8]string{
ApplicationBindRequest: "Bind Request",
ApplicationBindResponse: "Bind Response",
@ -72,6 +73,7 @@ const (
BeheraPasswordInHistory = 8
)
// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes
var BeheraPasswordPolicyErrorMap = map[int8]string{
BeheraPasswordExpired: "Password expired",
BeheraAccountLocked: "Account locked",
@ -237,6 +239,7 @@ func addDefaultLDAPResponseDescriptions(packet *ber.Packet) {
}
}
// DebugBinaryFile reads and prints packets from the given filename
func DebugBinaryFile(fileName string) error {
file, err := ioutil.ReadFile(fileName)
if err != nil {

View File

@ -36,14 +36,18 @@ import (
"gopkg.in/asn1-ber.v1"
)
// Change operation choices
const (
AddAttribute = 0
DeleteAttribute = 1
ReplaceAttribute = 2
)
// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
type PartialAttribute struct {
// Type is the type of the partial attribute
Type string
// Vals are the values of the partial attribute
Vals []string
}
@ -58,21 +62,29 @@ func (p *PartialAttribute) encode() *ber.Packet {
return seq
}
// ModifyRequest as defined in https://tools.ietf.org/html/rfc4511
type ModifyRequest struct {
DN string
AddAttributes []PartialAttribute
DeleteAttributes []PartialAttribute
// DN is the distinguishedName of the directory entry to modify
DN string
// AddAttributes contain the attributes to add
AddAttributes []PartialAttribute
// DeleteAttributes contain the attributes to delete
DeleteAttributes []PartialAttribute
// ReplaceAttributes contain the attributes to replace
ReplaceAttributes []PartialAttribute
}
// Add inserts the given attribute to the list of attributes to add
func (m *ModifyRequest) Add(attrType string, attrVals []string) {
m.AddAttributes = append(m.AddAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
}
// Delete inserts the given attribute to the list of attributes to delete
func (m *ModifyRequest) Delete(attrType string, attrVals []string) {
m.DeleteAttributes = append(m.DeleteAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
}
// Replace inserts the given attribute to the list of attributes to replace
func (m *ModifyRequest) Replace(attrType string, attrVals []string) {
m.ReplaceAttributes = append(m.ReplaceAttributes, PartialAttribute{Type: attrType, Vals: attrVals})
}
@ -103,6 +115,7 @@ func (m ModifyRequest) encode() *ber.Packet {
return request
}
// NewModifyRequest creates a modify request for the given DN
func NewModifyRequest(
dn string,
) *ModifyRequest {
@ -111,6 +124,7 @@ func NewModifyRequest(
}
}
// Modify performs the ModifyRequest
func (l *Conn) Modify(modifyRequest *ModifyRequest) error {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
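
The Add, Delete, and Replace helpers above accumulate partial attributes on a single ModifyRequest; a sketch with placeholder DN and values, assuming a bound connection:
```
func updateUser(conn *ldap.Conn) error {
	req := ldap.NewModifyRequest("uid=jdoe,ou=people,dc=example,dc=com")
	req.Replace("mail", []string{"jdoe@example.com"})
	req.Add("description", []string{"updated via go-ldap"})
	req.Delete("telephoneNumber", []string{}) // empty value list removes the attribute
	return conn.Modify(req)
}
```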

View File

@ -16,13 +16,21 @@ const (
passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1"
)
// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt
type PasswordModifyRequest struct {
// UserIdentity is an optional string representation of the user associated with the request.
// This string may or may not be an LDAPDN [RFC2253].
// If no UserIdentity field is present, the request acts upon the password of the user currently associated with the LDAP session
UserIdentity string
OldPassword string
NewPassword string
// OldPassword, if present, contains the user's current password
OldPassword string
// NewPassword, if present, contains the desired password for this user
NewPassword string
}
// PasswordModifyResult holds the server response to a PasswordModifyRequest
type PasswordModifyResult struct {
// GeneratedPassword holds a password generated by the server, if present
GeneratedPassword string
}
@ -47,7 +55,7 @@ func (r *PasswordModifyRequest) encode() (*ber.Packet, error) {
return request, nil
}
// Create a new PasswordModifyRequest
// NewPasswordModifyRequest creates a new PasswordModifyRequest
//
// According to RFC 3062:
// userIdentity is a string representing the user associated with the request.
@ -72,6 +80,7 @@ func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPasswo
}
}
// PasswordModify performs the modification request
func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
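
A password change for the currently bound user follows directly from the request documented above; a sketch in which the empty user identity targets the bound user and the empty new password asks the server to generate one:
```
func rotatePassword(conn *ldap.Conn, oldPassword string) (string, error) {
	req := ldap.NewPasswordModifyRequest("", oldPassword, "")
	res, err := conn.PasswordModify(req)
	if err != nil {
		return "", err
	}
	// GeneratedPassword is only populated when the server chose the new password.
	return res.GeneratedPassword, nil
}
```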

View File

@ -68,18 +68,21 @@ import (
"gopkg.in/asn1-ber.v1"
)
// scope choices
const (
ScopeBaseObject = 0
ScopeSingleLevel = 1
ScopeWholeSubtree = 2
)
// ScopeMap contains human readable descriptions of scope choices
var ScopeMap = map[int]string{
ScopeBaseObject: "Base Object",
ScopeSingleLevel: "Single Level",
ScopeWholeSubtree: "Whole Subtree",
}
// derefAliases
const (
NeverDerefAliases = 0
DerefInSearching = 1
@ -87,6 +90,7 @@ const (
DerefAlways = 3
)
// DerefMap contains human readable descriptions of derefAliases choices
var DerefMap = map[int]string{
NeverDerefAliases: "NeverDerefAliases",
DerefInSearching: "DerefInSearching",
@ -114,11 +118,15 @@ func NewEntry(dn string, attributes map[string][]string) *Entry {
}
}
// Entry represents a single search result entry
type Entry struct {
DN string
// DN is the distinguished name of the entry
DN string
// Attributes are the returned attributes for the entry
Attributes []*EntryAttribute
}
// GetAttributeValues returns the values for the named attribute, or an empty list
func (e *Entry) GetAttributeValues(attribute string) []string {
for _, attr := range e.Attributes {
if attr.Name == attribute {
@ -128,6 +136,7 @@ func (e *Entry) GetAttributeValues(attribute string) []string {
return []string{}
}
// GetRawAttributeValues returns the byte values for the named attribute, or an empty list
func (e *Entry) GetRawAttributeValues(attribute string) [][]byte {
for _, attr := range e.Attributes {
if attr.Name == attribute {
@ -137,6 +146,7 @@ func (e *Entry) GetRawAttributeValues(attribute string) [][]byte {
return [][]byte{}
}
// GetAttributeValue returns the first value for the named attribute, or ""
func (e *Entry) GetAttributeValue(attribute string) string {
values := e.GetAttributeValues(attribute)
if len(values) == 0 {
@ -145,6 +155,7 @@ func (e *Entry) GetAttributeValue(attribute string) string {
return values[0]
}
// GetRawAttributeValue returns the first value for the named attribute, or an empty slice
func (e *Entry) GetRawAttributeValue(attribute string) []byte {
values := e.GetRawAttributeValues(attribute)
if len(values) == 0 {
@ -153,6 +164,7 @@ func (e *Entry) GetRawAttributeValue(attribute string) []byte {
return values[0]
}
// Print outputs a human-readable description
func (e *Entry) Print() {
fmt.Printf("DN: %s\n", e.DN)
for _, attr := range e.Attributes {
@ -160,6 +172,7 @@ func (e *Entry) Print() {
}
}
// PrettyPrint outputs a human-readable description indenting
func (e *Entry) PrettyPrint(indent int) {
fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN)
for _, attr := range e.Attributes {
@ -180,38 +193,51 @@ func NewEntryAttribute(name string, values []string) *EntryAttribute {
}
}
// EntryAttribute holds a single attribute
type EntryAttribute struct {
Name string
Values []string
// Name is the name of the attribute
Name string
// Values contain the string values of the attribute
Values []string
// ByteValues contain the raw values of the attribute
ByteValues [][]byte
}
// Print outputs a human-readable description
func (e *EntryAttribute) Print() {
fmt.Printf("%s: %s\n", e.Name, e.Values)
}
// PrettyPrint outputs a human-readable description with indenting
func (e *EntryAttribute) PrettyPrint(indent int) {
fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values)
}
// SearchResult holds the server's response to a search request
type SearchResult struct {
Entries []*Entry
// Entries are the returned entries
Entries []*Entry
// Referrals are the returned referrals
Referrals []string
Controls []Control
// Controls are the returned controls
Controls []Control
}
// Print outputs a human-readable description
func (s *SearchResult) Print() {
for _, entry := range s.Entries {
entry.Print()
}
}
// PrettyPrint outputs a human-readable description with indenting
func (s *SearchResult) PrettyPrint(indent int) {
for _, entry := range s.Entries {
entry.PrettyPrint(indent)
}
}
// SearchRequest represents a search request to send to the server
type SearchRequest struct {
BaseDN string
Scope int
@ -247,6 +273,7 @@ func (s *SearchRequest) encode() (*ber.Packet, error) {
return request, nil
}
// NewSearchRequest creates a new search request
func NewSearchRequest(
BaseDN string,
Scope, DerefAliases, SizeLimit, TimeLimit int,
@ -341,6 +368,7 @@ func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32)
return searchResult, nil
}
// Search performs the given search request
func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) {
packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request")
packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID"))
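
SearchWithPaging wraps the paging-control handling for large result sets; a sketch requesting pages of 100 entries with a placeholder base DN and filter:
```
func allPeople(conn *ldap.Conn) ([]*ldap.Entry, error) {
	req := ldap.NewSearchRequest(
		"dc=example,dc=com",
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		"(objectClass=person)",
		[]string{"cn"},
		nil,
	)
	res, err := conn.SearchWithPaging(req, 100)
	if err != nil {
		return nil, err
	}
	return res.Entries, nil
}
```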

View File

@ -69,3 +69,7 @@ Alexander Inozemtsev <alexander.inozemtsev@gmail.com>
Rob McColl <rob@robmccoll.com>; <rmccoll@ionicsecurity.com>
Viktor Tönköl <viktor.toenkoel@motionlogic.de>
Ian Lozinski <ian.lozinski@gmail.com>
Michael Highstead <highstead@gmail.com>
Sarah Brown <esbie.is@gmail.com>
Caleb Doxsey <caleb@datadoghq.com>
Frederic Hemery <frederic.hemery@datadoghq.com>

View File

@ -481,6 +481,13 @@ func (pool *hostConnPool) connect() (err error) {
if err == nil {
break
}
if opErr, isOpErr := err.(*net.OpError); isOpErr {
// if the error is not a temporary error (ex: network unreachable) don't
// retry
if !opErr.Temporary() {
break
}
}
}
if err != nil {

View File

@ -37,6 +37,8 @@ func goType(t TypeInfo) reflect.Type {
return reflect.TypeOf(*new(float64))
case TypeInt:
return reflect.TypeOf(*new(int))
case TypeSmallInt:
return reflect.TypeOf(*new(int16))
case TypeDecimal:
return reflect.TypeOf(*new(*inf.Dec))
case TypeUUID, TypeTimeUUID:

View File

@ -1255,6 +1255,10 @@ func marshalMap(info TypeInfo, value interface{}) ([]byte, error) {
return nil, marshalErrorf("marshal: can not marshal none collection type into map")
}
if value == nil {
return nil, nil
}
rv := reflect.ValueOf(value)
if rv.IsNil() {
return nil, nil

View File

@ -89,13 +89,17 @@ func (r randomPartitioner) Name() string {
return "RandomPartitioner"
}
func (p randomPartitioner) Hash(partitionKey []byte) token {
hash := md5.New()
sum := hash.Sum(partitionKey)
// 2 ** 128
var maxHashInt, _ = new(big.Int).SetString("340282366920938463463374607431768211456", 10)
func (p randomPartitioner) Hash(partitionKey []byte) token {
sum := md5.Sum(partitionKey)
val := new(big.Int)
val = val.SetBytes(sum)
val = val.Abs(val)
val.SetBytes(sum[:])
if sum[0] > 127 {
val.Sub(val, maxHashInt)
val.Abs(val)
}
return (*randomToken)(val)
}

View File

@ -378,6 +378,11 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
wire := int(u & 0x7)
if wire == WireEndGroup {
if is_group {
if required > 0 {
// Not enough information to determine the exact field.
// (See below.)
return &RequiredNotSetError{"{Unknown}"}
}
return nil // input is satisfied
}
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)

View File

@ -96,20 +96,17 @@ func (s *ActivityService) ListRepositoryNotifications(owner, repo string, opt *N
}
type markReadOptions struct {
LastReadAt time.Time `url:"last_read_at,omitempty"`
LastReadAt time.Time `json:"last_read_at,omitempty"`
}
// MarkNotificationsRead marks all notifications up to lastRead as read.
//
// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-as-read
func (s *ActivityService) MarkNotificationsRead(lastRead time.Time) (*Response, error) {
u := fmt.Sprintf("notifications")
u, err := addOptions(u, markReadOptions{lastRead})
if err != nil {
return nil, err
opts := &markReadOptions{
LastReadAt: lastRead,
}
req, err := s.client.NewRequest("PUT", u, nil)
req, err := s.client.NewRequest("PUT", "notifications", opts)
if err != nil {
return nil, err
}
@ -122,13 +119,11 @@ func (s *ActivityService) MarkNotificationsRead(lastRead time.Time) (*Response,
//
// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-notifications-as-read-in-a-repository
func (s *ActivityService) MarkRepositoryNotificationsRead(owner, repo string, lastRead time.Time) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo)
u, err := addOptions(u, markReadOptions{lastRead})
if err != nil {
return nil, err
opts := &markReadOptions{
LastReadAt: lastRead,
}
req, err := s.client.NewRequest("PUT", u, nil)
u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo)
req, err := s.client.NewRequest("PUT", u, opts)
if err != nil {
return nil, err
}
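
With the change above the last-read timestamp travels in the PUT body instead of the URL, but the call shape for consumers is unchanged; a sketch using an already-authenticated go-github client:
```
func markAllNotificationsRead(client *github.Client) error {
	// The timestamp is serialized as last_read_at in the JSON request body.
	_, err := client.Activity.MarkNotificationsRead(time.Now())
	return err
}
```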

View File

@ -397,3 +397,40 @@ func (s *AuthorizationsService) DeleteGrant(id int) (*Response, error) {
return s.client.Do(req, nil)
}
// Create an impersonation OAuth token.
//
// This requires admin permissions. With the returned Authorization.Token
// you can e.g. create or delete a user's public SSH key. NOTE: creating a
// new token automatically revokes an existing one.
//
// GitHub API docs: https://developer.github.com/enterprise/2.5/v3/users/administration/#create-an-impersonation-oauth-token
func (s *AuthorizationsService) CreateImpersonation(username string, authReq *AuthorizationRequest) (*Authorization, *Response, error) {
u := fmt.Sprintf("admin/users/%v/authorizations", username)
req, err := s.client.NewRequest("POST", u, authReq)
if err != nil {
return nil, nil, err
}
a := new(Authorization)
resp, err := s.client.Do(req, a)
if err != nil {
return nil, resp, err
}
return a, resp, err
}
// Delete an impersonation OAuth token.
//
// NOTE: there can be only one at a time.
//
// GitHub API docs: https://developer.github.com/enterprise/2.5/v3/users/administration/#delete-an-impersonation-oauth-token
func (s *AuthorizationsService) DeleteImpersonation(username string) (*Response, error) {
u := fmt.Sprintf("admin/users/%v/authorizations", username)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
return s.client.Do(req, nil)
}
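
Usage of the new impersonation endpoints might look like the sketch below; this is Enterprise-only, needs a client authenticated as a site admin, and the ScopeRepo scope is chosen purely for illustration.
```
func rotateImpersonationToken(client *github.Client, user string) (string, error) {
	// Creating a new impersonation token revokes any existing one for the user.
	authReq := &github.AuthorizationRequest{
		Scopes: []github.Scope{github.ScopeRepo},
	}
	auth, _, err := client.Authorizations.CreateImpersonation(user, authReq)
	if err != nil {
		return "", err
	}
	if auth.Token == nil {
		return "", nil
	}
	return *auth.Token, nil
}

func revokeImpersonationToken(client *github.Client, user string) error {
	_, err := client.Authorizations.DeleteImpersonation(user)
	return err
}
```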

View File

@ -6,6 +6,10 @@
/*
Package github provides a client for using the GitHub API.
Usage:
import "github.com/google/go-github/github"
Construct a new GitHub client, then use the various services on the client to
access different parts of the GitHub API. For example:
@ -14,7 +18,9 @@ access different parts of the GitHub API. For example:
// list all organizations for user "willnorris"
orgs, _, err := client.Organizations.List("willnorris", nil)
Set optional parameters for an API method by passing an Options object.
Some API methods have optional parameters that can be passed. For example:
client := github.NewClient(nil)
// list recently updated repositories for org "github"
opt := &github.RepositoryListByOrgOptions{Sort: "updated"}
@ -51,18 +57,23 @@ Note that when using an authenticated Client, all calls made by the client will
include the specified OAuth token. Therefore, authenticated clients should
almost never be shared between different users.
See the oauth2 docs for complete instructions on using that library.
For API methods that require HTTP Basic Authentication, use the
BasicAuthTransport.
Rate Limiting
GitHub imposes a rate limit on all API clients. Unauthenticated clients are
GitHub imposes a rate limit on all API clients. Unauthenticated clients are
limited to 60 requests per hour, while authenticated clients can make up to
5,000 requests per hour. To receive the higher rate limit when making calls
5,000 requests per hour. To receive the higher rate limit when making calls
that are not issued on behalf of a user, use the
UnauthenticatedRateLimitedTransport.
The Rate method on a client returns the rate limit information based on the most
recent API call. This is updated on every call, but may be out of date if it's
recent API call. This is updated on every call, but may be out of date if it's
been some time since the last API call and other clients have made subsequent
requests since then. You can always call RateLimits() directly to get the most
requests since then. You can always call RateLimits() directly to get the most
up-to-date rate limit data for the client.
To detect an API rate limit error, you can check if its type is *github.RateLimitError:
@ -79,11 +90,9 @@ Conditional Requests
The GitHub API has good support for conditional requests which will help
prevent you from burning through your rate limit, as well as help speed up your
application. go-github does not handle conditional requests directly, but is
instead designed to work with a caching http.Transport. We recommend using
https://github.com/gregjones/httpcache, which can be used in conjunction with
https://github.com/sourcegraph/apiproxy to provide additional flexibility and
control of caching rules.
application. go-github does not handle conditional requests directly, but is
instead designed to work with a caching http.Transport. We recommend using
https://github.com/gregjones/httpcache for that.
Learn more about GitHub conditional requests at
https://developer.github.com/v3/#conditional-requests.
@ -93,7 +102,7 @@ Creating and Updating Resources
All structs for GitHub resources use pointer values for all non-repeated fields.
This allows distinguishing between unset fields and those set to a zero-value.
Helper functions have been provided to easily create these pointers for string,
bool, and int values. For example:
bool, and int values. For example:
// create a new private repository named "foo"
repo := &github.Repository{
@ -106,11 +115,14 @@ Users who have worked with protocol buffers should find this pattern familiar.
Pagination
All requests for resource collections (repos, pull requests, issues, etc)
All requests for resource collections (repos, pull requests, issues, etc.)
support pagination. Pagination options are described in the
ListOptions struct and passed to the list methods directly or as an
github.ListOptions struct and passed to the list methods directly or as an
embedded type of a more specific list options struct (for example
PullRequestListOptions). Pages information is available via Response struct.
github.PullRequestListOptions). Pages information is available via the
github.Response struct.
client := github.NewClient(nil)
opt := &github.RepositoryListByOrgOptions{
ListOptions: github.ListOptions{PerPage: 10},
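
A typical walk over the github.Response pagination fields mentioned above could look like this sketch (the org name is a placeholder):
```
func countOrgRepos(client *github.Client, org string) (int, error) {
	opt := &github.RepositoryListByOrgOptions{
		ListOptions: github.ListOptions{PerPage: 10},
	}
	total := 0
	for {
		repos, resp, err := client.Repositories.ListByOrg(org, opt)
		if err != nil {
			return total, err
		}
		total += len(repos)
		if resp.NextPage == 0 {
			break // last page reached
		}
		opt.ListOptions.Page = resp.NextPage
	}
	return total, nil
}
```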

View File

@ -73,9 +73,6 @@ const (
// https://developer.github.com/changes/2016-04-04-git-signing-api-preview/
mediaTypeGitSigningPreview = "application/vnd.github.cryptographer-preview+json"
// https://developer.github.com/changes/2016-5-27-multiple-assignees/
mediaTypeMultipleAssigneesPreview = "application/vnd.github.cerberus-preview+json"
// https://developer.github.com/changes/2016-05-23-timeline-preview-api/
mediaTypeTimelinePreview = "application/vnd.github.mockingbird-preview+json"
@ -84,14 +81,15 @@ const (
// https://developer.github.com/changes/2016-04-21-oauth-authorizations-grants-api-preview/
mediaTypeOAuthGrantAuthorizationsPreview = "application/vnd.github.damage-preview+json"
// https://developer.github.com/changes/2016-07-06-github-pages-preiew-api/
mediaTypePagesPreview = "application/vnd.github.mister-fantastic-preview+json"
)
// A Client manages communication with the GitHub API.
type Client struct {
// HTTP client used to communicate with the API.
client *http.Client
// clientMu protects the client during calls that modify the CheckRedirect func.
clientMu sync.Mutex
clientMu sync.Mutex // clientMu protects the client during calls that modify the CheckRedirect func.
client *http.Client // HTTP client used to communicate with the API.
// Base URL for API requests. Defaults to the public GitHub API, but can be
// set to a domain endpoint to use with GitHub Enterprise. BaseURL should
@ -225,9 +223,12 @@ func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Requ
return nil, err
}
req.Header.Add("Accept", mediaTypeV3)
if body != nil {
req.Header.Set("Content-Type", "application/json")
}
req.Header.Set("Accept", mediaTypeV3)
if c.UserAgent != "" {
req.Header.Add("User-Agent", c.UserAgent)
req.Header.Set("User-Agent", c.UserAgent)
}
return req, nil
}
@ -248,12 +249,12 @@ func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, m
}
req.ContentLength = size
if len(mediaType) == 0 {
if mediaType == "" {
mediaType = defaultMediaType
}
req.Header.Add("Content-Type", mediaType)
req.Header.Add("Accept", mediaTypeV3)
req.Header.Add("User-Agent", c.UserAgent)
req.Header.Set("Content-Type", mediaType)
req.Header.Set("Accept", mediaTypeV3)
req.Header.Set("User-Agent", c.UserAgent)
return req, nil
}
@ -761,7 +762,7 @@ func (t *BasicAuthTransport) RoundTrip(req *http.Request) (*http.Response, error
req = cloneRequest(req) // per RoundTrip contract
req.SetBasicAuth(t.Username, t.Password)
if t.OTP != "" {
req.Header.Add(headerOTP, t.OTP)
req.Header.Set(headerOTP, t.OTP)
}
return t.transport().RoundTrip(req)
}

View File

@ -18,6 +18,7 @@ type IssuesService service
// Issue represents a GitHub issue on a repository.
type Issue struct {
ID *int `json:"id,omitempty"`
Number *int `json:"number,omitempty"`
State *string `json:"state,omitempty"`
Title *string `json:"title,omitempty"`
@ -243,9 +244,6 @@ func (s *IssuesService) Create(owner string, repo string, issue *IssueRequest) (
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeMultipleAssigneesPreview)
i := new(Issue)
resp, err := s.client.Do(req, i)
if err != nil {
@ -265,9 +263,6 @@ func (s *IssuesService) Edit(owner string, repo string, number int, issue *Issue
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeMultipleAssigneesPreview)
i := new(Issue)
resp, err := s.client.Do(req, i)
if err != nil {

View File

@ -58,9 +58,6 @@ func (s *IssuesService) AddAssignees(owner, repo string, number int, assignees [
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeMultipleAssigneesPreview)
issue := &Issue{}
resp, err := s.client.Do(req, issue)
return issue, resp, err
@ -79,9 +76,6 @@ func (s *IssuesService) RemoveAssignees(owner, repo string, number int, assignee
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeMultipleAssigneesPreview)
issue := &Issue{}
resp, err := s.client.Do(req, issue)
return issue, resp, err

View File

@ -18,6 +18,7 @@ type PullRequestsService service
// PullRequest represents a GitHub pull request on a repository.
type PullRequest struct {
ID *int `json:"id,omitempty"`
Number *int `json:"number,omitempty"`
State *string `json:"state,omitempty"`
Title *string `json:"title,omitempty"`

View File

@ -13,6 +13,7 @@ type Pages struct {
Status *string `json:"status,omitempty"`
CNAME *string `json:"cname,omitempty"`
Custom404 *bool `json:"custom_404,omitempty"`
HTMLURL *string `json:"html_url,omitempty"`
}
// PagesError represents a build error for a GitHub Pages site.
@ -42,6 +43,9 @@ func (s *RepositoriesService) GetPagesInfo(owner string, repo string) (*Pages, *
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypePagesPreview)
site := new(Pages)
resp, err := s.client.Do(req, site)
if err != nil {
@ -88,3 +92,25 @@ func (s *RepositoriesService) GetLatestPagesBuild(owner string, repo string) (*P
return build, resp, err
}
// RequestPageBuild requests a build of a GitHub Pages site without needing to push new commit.
//
// GitHub API docs: https://developer.github.com/v3/repos/pages/#request-a-page-build
func (s *RepositoriesService) RequestPageBuild(owner string, repo string) (*PagesBuild, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
req, err := s.client.NewRequest("POST", u, nil)
if err != nil {
return nil, nil, err
}
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypePagesPreview)
build := new(PagesBuild)
resp, err := s.client.Do(req, build)
if err != nil {
return nil, resp, err
}
return build, resp, err
}
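
Kicking off a Pages build with the new method is a single call; a sketch with placeholder owner and repository, assuming PagesBuild exposes a Status field:
```
func triggerPagesBuild(client *github.Client, owner, repo string) error {
	build, _, err := client.Repositories.RequestPageBuild(owner, repo)
	if err != nil {
		return err
	}
	if build.Status != nil {
		log.Println("pages build status:", *build.Status)
	}
	return nil
}
```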

View File

@ -25,6 +25,11 @@ type ServiceQuery struct {
// Service is the service to query.
Service string
// Near allows baking in the name of a node to automatically distance-
// sort from. The magic "_agent" value is supported, which sorts near
// the agent which initiated the request by default.
Near string
// Failover controls what we do if there are no healthy nodes in the
// local datacenter.
Failover QueryDatacenterOptions
@ -40,6 +45,17 @@ type ServiceQuery struct {
Tags []string
}
// QueryTemplate carries the arguments for creating a templated query.
type QueryTemplate struct {
// Type specifies the type of the query template. Currently only
// "name_prefix_match" is supported. This field is required.
Type string
// Regexp allows specifying a regex pattern to match against the name
// of the query being executed.
Regexp string
}
// PreparedQueryDefinition defines a complete prepared query.
type PreparedQueryDefinition struct {
// ID is this UUID-based ID for the query, always generated by Consul.
@ -67,6 +83,11 @@ type PreparedQueryDefinition struct {
// DNS has options that control how the results of this query are
// served over DNS.
DNS QueryDNSOptions
// Template is used to pass through the arguments for creating a
// prepared query with an attached template. If a template is given,
// interpolations are possible in other struct fields.
Template QueryTemplate
}
// PreparedQueryExecuteResponse has the results of executing a query.
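
A templated prepared query using the new Near and Template fields might be registered like the sketch below; the query name and service are placeholders, and the Name field and PreparedQuery().Create call are assumed from the rest of the Consul API client, which is not shown in this hunk.
```
func registerGeoQuery(client *consulapi.Client) (string, error) {
	def := &consulapi.PreparedQueryDefinition{
		Name: "geo-db",
		Template: consulapi.QueryTemplate{
			Type: "name_prefix_match",
		},
		Service: consulapi.ServiceQuery{
			Service: "geo-db",
			Near:    "_agent", // sort results by distance from the requesting agent
		},
	}
	id, _, err := client.PreparedQuery().Create(def, nil)
	return id, err
}
```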

View File

@ -86,6 +86,13 @@ func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) {
// consumers.
type RequestLogHook func(*log.Logger, *http.Request, int)
// ResponseLogHook is like RequestLogHook, but allows running a function
// on each HTTP response. This function will be invoked at the end of
// every HTTP request executed, regardless of whether a subsequent retry
// needs to be performed or not. If the response body is read or closed
// from this method, this will affect the response returned from Do().
type ResponseLogHook func(*log.Logger, *http.Response)
// Client is used to make HTTP requests. It adds additional functionality
// like automatic retries to tolerate minor outages.
type Client struct {
@ -99,6 +106,10 @@ type Client struct {
// RequestLogHook allows a user-supplied function to be called
// before each retry.
RequestLogHook RequestLogHook
// ResponseLogHook allows a user-supplied function to be called
// with the response from each HTTP request executed.
ResponseLogHook ResponseLogHook
}
// NewClient creates a new Client with default settings.
@ -138,6 +149,11 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
}
code = resp.StatusCode
// Call the response logger function if provided.
if c.ResponseLogHook != nil {
c.ResponseLogHook(c.Logger, resp)
}
// Check the response code. We retry on 500-range responses to allow
// the server time to recover, as 500's are typically not permanent
// errors and may relate to outages on the server side.
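
Wiring up the new hook is a single assignment on the client; a sketch that logs the status of every attempt, including ones that are later retried:
```
func newLoggedClient() *retryablehttp.Client {
	client := retryablehttp.NewClient()
	client.ResponseLogHook = func(logger *log.Logger, resp *http.Response) {
		// Invoked after every HTTP attempt, whether or not a retry follows.
		logger.Printf("got %d from %s", resp.StatusCode, resp.Request.URL)
	}
	return client
}
```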

Some files were not shown because too many files have changed in this diff.