Bump aws-sdk-go to v1.25.41 (#7458)

This is in support of #7450 and #7924
This commit is contained in:
Joel Thompson 2019-12-16 19:43:00 -05:00 committed by Becca Petrin
parent e953e32c6a
commit ed20dbf4f7
84 changed files with 24539 additions and 2865 deletions

2
go.mod
View File

@ -21,7 +21,7 @@ require (
github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e
github.com/armon/go-radix v1.0.0
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf
github.com/aws/aws-sdk-go v1.19.39
github.com/aws/aws-sdk-go v1.25.41
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0

2
go.sum
View File

@ -70,6 +70,8 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzs
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.19.39 h1:pIez14zQWSd/TER2Scohm7aCEG2TgoyXSOX6srOKt6o=
github.com/aws/aws-sdk-go v1.19.39/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.25.41 h1:/hj7nZ0586wFqpwjNpzWiUTwtaMgxAZNZKHay80MdXw=
github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=

View File

@ -208,7 +208,7 @@ func (e errorList) Error() string {
// How do we want to handle the array size being zero
if size := len(e); size > 0 {
for i := 0; i < size; i++ {
msg += fmt.Sprintf("%s", e[i].Error())
msg += e[i].Error()
// We check the next index to see if it is within the slice.
// If it is, then we append a newline. We do this, because unit tests
// could be broken with the additional '\n'

View File

@ -70,7 +70,7 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTer
value = value.FieldByNameFunc(func(name string) bool {
if c == name {
return true
} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
} else if !caseSensitive && strings.EqualFold(name, c) {
return true
}
return false
@ -185,13 +185,12 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
// SetValueAtPath sets a value at the case insensitive lexical path inside
// of a structure.
func SetValueAtPath(i interface{}, path string, v interface{}) {
if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
for _, rval := range rvals {
if rval.Kind() == reflect.Ptr && rval.IsNil() {
continue
}
setValue(rval, v)
rvals := rValuesAtPath(i, path, true, false, v == nil)
for _, rval := range rvals {
if rval.Kind() == reflect.Ptr && rval.IsNil() {
continue
}
setValue(rval, v)
}
}

View File

@ -12,6 +12,7 @@ import (
type Config struct {
Config *aws.Config
Handlers request.Handlers
PartitionID string
Endpoint string
SigningRegion string
SigningName string
@ -64,7 +65,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op
default:
maxRetries := aws.IntValue(cfg.MaxRetries)
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
maxRetries = 3
maxRetries = DefaultRetryerMaxNumRetries
}
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
}

View File

@ -1,6 +1,7 @@
package client
import (
"math"
"strconv"
"time"
@ -9,82 +10,142 @@ import (
)
// DefaultRetryer implements basic retry logic using exponential backoff for
// most services. If you want to implement custom retry logic, implement the
// request.Retryer interface or create a structure type that composes this
// struct and override the specific methods. For example, to override only
// the MaxRetries method:
// most services. If you want to implement custom retry logic, you can implement the
// request.Retryer interface.
//
// type retryer struct {
// client.DefaultRetryer
// }
//
// // This implementation always has 100 max retries
// func (d retryer) MaxRetries() int { return 100 }
type DefaultRetryer struct {
// NumMaxRetries is the number of max retries that will be performed.
// By default, this is zero.
NumMaxRetries int
// MinRetryDelay is the minimum retry delay after which retry will be performed.
// If not set, the value is 0ns.
MinRetryDelay time.Duration
// MinThrottleRetryDelay is the minimum retry delay when throttled.
// If not set, the value is 0ns.
MinThrottleDelay time.Duration
// MaxRetryDelay is the maximum retry delay before which retry must be performed.
// If not set, the value is 0ns.
MaxRetryDelay time.Duration
// MaxThrottleDelay is the maximum retry delay when throttled.
// If not set, the value is 0ns.
MaxThrottleDelay time.Duration
}
const (
// DefaultRetryerMaxNumRetries sets maximum number of retries
DefaultRetryerMaxNumRetries = 3
// DefaultRetryerMinRetryDelay sets minimum retry delay
DefaultRetryerMinRetryDelay = 30 * time.Millisecond
// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
// DefaultRetryerMaxRetryDelay sets maximum retry delay
DefaultRetryerMaxRetryDelay = 300 * time.Second
// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
DefaultRetryerMaxThrottleDelay = 300 * time.Second
)
// MaxRetries returns the maximum number of retries the service will make for
// an individual API request.
func (d DefaultRetryer) MaxRetries() int {
return d.NumMaxRetries
}
// setRetryerDefaults sets the default values of the retryer if not set
func (d *DefaultRetryer) setRetryerDefaults() {
if d.MinRetryDelay == 0 {
d.MinRetryDelay = DefaultRetryerMinRetryDelay
}
if d.MaxRetryDelay == 0 {
d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
}
if d.MinThrottleDelay == 0 {
d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
}
if d.MaxThrottleDelay == 0 {
d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
}
}
// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
// Set the upper limit of delay in retrying at ~five minutes
minTime := 30
throttle := d.shouldThrottle(r)
if throttle {
if delay, ok := getRetryDelay(r); ok {
return delay
}
minTime = 500
// if number of max retries is zero, no retries will be performed.
if d.NumMaxRetries == 0 {
return 0
}
// Sets default value for retryer members
d.setRetryerDefaults()
// minDelay is the minimum retryer delay
minDelay := d.MinRetryDelay
var initialDelay time.Duration
isThrottle := r.IsErrorThrottle()
if isThrottle {
if delay, ok := getRetryAfterDelay(r); ok {
initialDelay = delay
}
minDelay = d.MinThrottleDelay
}
retryCount := r.RetryCount
if throttle && retryCount > 8 {
retryCount = 8
} else if retryCount > 13 {
retryCount = 13
// maxDelay is the maximum retryer delay
maxDelay := d.MaxRetryDelay
if isThrottle {
maxDelay = d.MaxThrottleDelay
}
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
return time.Duration(delay) * time.Millisecond
var delay time.Duration
// Logic to cap the retry count based on the minDelay provided
actualRetryCount := int(math.Log2(float64(minDelay))) + 1
if actualRetryCount < 63-retryCount {
delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
if delay > maxDelay {
delay = getJitterDelay(maxDelay / 2)
}
} else {
delay = getJitterDelay(maxDelay / 2)
}
return delay + initialDelay
}
// getJitterDelay returns a jittered delay for retry
func getJitterDelay(duration time.Duration) time.Duration {
return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
}
// ShouldRetry returns true if the request should be retried.
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
// ShouldRetry returns false if number of max retries is 0.
if d.NumMaxRetries == 0 {
return false
}
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable != nil {
return *r.Retryable
}
if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
return true
}
return r.IsErrorRetryable() || d.shouldThrottle(r)
}
// ShouldThrottle returns true if the request should be throttled.
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
switch r.HTTPResponse.StatusCode {
case 429:
case 502:
case 503:
case 504:
default:
return r.IsErrorThrottle()
}
return true
return r.IsErrorRetryable() || r.IsErrorThrottle()
}
// This will look in the Retry-After header, RFC 7231, for how long
// it will wait before attempting another request
func getRetryDelay(r *request.Request) (time.Duration, bool) {
func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
if !canUseRetryAfterHeader(r) {
return 0, false
}
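For illustration (not part of this diff), a minimal sketch of tuning the reworked DefaultRetryer through the aws.Config Retryer field; the delay values are illustrative assumptions, not recommendations:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// client.New picks this retryer up from aws.Config.Retryer when the
	// value implements request.Retryer, as DefaultRetryer does.
	sess := session.Must(session.NewSession(&aws.Config{
		Retryer: client.DefaultRetryer{
			NumMaxRetries:    client.DefaultRetryerMaxNumRetries,
			MinRetryDelay:    50 * time.Millisecond,  // illustrative value
			MinThrottleDelay: 500 * time.Millisecond, // illustrative value
			MaxRetryDelay:    200 * time.Second,      // illustrative value
			MaxThrottleDelay: 200 * time.Second,      // illustrative value
		},
	}))
	_ = sess
}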

View File

@ -67,10 +67,14 @@ func logRequest(r *request.Request) {
if !bodySeekable {
r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
}
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
// Body as a NoOpCloser and will not be reset after read by the HTTP
// client reader.
r.ResetBody()
// Reset the request body because dumpRequest will re-wrap the
// r.HTTPRequest's Body as a NoOpCloser and will not be reset after
// read by the HTTP client reader.
if err := r.Error; err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, err))
return
}
}
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,

View File

@ -5,6 +5,7 @@ type ClientInfo struct {
ServiceName string
ServiceID string
APIVersion string
PartitionID string
Endpoint string
SigningName string
SigningRegion string

View File

@ -0,0 +1,28 @@
package client
import (
"time"
"github.com/aws/aws-sdk-go/aws/request"
)
// NoOpRetryer provides a retryer that performs no retries.
// It should be used when we do not want retries to be performed.
type NoOpRetryer struct{}
// MaxRetries returns the maximum number of retries the service will make for
// an individual API request; for NoOpRetryer this will always be zero.
func (d NoOpRetryer) MaxRetries() int {
return 0
}
// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
return false
}
// RetryRules returns the delay duration before retrying this request again;
// since NoOpRetryer does not retry, RetryRules always returns 0.
func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
return 0
}
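A minimal usage sketch (not from this commit) of the new NoOpRetryer, attaching it to a single client so that client performs no retries:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())

	// request.WithRetryer sets cfg.Retryer; only this STS client is affected.
	cfg := request.WithRetryer(aws.NewConfig(), client.NoOpRetryer{})
	svc := sts.New(sess, cfg)
	_ = svc
}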

View File

@ -20,7 +20,7 @@ type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig structure.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // Create Session with MaxRetries configuration to be shared by multiple
// // service clients.
// sess := session.Must(session.NewSession(&aws.Config{
// MaxRetries: aws.Int(3),
@ -246,12 +246,18 @@ type Config struct {
// Disabling this feature is useful when you want to use local endpoints
// for testing that do not support the modeled host prefix pattern.
DisableEndpointHostPrefix *bool
// STSRegionalEndpoint will enable regional or legacy endpoint resolving
STSRegionalEndpoint endpoints.STSRegionalEndpoint
// S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving
S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
}
// NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // Create Session with MaxRetries configuration to be shared by multiple
// // service clients.
// sess := session.Must(session.NewSession(aws.NewConfig().
// WithMaxRetries(3),
@ -420,6 +426,20 @@ func (c *Config) MergeIn(cfgs ...*Config) {
}
}
// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
// when resolving the endpoint for a service
func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
c.STSRegionalEndpoint = sre
return c
}
// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag
// when resolving the endpoint for a service
func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config {
c.S3UsEast1RegionalEndpoint = sre
return c
}
func mergeInConfig(dst *Config, other *Config) {
if other == nil {
return
@ -520,6 +540,14 @@ func mergeInConfig(dst *Config, other *Config) {
if other.DisableEndpointHostPrefix != nil {
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
}
if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint {
dst.STSRegionalEndpoint = other.STSRegionalEndpoint
}
if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
}
}
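As a usage sketch (assumed, not shown in this diff), the new regional-endpoint flags can be set on a Config before building a session; the region is an arbitrary example:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Opt STS into regional endpoints instead of the legacy global endpoint.
	cfg := aws.NewConfig().
		WithRegion("us-west-2").
		WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint)

	sess := session.Must(session.NewSession(cfg))
	svc := sts.New(sess)
	_ = svc
}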
// Copy will return a shallow copy of the Config object. If any additional

View File

@ -179,6 +179,242 @@ func IntValueMap(src map[string]*int) map[string]int {
return dst
}
// Uint returns a pointer to the uint value passed in.
func Uint(v uint) *uint {
return &v
}
// UintValue returns the value of the uint pointer passed in or
// 0 if the pointer is nil.
func UintValue(v *uint) uint {
if v != nil {
return *v
}
return 0
}
// UintSlice converts a slice of uint values into a slice of
// uint pointers
func UintSlice(src []uint) []*uint {
dst := make([]*uint, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// UintValueSlice converts a slice of uint pointers into a slice of
// uint values
func UintValueSlice(src []*uint) []uint {
dst := make([]uint, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// UintMap converts a string map of uint values into a string
// map of uint pointers
func UintMap(src map[string]uint) map[string]*uint {
dst := make(map[string]*uint)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// UintValueMap converts a string map of uint pointers into a string
// map of uint values
func UintValueMap(src map[string]*uint) map[string]uint {
dst := make(map[string]uint)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
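A brief sketch (not part of the diff) of the new unsigned-integer pointer helpers in use:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	p := aws.Uint(42)
	fmt.Println(aws.UintValue(p))   // 42
	fmt.Println(aws.UintValue(nil)) // 0 when the pointer is nil
}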
// Int8 returns a pointer to the int8 value passed in.
func Int8(v int8) *int8 {
return &v
}
// Int8Value returns the value of the int8 pointer passed in or
// 0 if the pointer is nil.
func Int8Value(v *int8) int8 {
if v != nil {
return *v
}
return 0
}
// Int8Slice converts a slice of int8 values into a slice of
// int8 pointers
func Int8Slice(src []int8) []*int8 {
dst := make([]*int8, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int8ValueSlice converts a slice of int8 pointers into a slice of
// int8 values
func Int8ValueSlice(src []*int8) []int8 {
dst := make([]int8, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int8Map converts a string map of int8 values into a string
// map of int8 pointers
func Int8Map(src map[string]int8) map[string]*int8 {
dst := make(map[string]*int8)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int8ValueMap converts a string map of int8 pointers into a string
// map of int8 values
func Int8ValueMap(src map[string]*int8) map[string]int8 {
dst := make(map[string]int8)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int16 returns a pointer to the int16 value passed in.
func Int16(v int16) *int16 {
return &v
}
// Int16Value returns the value of the int16 pointer passed in or
// 0 if the pointer is nil.
func Int16Value(v *int16) int16 {
if v != nil {
return *v
}
return 0
}
// Int16Slice converts a slice of int16 values into a slice of
// int16 pointers
func Int16Slice(src []int16) []*int16 {
dst := make([]*int16, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int16ValueSlice converts a slice of int16 pointers into a slice of
// int16 values
func Int16ValueSlice(src []*int16) []int16 {
dst := make([]int16, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int16Map converts a string map of int16 values into a string
// map of int16 pointers
func Int16Map(src map[string]int16) map[string]*int16 {
dst := make(map[string]*int16)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int16ValueMap converts a string map of int16 pointers into a string
// map of int16 values
func Int16ValueMap(src map[string]*int16) map[string]int16 {
dst := make(map[string]int16)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int32 returns a pointer to the int32 value passed in.
func Int32(v int32) *int32 {
return &v
}
// Int32Value returns the value of the int32 pointer passed in or
// 0 if the pointer is nil.
func Int32Value(v *int32) int32 {
if v != nil {
return *v
}
return 0
}
// Int32Slice converts a slice of int32 values into a slice of
// int32 pointers
func Int32Slice(src []int32) []*int32 {
dst := make([]*int32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int32ValueSlice converts a slice of int32 pointers into a slice of
// int32 values
func Int32ValueSlice(src []*int32) []int32 {
dst := make([]int32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int32Map converts a string map of int32 values into a string
// map of int32 pointers
func Int32Map(src map[string]int32) map[string]*int32 {
dst := make(map[string]*int32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int32ValueMap converts a string map of int32 pointers into a string
// map of int32 values
func Int32ValueMap(src map[string]*int32) map[string]int32 {
dst := make(map[string]int32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
return &v
@ -238,6 +474,301 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 {
return dst
}
// Uint8 returns a pointer to the uint8 value passed in.
func Uint8(v uint8) *uint8 {
return &v
}
// Uint8Value returns the value of the uint8 pointer passed in or
// 0 if the pointer is nil.
func Uint8Value(v *uint8) uint8 {
if v != nil {
return *v
}
return 0
}
// Uint8Slice converts a slice of uint8 values into a slice of
// uint8 pointers
func Uint8Slice(src []uint8) []*uint8 {
dst := make([]*uint8, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint8ValueSlice converts a slice of uint8 pointers into a slice of
// uint8 values
func Uint8ValueSlice(src []*uint8) []uint8 {
dst := make([]uint8, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint8Map converts a string map of uint8 values into a string
// map of uint8 pointers
func Uint8Map(src map[string]uint8) map[string]*uint8 {
dst := make(map[string]*uint8)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint8ValueMap converts a string map of uint8 pointers into a string
// map of uint8 values
func Uint8ValueMap(src map[string]*uint8) map[string]uint8 {
dst := make(map[string]uint8)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint16 returns a pointer to the uint16 value passed in.
func Uint16(v uint16) *uint16 {
return &v
}
// Uint16Value returns the value of the uint16 pointer passed in or
// 0 if the pointer is nil.
func Uint16Value(v *uint16) uint16 {
if v != nil {
return *v
}
return 0
}
// Uint16Slice converts a slice of uint16 values into a slice of
// uint16 pointers
func Uint16Slice(src []uint16) []*uint16 {
dst := make([]*uint16, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
// uint16 values
func Uint16ValueSlice(src []*uint16) []uint16 {
dst := make([]uint16, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint16Map converts a string map of uint16 values into a string
// map of uint16 pointers
func Uint16Map(src map[string]uint16) map[string]*uint16 {
dst := make(map[string]*uint16)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint16ValueMap converts a string map of uint16 pointers into a string
// map of uint16 values
func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
dst := make(map[string]uint16)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint32 returns a pointer to the uint32 value passed in.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint32Value returns the value of the uint32 pointer passed in or
// 0 if the pointer is nil.
func Uint32Value(v *uint32) uint32 {
if v != nil {
return *v
}
return 0
}
// Uint32Slice converts a slice of uint32 values into a slice of
// uint32 pointers
func Uint32Slice(src []uint32) []*uint32 {
dst := make([]*uint32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
// uint32 values
func Uint32ValueSlice(src []*uint32) []uint32 {
dst := make([]uint32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint32Map converts a string map of uint32 values into a string
// map of uint32 pointers
func Uint32Map(src map[string]uint32) map[string]*uint32 {
dst := make(map[string]*uint32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint32ValueMap converts a string map of uint32 pointers into a string
// map of uint32 values
func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
dst := make(map[string]uint32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint64 returns a pointer to the uint64 value passed in.
func Uint64(v uint64) *uint64 {
return &v
}
// Uint64Value returns the value of the uint64 pointer passed in or
// 0 if the pointer is nil.
func Uint64Value(v *uint64) uint64 {
if v != nil {
return *v
}
return 0
}
// Uint64Slice converts a slice of uint64 values into a slice of
// uint64 pointers
func Uint64Slice(src []uint64) []*uint64 {
dst := make([]*uint64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
// uint64 values
func Uint64ValueSlice(src []*uint64) []uint64 {
dst := make([]uint64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint64Map converts a string map of uint64 values into a string
// map of uint64 pointers
func Uint64Map(src map[string]uint64) map[string]*uint64 {
dst := make(map[string]*uint64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint64ValueMap converts a string map of uint64 pointers into a string
// map of uint64 values
func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
dst := make(map[string]uint64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float32 returns a pointer to the float32 value passed in.
func Float32(v float32) *float32 {
return &v
}
// Float32Value returns the value of the float32 pointer passed in or
// 0 if the pointer is nil.
func Float32Value(v *float32) float32 {
if v != nil {
return *v
}
return 0
}
// Float32Slice converts a slice of float32 values into a slice of
// float32 pointers
func Float32Slice(src []float32) []*float32 {
dst := make([]*float32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Float32ValueSlice converts a slice of float32 pointers into a slice of
// float32 values
func Float32ValueSlice(src []*float32) []float32 {
dst := make([]float32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Float32Map converts a string map of float32 values into a string
// map of float32 pointers
func Float32Map(src map[string]float32) map[string]*float32 {
dst := make(map[string]*float32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Float32ValueMap converts a string map of float32 pointers into a string
// map of float32 values
func Float32ValueMap(src map[string]*float32) map[string]float32 {
dst := make(map[string]float32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
return &v

View File

@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) {
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
}
// Catch all other request errors.
// Catch all request errors, and let the default retrier determine
// if the error is retryable.
r.Error = awserr.New("RequestError", "send request failed", err)
r.Retryable = aws.Bool(true) // network errors are retryable
// Override the error with a context canceled error, if that was canceled.
ctx := r.Context()
@ -184,37 +184,39 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH
// AfterRetryHandler performs final checks to determine if the request should
// be retried and how long to delay.
var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
r.Retryable = aws.Bool(r.ShouldRetry(r))
}
if r.WillRetry() {
r.RetryDelay = r.RetryRules(r)
if sleepFn := r.Config.SleepDelay; sleepFn != nil {
// Support SleepDelay for backwards compatibility and testing
sleepFn(r.RetryDelay)
} else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
r.Error = awserr.New(request.CanceledErrorCode,
"request context canceled", err)
r.Retryable = aws.Bool(false)
return
var AfterRetryHandler = request.NamedHandler{
Name: "core.AfterRetryHandler",
Fn: func(r *request.Request) {
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
r.Retryable = aws.Bool(r.ShouldRetry(r))
}
// when the expired token exception occurs the credentials
// need to be expired locally so that the next request to
// get credentials will trigger a credentials refresh.
if r.IsErrorExpired() {
r.Config.Credentials.Expire()
}
if r.WillRetry() {
r.RetryDelay = r.RetryRules(r)
r.RetryCount++
r.Error = nil
}
}}
if sleepFn := r.Config.SleepDelay; sleepFn != nil {
// Support SleepDelay for backwards compatibility and testing
sleepFn(r.RetryDelay)
} else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
r.Error = awserr.New(request.CanceledErrorCode,
"request context canceled", err)
r.Retryable = aws.Bool(false)
return
}
// when the expired token exception occurs the credentials
// need to be expired locally so that the next request to
// get credentials will trigger a credentials refresh.
if r.IsErrorExpired() {
r.Config.Credentials.Expire()
}
r.RetryCount++
r.Error = nil
}
}}
// ValidateEndpointHandler is a request handler to validate a request had the
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or

View File

@ -50,9 +50,10 @@ package credentials
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// AnonymousCredentials is an empty Credential object that can be used as
@ -83,6 +84,12 @@ type Value struct {
ProviderName string
}
// HasKeys returns if the credentials Value has both AccessKeyID and
// SecretAccessKey value set.
func (v Value) HasKeys() bool {
return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
}
// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own Expired state, and what to
// be expired means.

View File

@ -98,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin
return p
}
// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
// from an arbitrary endpoint concurrently. The client will request the
// NewCredentialsClient returns a pointer to a new Credentials object
// wrapping the endpoint credentials Provider.
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
}

View File

@ -200,7 +200,7 @@ type AssumeRoleProvider struct {
// by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must
// have a value between 0 and 1. Any other value may lead to unexpected behavior.
// With the default MaxJitterFrac value of 0, no jitter will be used.
//
//
// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
// AssumeRole call will be made with an arbitrary Duration between 27m and
// 30m.
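A minimal sketch (assumed usage, not from this commit) of configuring Duration and MaxJitterFrac on the AssumeRoleProvider; the role ARN is a placeholder and the values are illustrative:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example-role",
		func(p *stscreds.AssumeRoleProvider) {
			p.Duration = 30 * time.Minute
			p.MaxJitterFrac = 0.1 // assumed-role duration jittered between 27m and 30m
		})
	_ = creds
}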
@ -258,7 +258,6 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*
// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
// Apply defaults where parameters are not set.
if p.RoleSessionName == "" {
// Try to work out a role name that will hopefully end up unique.

View File

@ -0,0 +1,100 @@
package stscreds
import (
"fmt"
"io/ioutil"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/aws/aws-sdk-go/service/sts/stsiface"
)
const (
// ErrCodeWebIdentity will be used as an error code when constructing
// a new error to be returned during session creation or retrieval.
ErrCodeWebIdentity = "WebIdentityErr"
// WebIdentityProviderName is the web identity provider name
WebIdentityProviderName = "WebIdentityCredentials"
)
// now is used to return a time.Time object representing
// the current time. This can be used to easily test and
// compare test values.
var now = time.Now
// WebIdentityRoleProvider is used to retrieve credentials using
// an OIDC token.
type WebIdentityRoleProvider struct {
credentials.Expiry
client stsiface.STSAPI
ExpiryWindow time.Duration
tokenFilePath string
roleARN string
roleSessionName string
}
// NewWebIdentityCredentials will return a new set of credentials with a given
// configuration, role arn, and token file path.
func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
svc := sts.New(c)
p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
return credentials.NewCredentials(p)
}
// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
// provided stsiface.STSAPI
func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
return &WebIdentityRoleProvider{
client: svc,
tokenFilePath: path,
roleARN: roleARN,
roleSessionName: roleSessionName,
}
}
// Retrieve will attempt to assume a role from a token located at the
// 'WebIdentityTokenFilePath' specified destination; if that path is empty an
// error will be returned.
func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
b, err := ioutil.ReadFile(p.tokenFilePath)
if err != nil {
errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath)
return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err)
}
sessionName := p.roleSessionName
if len(sessionName) == 0 {
// session name is used to uniquely identify a session. This simply
// uses unix time in nanoseconds to uniquely identify sessions.
sessionName = strconv.FormatInt(now().UnixNano(), 10)
}
req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
RoleArn: &p.roleARN,
RoleSessionName: &sessionName,
WebIdentityToken: aws.String(string(b)),
})
// InvalidIdentityToken error is a temporary error that can occur
// when assuming a Role with a JWT web identity token.
req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
if err := req.Send(); err != nil {
return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
}
p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
value := credentials.Value{
AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId),
SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
SessionToken: aws.StringValue(resp.Credentials.SessionToken),
ProviderName: WebIdentityProviderName,
}
return value, nil
}
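A usage sketch (not part of this diff) of the new web identity provider; the role ARN, session name, and token path are placeholders:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	creds := stscreds.NewWebIdentityCredentials(sess,
		"arn:aws:iam::123456789012:role/example-role", // placeholder
		"example-session",                             // placeholder
		"/var/run/secrets/tokens/oidc-token")          // placeholder

	cfg := aws.NewConfig().WithCredentials(creds)
	_ = cfg
}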

View File

@ -1,30 +1,61 @@
// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
// via UDP connection. Using the Start function will enable the reporting of
// metrics on a given port. If Start is called, with different parameters, again,
// a panic will occur.
// Package csm provides the Client Side Monitoring (CSM) client which enables
// sending metrics via UDP connection to the CSM agent. This package provides
// control options, and configuration for the CSM client. The client can be
// controlled manually, or automatically via the SDK's Session configuration.
//
// Pause can be called to pause any metrics publishing on a given port. Sessions
// that have had their handlers modified via InjectHandlers may still be used.
// However, the handlers will act as a no-op meaning no metrics will be published.
// Enabling CSM client via SDK's Session configuration
//
// The CSM client can be enabled automatically via SDK's Session configuration.
// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
// environment variable is set to a non-empty value.
//
// The configuration options for the CSM client via the SDK's session
// configuration are:
//
// * AWS_CSM_PORT=<port number>
// The port number the CSM agent will receive metrics on.
//
// * AWS_CSM_HOST=<hostname or ip>
// The hostname, or IP address the CSM agent will receive metrics on.
// Without port number.
//
// Manually enabling the CSM client
//
// The CSM client can be started, paused, and resumed manually. The Start
// function will enable the CSM client to publish metrics to the CSM agent. It
// is safe to call Start concurrently, but if Start is called additional times
// with different ClientID or address it will panic.
//
// Example:
// r, err := csm.Start("clientID", ":31000")
// if err != nil {
// panic(fmt.Errorf("failed starting CSM: %v", err))
// }
//
// When controlling the CSM client manually, you must also inject its request
// handlers into the SDK's Session configuration for the SDK's API clients to
// publish metrics.
//
// sess, err := session.NewSession(&aws.Config{})
// if err != nil {
// panic(fmt.Errorf("failed loading session: %v", err))
// }
//
// // Add CSM client's metric publishing request handlers to the SDK's
// // Session Configuration.
// r.InjectHandlers(&sess.Handlers)
//
// client := s3.New(sess)
// resp, err := client.GetObject(&s3.GetObjectInput{
// Bucket: aws.String("bucket"),
// Key: aws.String("key"),
// })
// Controlling CSM client
//
// Once the CSM client has been enabled the Get function will return a Reporter
// value that you can use to pause and resume the metrics published to the CSM
// agent. If the Get function is called before the reporter is enabled with the
// Start function or via the SDK's Session configuration, nil will be returned.
//
// The Pause method can be called to stop the CSM client publishing metrics to
// the CSM agent. The Continue method will resume metric publishing.
//
// // Get the CSM client Reporter.
// r := csm.Get()
//
// // Will pause monitoring
// r.Pause()
@ -35,12 +66,4 @@
//
// // Resume monitoring
// r.Continue()
//
// Start returns a Reporter that is used to enable or disable monitoring. If
// access to the Reporter is required later, calling Get will return the Reporter
// singleton.
//
// Example:
// r := csm.Get()
// r.Continue()
package csm

View File

@ -2,6 +2,7 @@ package csm
import (
"fmt"
"strings"
"sync"
)
@ -9,19 +10,40 @@ var (
lock sync.Mutex
)
// Client side metric handler names
const (
APICallMetricHandlerName = "awscsm.SendAPICallMetric"
APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
// DefaultPort is used when no port is specified.
DefaultPort = "31000"
// DefaultHost is the host that will be used when none is specified.
DefaultHost = "127.0.0.1"
)
// Start will start the a long running go routine to capture
// AddressWithDefaults returns a CSM address built from the host and port
// values. If the host or port is not set, default values will be used
// instead. If host is "localhost" it will be replaced with "127.0.0.1".
func AddressWithDefaults(host, port string) string {
if len(host) == 0 || strings.EqualFold(host, "localhost") {
host = DefaultHost
}
if len(port) == 0 {
port = DefaultPort
}
// Only an IPv6 host can contain a colon
if strings.Contains(host, ":") {
return "[" + host + "]:" + port
}
return host + ":" + port
}
// Start will start a long running go routine to capture
// client side metrics. Calling start multiple time will only
// start the metric listener once and will panic if a different
// client ID or port is passed in.
//
// Example:
// r, err := csm.Start("clientID", "127.0.0.1:8094")
// r, err := csm.Start("clientID", "127.0.0.1:31000")
// if err != nil {
// panic(fmt.Errorf("expected no error, but received %v", err))
// }

View File

@ -16,25 +16,26 @@ var (
type metricChan struct {
ch chan metric
paused int64
paused *int64
}
func newMetricChan(size int) metricChan {
return metricChan{
ch: make(chan metric, size),
ch: make(chan metric, size),
paused: new(int64),
}
}
func (ch *metricChan) Pause() {
atomic.StoreInt64(&ch.paused, pausedEnum)
atomic.StoreInt64(ch.paused, pausedEnum)
}
func (ch *metricChan) Continue() {
atomic.StoreInt64(&ch.paused, runningEnum)
atomic.StoreInt64(ch.paused, runningEnum)
}
func (ch *metricChan) IsPaused() bool {
v := atomic.LoadInt64(&ch.paused)
v := atomic.LoadInt64(ch.paused)
return v == pausedEnum
}

View File

@ -10,11 +10,6 @@ import (
"github.com/aws/aws-sdk-go/aws/request"
)
const (
// DefaultPort is used when no port is specified
DefaultPort = "31000"
)
// Reporter will gather metrics of API requests made and
// send those metrics to the CSM endpoint.
type Reporter struct {
@ -71,7 +66,6 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
XAmzRequestID: aws.String(r.RequestID),
AttemptCount: aws.Int(r.RetryCount + 1),
AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
AccessKey: aws.String(creds.AccessKeyID),
}
@ -123,7 +117,7 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Region: r.Config.Region,
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
}
@ -190,8 +184,9 @@ func (rep *Reporter) start() {
}
}
// Pause will pause the metric channel preventing any new metrics from
// being added.
// Pause will pause the metric channel preventing any new metrics from being
// added. It is safe to call concurrently with other calls to Pause, but if
// called concurrently with Continue it can lead to unexpected state.
func (rep *Reporter) Pause() {
lock.Lock()
defer lock.Unlock()
@ -203,8 +198,9 @@ func (rep *Reporter) Pause() {
rep.close()
}
// Continue will reopen the metric channel and allow for monitoring
// to be resumed.
// Continue will reopen the metric channel and allow for monitoring to be
// resumed. It is safe to call concurrently with other calls to Continue, but
// if called concurrently with Pause it can lead to unexpected state.
func (rep *Reporter) Continue() {
lock.Lock()
defer lock.Unlock()
@ -219,10 +215,18 @@ func (rep *Reporter) Continue() {
rep.metricsCh.Continue()
}
// Client side metric handler names
const (
APICallMetricHandlerName = "awscsm.SendAPICallMetric"
APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
)
// InjectHandlers will enable client side metrics and inject the proper
// handlers to handle how metrics are sent.
//
// Example:
// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
// multiple times may lead to unexpected behavior (e.g. duplicate metrics).
//
// // Start must be called in order to inject the correct handlers
// r, err := csm.Start("clientID", "127.0.0.1:8094")
// if err != nil {

View File

@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"time"
@ -12,8 +13,41 @@ import (
"github.com/aws/aws-sdk-go/internal/sdkuri"
)
// getToken uses the duration to return a token for EC2 metadata service,
// or an error if the request failed.
func (c *EC2Metadata) getToken(duration time.Duration) (tokenOutput, error) {
op := &request.Operation{
Name: "GetToken",
HTTPMethod: "PUT",
HTTPPath: "/api/token",
}
var output tokenOutput
req := c.NewRequest(op, nil, &output)
// remove the fetch token handler from the request handlers to avoid infinite recursion
req.Handlers.Sign.RemoveByName(fetchTokenHandlerName)
// Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request.
req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler)
ttl := strconv.FormatInt(int64(duration/time.Second), 10)
req.HTTPRequest.Header.Set(ttlHeader, ttl)
err := req.Send()
// Errors with bad request status should be returned.
if err != nil {
err = awserr.NewRequestFailure(
awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err),
req.HTTPResponse.StatusCode, req.RequestID)
}
return output, err
}
// GetMetadata uses the path provided to request information from the EC2
// instance metdata service. The content will be returned as a string, or
// instance metadata service. The content will be returned as a string, or
// error if the request failed.
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
op := &request.Operation{
@ -21,11 +55,11 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
HTTPMethod: "GET",
HTTPPath: sdkuri.PathJoin("/meta-data", p),
}
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
err := req.Send()
req := c.NewRequest(op, nil, output)
err := req.Send()
return output.Content, err
}
@ -41,13 +75,8 @@ func (c *EC2Metadata) GetUserData() (string, error) {
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
if r.HTTPResponse.StatusCode == http.StatusNotFound {
r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
}
})
err := req.Send()
err := req.Send()
return output.Content, err
}
@ -63,8 +92,8 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
err := req.Send()
err := req.Send()
return output.Content, err
}
@ -152,18 +181,19 @@ type EC2IAMInfo struct {
// An EC2InstanceIdentityDocument provides the shape for unmarshaling
// an instance identity document
type EC2InstanceIdentityDocument struct {
DevpayProductCodes []string `json:"devpayProductCodes"`
AvailabilityZone string `json:"availabilityZone"`
PrivateIP string `json:"privateIp"`
Version string `json:"version"`
Region string `json:"region"`
InstanceID string `json:"instanceId"`
BillingProducts []string `json:"billingProducts"`
InstanceType string `json:"instanceType"`
AccountID string `json:"accountId"`
PendingTime time.Time `json:"pendingTime"`
ImageID string `json:"imageId"`
KernelID string `json:"kernelId"`
RamdiskID string `json:"ramdiskId"`
Architecture string `json:"architecture"`
DevpayProductCodes []string `json:"devpayProductCodes"`
MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
AvailabilityZone string `json:"availabilityZone"`
PrivateIP string `json:"privateIp"`
Version string `json:"version"`
Region string `json:"region"`
InstanceID string `json:"instanceId"`
BillingProducts []string `json:"billingProducts"`
InstanceType string `json:"instanceType"`
AccountID string `json:"accountId"`
PendingTime time.Time `json:"pendingTime"`
ImageID string `json:"imageId"`
KernelID string `json:"kernelId"`
RamdiskID string `json:"ramdiskId"`
Architecture string `json:"architecture"`
}

View File

@ -13,6 +13,7 @@ import (
"io"
"net/http"
"os"
"strconv"
"strings"
"time"
@ -24,9 +25,25 @@ import (
"github.com/aws/aws-sdk-go/aws/request"
)
// ServiceName is the name of the service.
const ServiceName = "ec2metadata"
const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
const (
// ServiceName is the name of the service.
ServiceName = "ec2metadata"
disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
// Headers for Token and TTL
ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds"
tokenHeader = "x-aws-ec2-metadata-token"
// Named Handler constants
fetchTokenHandlerName = "FetchTokenHandler"
unmarshalMetadataHandlerName = "unmarshalMetadataHandler"
unmarshalTokenHandlerName = "unmarshalTokenHandler"
enableTokenProviderHandlerName = "enableTokenProviderHandler"
// TTL constants
defaultTTL = 21600 * time.Second
ttlExpirationWindow = 30 * time.Second
)
// A EC2Metadata is an EC2 Metadata service Client.
type EC2Metadata struct {
@ -80,13 +97,27 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
),
}
svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
// token provider instance
tp := newTokenProvider(svc, defaultTTL)
// NamedHandler for fetching token
svc.Handlers.Sign.PushBackNamed(request.NamedHandler{
Name: fetchTokenHandlerName,
Fn: tp.fetchTokenHandler,
})
// NamedHandler for enabling token provider
svc.Handlers.Complete.PushBackNamed(request.NamedHandler{
Name: enableTokenProviderHandlerName,
Fn: tp.enableTokenProviderHandler,
})
svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler)
svc.Handlers.UnmarshalError.PushBack(unmarshalError)
svc.Handlers.Validate.Clear()
svc.Handlers.Validate.PushBack(validateEndpointHandler)
// Disable the EC2 Metadata service if the environment variable is set.
// This shortcirctes the service's functionality to always fail to send
// This short-circuits the service's functionality to always fail to send
// requests.
if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
svc.Handlers.Send.SwapNamed(request.NamedHandler{
@ -107,7 +138,6 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
for _, option := range opts {
option(svc.Client)
}
return svc
}
@ -119,30 +149,74 @@ type metadataOutput struct {
Content string
}
func unmarshalHandler(r *request.Request) {
defer r.HTTPResponse.Body.Close()
b := &bytes.Buffer{}
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata respose", err)
return
}
type tokenOutput struct {
Token string
TTL time.Duration
}
if data, ok := r.Data.(*metadataOutput); ok {
data.Content = b.String()
}
// unmarshal token handler is used to parse the response of a getToken operation
var unmarshalTokenHandler = request.NamedHandler{
Name: unmarshalTokenHandlerName,
Fn: func(r *request.Request) {
defer r.HTTPResponse.Body.Close()
var b bytes.Buffer
if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization,
"unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID)
return
}
v := r.HTTPResponse.Header.Get(ttlHeader)
data, ok := r.Data.(*tokenOutput)
if !ok {
return
}
data.Token = b.String()
// TTL is in seconds
i, err := strconv.ParseInt(v, 10, 64)
if err != nil {
r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode,
"unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID)
return
}
t := time.Duration(i) * time.Second
data.TTL = t
},
}
var unmarshalHandler = request.NamedHandler{
Name: unmarshalMetadataHandlerName,
Fn: func(r *request.Request) {
defer r.HTTPResponse.Body.Close()
var b bytes.Buffer
if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization,
"unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID)
return
}
if data, ok := r.Data.(*metadataOutput); ok {
data.Content = b.String()
}
},
}
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
b := &bytes.Buffer{}
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error respose", err)
var b bytes.Buffer
if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
r.Error = awserr.NewRequestFailure(
awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err),
r.HTTPResponse.StatusCode, r.RequestID)
return
}
// Response body format is not consistent between metadata endpoints.
// Grab the error message as a string and include that as the source error
r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())),
r.HTTPResponse.StatusCode, r.RequestID)
}
func validateEndpointHandler(r *request.Request) {

View File

@ -0,0 +1,92 @@
package ec2metadata
import (
"net/http"
"sync/atomic"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
)
// A tokenProvider struct provides access to the EC2Metadata client,
// an atomic instance of a token, and the configuredTTL for it.
// tokenProvider also provides an atomic flag to disable the
// fetch token operation.
// The disabled member will use 0 as false, and 1 as true.
type tokenProvider struct {
client *EC2Metadata
token atomic.Value
configuredTTL time.Duration
disabled uint32
}
// An ec2Token struct helps with using the token in EC2 Metadata service ops
type ec2Token struct {
token string
credentials.Expiry
}
// newTokenProvider provides a pointer to a tokenProvider instance
func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider {
return &tokenProvider{client: c, configuredTTL: duration}
}
// fetchTokenHandler fetches token for EC2Metadata service client by default.
func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
// short-circuits to insecure data flow if tokenProvider is disabled.
if v := atomic.LoadUint32(&t.disabled); v == 1 {
return
}
if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() {
r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
return
}
output, err := t.client.getToken(t.configuredTTL)
if err != nil {
// change the disabled flag on token provider to true,
// when error is request timeout error.
if requestFailureError, ok := err.(awserr.RequestFailure); ok {
switch requestFailureError.StatusCode() {
case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
atomic.StoreUint32(&t.disabled, 1)
case http.StatusBadRequest:
r.Error = requestFailureError
}
// Check if request timed out while waiting for response
if e, ok := requestFailureError.OrigErr().(awserr.Error); ok {
if e.Code() == "RequestError" {
atomic.StoreUint32(&t.disabled, 1)
}
}
}
return
}
newToken := ec2Token{
token: output.Token,
}
newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow)
t.token.Store(newToken)
// Inject token header to the request.
if ec2Token, ok := t.token.Load().(ec2Token); ok {
r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
}
}
// enableTokenProviderHandler enables the token provider
func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) {
// If the error code status is 401, we enable the token provider
if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil &&
e.StatusCode() == http.StatusUnauthorized {
atomic.StoreUint32(&t.disabled, 0)
}
}
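A short sketch (assumed usage, not in this diff) showing that callers are unchanged by the new token provider; the IMDSv2 token is fetched and attached by the handlers registered above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	// Token acquisition and fallback to insecure mode happen transparently.
	doc, err := svc.GetInstanceIdentityDocument()
	if err != nil {
		fmt.Println("metadata lookup failed:", err)
		return
	}
	fmt.Println(doc.Region, doc.InstanceID)
}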

View File

@ -83,6 +83,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
p := &ps[i]
custAddEC2Metadata(p)
custAddS3DualStack(p)
custRegionalS3(p)
custRmIotDataService(p)
custFixAppAutoscalingChina(p)
custFixAppAutoscalingUsGov(p)
@ -100,6 +101,33 @@ func custAddS3DualStack(p *partition) {
custAddDualstack(p, "s3-control")
}
func custRegionalS3(p *partition) {
if p.ID != "aws" {
return
}
service, ok := p.Services["s3"]
if !ok {
return
}
// If global endpoint already exists no customization needed.
if _, ok := service.Endpoints["aws-global"]; ok {
return
}
service.PartitionEndpoint = "aws-global"
service.Endpoints["us-east-1"] = endpoint{}
service.Endpoints["aws-global"] = endpoint{
Hostname: "s3.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
}
p.Services["s3"] = service
}
func custAddDualstack(p *partition, svcName string) {
s, ok := p.Services[svcName]
if !ok {

File diff suppressed because it is too large.

View File

@ -3,6 +3,7 @@ package endpoints
import (
"fmt"
"regexp"
"strings"
"github.com/aws/aws-sdk-go/aws/awserr"
)
@ -46,6 +47,108 @@ type Options struct {
//
// This option is ignored if StrictMatching is enabled.
ResolveUnknownService bool
// STS Regional Endpoint flag helps with resolving the STS endpoint
STSRegionalEndpoint STSRegionalEndpoint
// S3 Regional Endpoint flag helps with resolving the S3 endpoint
S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint
}
// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint
// options.
type STSRegionalEndpoint int
func (e STSRegionalEndpoint) String() string {
switch e {
case LegacySTSEndpoint:
return "legacy"
case RegionalSTSEndpoint:
return "regional"
case UnsetSTSEndpoint:
return ""
default:
return "unknown"
}
}
const (
// UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified.
UnsetSTSEndpoint STSRegionalEndpoint = iota
// LegacySTSEndpoint represents when STS Regional Endpoint flag is specified
// to use legacy endpoints.
LegacySTSEndpoint
// RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified
// to use regional endpoints.
RegionalSTSEndpoint
)
// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based
// on the input string provided in env config or shared config by the user.
//
// `legacy`, `regional` are the only case-insensitive valid strings for
// resolving the STS regional Endpoint flag.
func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) {
switch {
case strings.EqualFold(s, "legacy"):
return LegacySTSEndpoint, nil
case strings.EqualFold(s, "regional"):
return RegionalSTSEndpoint, nil
default:
return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s)
}
}
// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1
// Regional Endpoint options.
type S3UsEast1RegionalEndpoint int
func (e S3UsEast1RegionalEndpoint) String() string {
switch e {
case LegacyS3UsEast1Endpoint:
return "legacy"
case RegionalS3UsEast1Endpoint:
return "regional"
case UnsetS3UsEast1Endpoint:
return ""
default:
return "unknown"
}
}
const (
// UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not
// specified.
UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota
// LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
// specified to use legacy endpoints.
LegacyS3UsEast1Endpoint
// RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
// specified to use regional endpoints.
RegionalS3UsEast1Endpoint
)
// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based
// on the input string provided in env config or shared config by the user.
//
// `legacy`, `regional` are the only case-insensitive valid strings for
// resolving the S3 regional Endpoint flag.
func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) {
switch {
case strings.EqualFold(s, "legacy"):
return LegacyS3UsEast1Endpoint, nil
case strings.EqualFold(s, "regional"):
return RegionalS3UsEast1Endpoint, nil
default:
return UnsetS3UsEast1Endpoint,
fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s)
}
}
// Set combines all of the option functions together.
@ -79,6 +182,12 @@ func ResolveUnknownServiceOption(o *Options) {
o.ResolveUnknownService = true
}
// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve
// the STS endpoint to its regional endpoint, instead of the global endpoint.
func STSRegionalEndpointOption(o *Options) {
o.STSRegionalEndpoint = RegionalSTSEndpoint
}
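For illustration only, here is a minimal sketch (not part of this change) of how a caller could opt in to the regional STS endpoint through the option above; the region and the expected URLs are assumptions based on this SDK version's endpoint tables.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolver := endpoints.DefaultResolver()

	// Default behavior keeps the legacy global STS endpoint.
	legacy, err := resolver.EndpointFor("sts", "us-west-2")
	if err == nil {
		fmt.Println(legacy.URL) // expected: https://sts.amazonaws.com
	}

	// With the option, the regional endpoint is resolved instead.
	regional, err := resolver.EndpointFor("sts", "us-west-2",
		endpoints.STSRegionalEndpointOption)
	if err == nil {
		fmt.Println(regional.URL) // expected: https://sts.us-west-2.amazonaws.com
	}
}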
// A Resolver provides the interface for functionality to resolve endpoints.
// The built-in Partition and DefaultResolver return values satisfy this interface.
type Resolver interface {
@ -170,10 +279,13 @@ func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
// A Partition provides the ability to enumerate the partition's regions
// and services.
type Partition struct {
id string
p *partition
id, dnsSuffix string
p *partition
}
// DNSSuffix returns the base domain name of the partition.
func (p Partition) DNSSuffix() string { return p.dnsSuffix }
// ID returns the identifier of the partition.
func (p Partition) ID() string { return p.id }
@ -191,7 +303,7 @@ func (p Partition) ID() string { return p.id }
// require the provided service and region to be known by the partition.
// If the endpoint cannot be strictly resolved an error will be returned. This
// mode is useful to ensure the endpoint resolved is valid. Without
// StrictMatching enabled the endpoint returned my look valid but may not work.
// StrictMatching enabled the endpoint returned may look valid but may not work.
// StrictMatching requires the SDK to be updated if you want to take advantage
// of new regions and services expansions.
//
@ -347,6 +459,9 @@ type ResolvedEndpoint struct {
// The endpoint URL
URL string
// The endpoint partition
PartitionID string
// The region that should be used for signing requests.
SigningRegion string

View File

@ -0,0 +1,24 @@
package endpoints
var legacyGlobalRegions = map[string]map[string]struct{}{
"sts": {
"ap-northeast-1": {},
"ap-south-1": {},
"ap-southeast-1": {},
"ap-southeast-2": {},
"ca-central-1": {},
"eu-central-1": {},
"eu-north-1": {},
"eu-west-1": {},
"eu-west-2": {},
"eu-west-3": {},
"sa-east-1": {},
"us-east-1": {},
"us-east-2": {},
"us-west-1": {},
"us-west-2": {},
},
"s3": {
"us-east-1": {},
},
}

View File

@ -54,8 +54,9 @@ type partition struct {
func (p partition) Partition() Partition {
return Partition{
id: p.ID,
p: &p,
dnsSuffix: p.DNSSuffix,
id: p.ID,
p: &p,
}
}
@ -74,24 +75,56 @@ func (p partition) canResolveEndpoint(service, region string, strictMatch bool)
return p.RegionRegex.MatchString(region)
}
func allowLegacyEmptyRegion(service string) bool {
legacy := map[string]struct{}{
"budgets": {},
"ce": {},
"chime": {},
"cloudfront": {},
"ec2metadata": {},
"iam": {},
"importexport": {},
"organizations": {},
"route53": {},
"sts": {},
"support": {},
"waf": {},
}
_, allowed := legacy[service]
return allowed
}
func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
var opt Options
opt.Set(opts...)
s, hasService := p.Services[service]
if !(hasService || opt.ResolveUnknownService) {
if len(service) == 0 || !(hasService || opt.ResolveUnknownService) {
// Only return error if the resolver will not fallback to creating
// endpoint based on service endpoint ID passed in.
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
}
if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 {
region = s.PartitionEndpoint
}
if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) ||
(service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) {
if _, ok := legacyGlobalRegions[service][region]; ok {
region = "aws-global"
}
}
e, hasEndpoint := s.endpointForRegion(region)
if !hasEndpoint && opt.StrictMatching {
if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) {
return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
}
defs := []endpoint{p.Defaults, s.Defaults}
return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil
}
func serviceList(ss services) []string {
@ -200,7 +233,7 @@ func getByPriority(s []string, p []string, def string) string {
return s[0]
}
func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
var merged endpoint
for _, def := range defs {
merged.mergeIn(def)
@ -208,20 +241,6 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op
merged.mergeIn(e)
e = merged
hostname := e.Hostname
// Offset the hostname for dualstack if enabled
if opts.UseDualStack && e.HasDualStack == boxedTrue {
hostname = e.DualStackHostname
}
u := strings.Replace(hostname, "{service}", service, 1)
u = strings.Replace(u, "{region}", region, 1)
u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
u = fmt.Sprintf("%s://%s", scheme, u)
signingRegion := e.CredentialScope.Region
if len(signingRegion) == 0 {
signingRegion = region
@ -234,8 +253,23 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op
signingNameDerived = true
}
hostname := e.Hostname
// Offset the hostname for dualstack if enabled
if opts.UseDualStack && e.HasDualStack == boxedTrue {
hostname = e.DualStackHostname
region = signingRegion
}
u := strings.Replace(hostname, "{service}", service, 1)
u = strings.Replace(u, "{region}", region, 1)
u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
u = fmt.Sprintf("%s://%s", scheme, u)
return ResolvedEndpoint{
URL: u,
PartitionID: partitionID,
SigningRegion: signingRegion,
SigningName: signingName,
SigningNameDerived: signingNameDerived,

View File

@ -1,18 +1,17 @@
// +build !appengine,!plan9
package request
import (
"net"
"os"
"syscall"
"strings"
)
func isErrConnectionReset(err error) bool {
if opErr, ok := err.(*net.OpError); ok {
if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
return sysErr.Err == syscall.ECONNRESET
}
if strings.Contains(err.Error(), "read: connection reset") {
return false
}
if strings.Contains(err.Error(), "connection reset") ||
strings.Contains(err.Error(), "broken pipe") {
return true
}
return false

View File

@ -1,11 +0,0 @@
// +build appengine plan9
package request
import (
"strings"
)
func isErrConnectionReset(err error) bool {
return strings.Contains(err.Error(), "connection reset")
}

View File

@ -23,7 +23,7 @@ type Handlers struct {
Complete HandlerList
}
// Copy returns of this handler's lists.
// Copy returns a copy of this handler's lists.
func (h *Handlers) Copy() Handlers {
return Handlers{
Validate: h.Validate.copy(),
@ -42,7 +42,7 @@ func (h *Handlers) Copy() Handlers {
}
}
// Clear removes callback functions for all handlers
// Clear removes callback functions for all handlers.
func (h *Handlers) Clear() {
h.Validate.Clear()
h.Build.Clear()
@ -59,6 +59,51 @@ func (h *Handlers) Clear() {
h.Complete.Clear()
}
// IsEmpty returns if there are no handlers in any of the handler lists.
func (h *Handlers) IsEmpty() bool {
if h.Validate.Len() != 0 {
return false
}
if h.Build.Len() != 0 {
return false
}
if h.Send.Len() != 0 {
return false
}
if h.Sign.Len() != 0 {
return false
}
if h.Unmarshal.Len() != 0 {
return false
}
if h.UnmarshalStream.Len() != 0 {
return false
}
if h.UnmarshalMeta.Len() != 0 {
return false
}
if h.UnmarshalError.Len() != 0 {
return false
}
if h.ValidateResponse.Len() != 0 {
return false
}
if h.Retry.Len() != 0 {
return false
}
if h.AfterRetry.Len() != 0 {
return false
}
if h.CompleteAttempt.Len() != 0 {
return false
}
if h.Complete.Len() != 0 {
return false
}
return true
}
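As a hedged usage sketch (assuming the fmt and aws/request imports), IsEmpty reports whether any callback has been registered on any of the lists; a zero-value Handlers is empty until a handler is pushed.

h := request.Handlers{}
fmt.Println(h.IsEmpty()) // true, no callbacks registered yet

h.Send.PushBack(func(r *request.Request) {})
fmt.Println(h.IsEmpty()) // false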
// A HandlerListRunItem represents an entry in the HandlerList which
// is being run.
type HandlerListRunItem struct {

View File

@ -15,12 +15,15 @@ type offsetReader struct {
closed bool
}
func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) {
reader := &offsetReader{}
buf.Seek(offset, sdkio.SeekStart)
_, err := buf.Seek(offset, sdkio.SeekStart)
if err != nil {
return nil, err
}
reader.buf = buf
return reader
return reader, nil
}
// Close will close the instance of the offset reader's access to
@ -54,7 +57,9 @@ func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
// CloseAndCopy will return a new offsetReader with a copy of the old buffer
// and close the old buffer.
func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
o.Close()
func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) {
if err := o.Close(); err != nil {
return nil, err
}
return newOffsetReader(o.buf, offset)
}

View File

@ -64,6 +64,15 @@ type Request struct {
LastSignedAt time.Time
DisableFollowRedirects bool
// Additional API error codes that should be retried. IsErrorRetryable
// will consider these codes in addition to its built in cases.
RetryErrorCodes []string
// Additional API error codes that should be retried with throttle backoff
// delay. IsErrorThrottle will consider these codes in addition to its
// built in cases.
ThrottleErrorCodes []string
// A value greater than 0 instructs the request to be signed as Presigned URL
// You should not set this field directly. Instead use Request's
// Presign or PresignRequest methods.
@ -90,8 +99,12 @@ type Operation struct {
BeforePresignFn func(r *Request) error
}
// New returns a new Request pointer for the service API
// operation and parameters.
// New returns a new Request pointer for the service API operation and
// parameters.
//
// A Retryer should be provided to direct how the request is retried. If
// Retryer is nil, a default no retry value will be used. You can use
// NoOpRetryer in the Client package to disable retry behavior directly.
//
// Params is any value of input parameters to be the request payload.
// Data is pointer value to an object which the request's response
@ -99,6 +112,10 @@ type Operation struct {
func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
if retryer == nil {
retryer = noOpRetryer{}
}
method := operation.HTTPMethod
if method == "" {
method = "POST"
@ -231,6 +248,10 @@ func (r *Request) WillRetry() bool {
return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
}
func fmtAttemptCount(retryCount, maxRetries int) string {
return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries)
}
// ParamsFilled returns if the request's parameters have been populated
// and the parameters are valid. False is returned if no parameters are
// provided or invalid.
@ -259,7 +280,18 @@ func (r *Request) SetStringBody(s string) {
// SetReaderBody will set the request's body reader.
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
r.Body = reader
r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset.
if aws.IsReaderSeekable(reader) {
var err error
// Get the Body's current offset so retries will start from the same
// initial position.
r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent)
if err != nil {
r.Error = awserr.New(ErrCodeSerialization,
"failed to determine start of request body", err)
return
}
}
r.ResetBody()
}
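A brief, hedged sketch of the seekable-body requirement above: with a seekable reader such as bytes.Reader the SDK can record the starting offset so retries rewind to it ("req" stands for any *request.Request and is an assumption for illustration, as is the payload).

payload := []byte(`{"example":"body"}`)      // hypothetical request payload
req.SetReaderBody(bytes.NewReader(payload)) // bytes.Reader is seekable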
@ -330,16 +362,15 @@ func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, err
return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
}
func debugLogReqError(r *Request, stage string, retrying bool, err error) {
const (
notRetrying = "not retrying"
)
func debugLogReqError(r *Request, stage, retryStr string, err error) {
if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
return
}
retryStr := "not retrying"
if retrying {
retryStr = "will retry"
}
r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
}
@ -358,12 +389,12 @@ func (r *Request) Build() error {
if !r.built {
r.Handlers.Validate.Run(r)
if r.Error != nil {
debugLogReqError(r, "Validate Request", false, r.Error)
debugLogReqError(r, "Validate Request", notRetrying, r.Error)
return r.Error
}
r.Handlers.Build.Run(r)
if r.Error != nil {
debugLogReqError(r, "Build Request", false, r.Error)
debugLogReqError(r, "Build Request", notRetrying, r.Error)
return r.Error
}
r.built = true
@ -379,7 +410,7 @@ func (r *Request) Build() error {
func (r *Request) Sign() error {
r.Build()
if r.Error != nil {
debugLogReqError(r, "Build Request", false, r.Error)
debugLogReqError(r, "Build Request", notRetrying, r.Error)
return r.Error
}
@ -387,12 +418,16 @@ func (r *Request) Sign() error {
return r.Error
}
func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) {
if r.safeBody != nil {
r.safeBody.Close()
}
r.safeBody = newOffsetReader(r.Body, r.BodyStart)
r.safeBody, err = newOffsetReader(r.Body, r.BodyStart)
if err != nil {
return nil, awserr.New(ErrCodeSerialization,
"failed to get next request body reader", err)
}
// Go 1.8 tightened and clarified the rules code needs to use when building
// requests with the http package. Go 1.8 removed the automatic detection
@ -409,10 +444,10 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
// Related golang/go#18257
l, err := aws.SeekerLen(r.Body)
if err != nil {
return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
return nil, awserr.New(ErrCodeSerialization,
"failed to compute request body size", err)
}
var body io.ReadCloser
if l == 0 {
body = NoBody
} else if l > 0 {
@ -473,29 +508,28 @@ func (r *Request) Send() error {
r.AttemptTime = time.Now()
if err := r.Sign(); err != nil {
debugLogReqError(r, "Sign Request", false, err)
debugLogReqError(r, "Sign Request", notRetrying, err)
return err
}
if err := r.sendRequest(); err == nil {
return nil
} else if !shouldRetryCancel(r.Error) {
}
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil || !aws.BoolValue(r.Retryable) {
return r.Error
}
if err := r.prepareRetry(); err != nil {
r.Error = err
return err
} else {
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil || !aws.BoolValue(r.Retryable) {
return r.Error
}
r.prepareRetry()
continue
}
}
}
func (r *Request) prepareRetry() {
func (r *Request) prepareRetry() error {
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
@ -506,12 +540,19 @@ func (r *Request) prepareRetry() {
// the request's body even though the Client's Do returned.
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
r.ResetBody()
if err := r.Error; err != nil {
return awserr.New(ErrCodeSerialization,
"failed to prepare body for retry", err)
}
// Closing response body to ensure that no response body is leaked
// between retry attempts.
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
r.HTTPResponse.Body.Close()
}
return nil
}
func (r *Request) sendRequest() (sendErr error) {
@ -520,7 +561,9 @@ func (r *Request) sendRequest() (sendErr error) {
r.Retryable = nil
r.Handlers.Send.Run(r)
if r.Error != nil {
debugLogReqError(r, "Send Request", r.WillRetry(), r.Error)
debugLogReqError(r, "Send Request",
fmtAttemptCount(r.RetryCount, r.MaxRetries()),
r.Error)
return r.Error
}
@ -528,13 +571,17 @@ func (r *Request) sendRequest() (sendErr error) {
r.Handlers.ValidateResponse.Run(r)
if r.Error != nil {
r.Handlers.UnmarshalError.Run(r)
debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error)
debugLogReqError(r, "Validate Response",
fmtAttemptCount(r.RetryCount, r.MaxRetries()),
r.Error)
return r.Error
}
r.Handlers.Unmarshal.Run(r)
if r.Error != nil {
debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error)
debugLogReqError(r, "Unmarshal Response",
fmtAttemptCount(r.RetryCount, r.MaxRetries()),
r.Error)
return r.Error
}
@ -561,48 +608,6 @@ func AddToUserAgent(r *Request, s string) {
r.HTTPRequest.Header.Set("User-Agent", s)
}
type temporary interface {
Temporary() bool
}
func shouldRetryCancel(err error) bool {
switch err := err.(type) {
case awserr.Error:
if err.Code() == CanceledErrorCode {
return false
}
return shouldRetryCancel(err.OrigErr())
case *url.Error:
if strings.Contains(err.Error(), "connection refused") {
// Refused connections should be retried as the service may not yet
// be running on the port. Go TCP dial considers refused
// connections as not temporary.
return true
}
// *url.Error only implements Temporary after golang 1.6 but since
// url.Error only wraps the error:
return shouldRetryCancel(err.Err)
case temporary:
// If the error is temporary, we want to allow continuation of the
// retry process
return err.Temporary()
case nil:
// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
// because we don't know the cause, it is marked as retryable. See
// TestRequest4xxUnretryable for an example.
return true
default:
switch err.Error() {
case "net/http: request canceled",
"net/http: request canceled while waiting for connection":
// known 1.5 error case when an http request is cancelled
return false
}
// here we don't know the error; so we allow a retry.
return true
}
}
// SanitizeHostForHeader removes default port from host and updates request.Host
func SanitizeHostForHeader(r *http.Request) {
host := getHost(r)

View File

@ -4,6 +4,8 @@ package request
import (
"net/http"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// NoBody is a http.NoBody reader instructing Go HTTP client to not include
@ -24,7 +26,8 @@ var NoBody = http.NoBody
func (r *Request) ResetBody() {
body, err := r.getNextRequestBody()
if err != nil {
r.Error = err
r.Error = awserr.New(ErrCodeSerialization,
"failed to reset request body", err)
return
}

View File

@ -17,11 +17,13 @@ import (
// does the pagination between API operations, and Paginator defines the
// configuration that will be used per page request.
//
// cont := true
// for p.Next() && cont {
// for p.Next() {
// data := p.Page().(*s3.ListObjectsOutput)
// // process the page's data
// // ...
// // break out of loop to stop fetching additional pages
// }
//
// return p.Err()
//
// See service client API operation Pages methods for examples how the SDK will
@ -146,7 +148,7 @@ func (r *Request) nextPageTokens() []interface{} {
return nil
}
case bool:
if v == false {
if !v {
return nil
}
}
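To match the corrected doc example above, a hedged sketch of the same pattern through a service Pages helper; the s3 client value "svc" and the bucket name are assumptions for illustration, and the aws, fmt, log, and s3 imports are assumed.

err := svc.ListObjectsPages(&s3.ListObjectsInput{
	Bucket: aws.String("example-bucket"),
}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
	for _, obj := range page.Contents {
		fmt.Println(aws.StringValue(obj.Key))
	}
	return true // return false to stop fetching additional pages
})
if err != nil {
	log.Println(err)
}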

View File

@ -1,26 +1,75 @@
package request
import (
"net"
"net/url"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// Retryer is an interface to control retry logic for a given service.
// The default implementation used by most services is the client.DefaultRetryer
// structure, which contains basic retry logic using exponential backoff.
// Retryer provides the interface to drive the SDK's request retry behavior. The
// Retryer implementation is responsible for implementing exponential backoff,
// and for determining if a request API error should be retried.
//
// client.DefaultRetryer is the SDK's default implementation of the Retryer. It
// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to
// determine if the request is retried.
type Retryer interface {
// RetryRules return the retry delay that should be used by the SDK before
// making another request attempt for the failed request.
RetryRules(*Request) time.Duration
// ShouldRetry returns if the failed request is retryable.
//
// Implementations may consider request attempt count when determining if a
// request is retryable, but the SDK will use MaxRetries to limit the
// number of attempts made for a request.
ShouldRetry(*Request) bool
// MaxRetries is the number of times a request may be retried before
// failing.
MaxRetries() int
}
// WithRetryer sets a config Retryer value to the given Config returning it
// for chaining.
// WithRetryer sets a Retryer value to the given Config returning the Config
// value for chaining. The value must not be nil.
func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
if retryer == nil {
if cfg.Logger != nil {
cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.")
}
retryer = noOpRetryer{}
}
cfg.Retryer = retryer
return cfg
}
// noOpRetryer is an internal no-op retryer used when a request is created
// without a retryer.
//
// Provides a retryer that performs no retries.
// It should be used when we do not want retries to be performed.
type noOpRetryer struct{}
// MaxRetries returns the maximum number of retries the service will make for
// an individual API request; for noOpRetryer MaxRetries is always zero.
func (d noOpRetryer) MaxRetries() int {
return 0
}
// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
func (d noOpRetryer) ShouldRetry(_ *Request) bool {
return false
}
// RetryRules returns the delay duration before retrying this request again;
// since NoOpRetryer does not retry, RetryRules always returns 0.
func (d noOpRetryer) RetryRules(_ *Request) time.Duration {
return 0
}
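For context, a hedged sketch of overriding the retry behavior described above; it assumes the aws, client, request, and session imports and simply raises the retry count rather than disabling retries.

cfg := request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{
	NumMaxRetries: 5, // retry up to five times instead of the default
})
sess := session.Must(session.NewSession(cfg))
_ = sess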
// retryableCodes is a collection of service response codes which are retry-able
@ -76,10 +125,6 @@ var validParentCodes = map[string]struct{}{
ErrCodeRead: {},
}
type temporaryError interface {
Temporary() bool
}
func isNestedErrorRetryable(parentErr awserr.Error) bool {
if parentErr == nil {
return false
@ -98,7 +143,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
return isCodeRetryable(aerr.Code())
}
if t, ok := err.(temporaryError); ok {
if t, ok := err.(temporary); ok {
return t.Temporary() || isErrConnectionReset(err)
}
@ -108,32 +153,90 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
// IsErrorRetryable returns whether the error is retryable, based on its Code.
// Returns false if error is nil.
func IsErrorRetryable(err error) bool {
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
}
if err == nil {
return false
}
return shouldRetryError(err)
}
type temporary interface {
Temporary() bool
}
func shouldRetryError(origErr error) bool {
switch err := origErr.(type) {
case awserr.Error:
if err.Code() == CanceledErrorCode {
return false
}
if isNestedErrorRetryable(err) {
return true
}
origErr := err.OrigErr()
var shouldRetry bool
if origErr != nil {
shouldRetry = shouldRetryError(origErr)
if err.Code() == "RequestError" && !shouldRetry {
return false
}
}
if isCodeRetryable(err.Code()) {
return true
}
return shouldRetry
case *url.Error:
if strings.Contains(err.Error(), "connection refused") {
// Refused connections should be retried as the service may not yet
// be running on the port. Go TCP dial considers refused
// connections as not temporary.
return true
}
// *url.Error only implements Temporary after golang 1.6 but since
// url.Error only wraps the error:
return shouldRetryError(err.Err)
case temporary:
if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
return true
}
// If the error is temporary, we want to allow continuation of the
// retry process
return err.Temporary() || isErrConnectionReset(origErr)
case nil:
// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
// because we don't know the cause, it is marked as retryable. See
// TestRequest4xxUnretryable for an example.
return true
default:
switch err.Error() {
case "net/http: request canceled",
"net/http: request canceled while waiting for connection":
// known 1.5 error case when an http request is cancelled
return false
}
// here we don't know the error; so we allow a retry.
return true
}
return false
}
// IsErrorThrottle returns whether the error is to be throttled based on its code.
// Returns false if error is nil.
func IsErrorThrottle(err error) bool {
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
return isCodeThrottle(aerr.Code())
}
if aerr, ok := err.(awserr.Error); ok && aerr != nil {
return isCodeThrottle(aerr.Code())
}
return false
}
// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
// Returns false if error is nil.
// IsErrorExpiredCreds returns whether the error code is a credential expiry
// error. Returns false if error is nil.
func IsErrorExpiredCreds(err error) bool {
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
return isCodeExpiredCreds(aerr.Code())
}
if aerr, ok := err.(awserr.Error); ok && aerr != nil {
return isCodeExpiredCreds(aerr.Code())
}
return false
}
@ -143,17 +246,58 @@ func IsErrorExpiredCreds(err error) bool {
//
// Alias for the utility function IsErrorRetryable
func (r *Request) IsErrorRetryable() bool {
if isErrCode(r.Error, r.RetryErrorCodes) {
return true
}
// HTTP response status code 501 should not be retried.
// 501 represents Not Implemented which means the request method is not
// supported by the server and cannot be handled.
if r.HTTPResponse != nil {
// HTTP response status code 500 represents internal server error and
// should be retried without any throttle.
if r.HTTPResponse.StatusCode == 500 {
return true
}
}
return IsErrorRetryable(r.Error)
}
// IsErrorThrottle returns whether the error is to be throttled based on its code.
// Returns false if the request has no Error set
// IsErrorThrottle returns whether the error is to be throttled based on its
// code. Returns false if the request has no Error set.
//
// Alias for the utility function IsErrorThrottle
func (r *Request) IsErrorThrottle() bool {
if isErrCode(r.Error, r.ThrottleErrorCodes) {
return true
}
if r.HTTPResponse != nil {
switch r.HTTPResponse.StatusCode {
case
429, // error caused due to too many requests
502, // Bad Gateway error should be throttled
503, // caused when service is unavailable
504: // error occurred due to gateway timeout
return true
}
}
return IsErrorThrottle(r.Error)
}
func isErrCode(err error, codes []string) bool {
if aerr, ok := err.(awserr.Error); ok && aerr != nil {
for _, code := range codes {
if code == aerr.Code() {
return true
}
}
}
return false
}
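A hedged sketch of feeding the new RetryErrorCodes hook from a request handler; the error code string is hypothetical and only illustrates the mechanism (assumes the request and session imports).

sess := session.Must(session.NewSession())
sess.Handlers.Build.PushBack(func(r *request.Request) {
	// "MyTransientError" is a made-up code; real services define their own.
	r.RetryErrorCodes = append(r.RetryErrorCodes, "MyTransientError")
})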
// IsErrorExpired returns whether the error code is a credential expiry error.
// Returns false if the request has no Error set.
//

View File

@ -0,0 +1,259 @@
package session
import (
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
func resolveCredentials(cfg *aws.Config,
envCfg envConfig, sharedCfg sharedConfig,
handlers request.Handlers,
sessOpts Options,
) (*credentials.Credentials, error) {
switch {
case len(sessOpts.Profile) != 0:
// User explicitly provided a Profile in the session's configuration
// so load that profile from shared config first.
// Github(aws/aws-sdk-go#2727)
return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
case envCfg.Creds.HasKeys():
// Environment credentials
return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil
case len(envCfg.WebIdentityTokenFilePath) != 0:
// Web identity token from environment, RoleARN required to also be
// set.
return assumeWebIdentity(cfg, handlers,
envCfg.WebIdentityTokenFilePath,
envCfg.RoleARN,
envCfg.RoleSessionName,
)
default:
// Fallback to the "default" credential resolution chain.
return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
}
}
// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but
// 'AWS_ROLE_ARN' was not set.
var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil)
// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but
// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set.
var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers,
filepath string,
roleARN, sessionName string,
) (*credentials.Credentials, error) {
if len(filepath) == 0 {
return nil, WebIdentityEmptyTokenFilePathErr
}
if len(roleARN) == 0 {
return nil, WebIdentityEmptyRoleARNErr
}
creds := stscreds.NewWebIdentityCredentials(
&Session{
Config: cfg,
Handlers: handlers.Copy(),
},
roleARN,
sessionName,
filepath,
)
return creds, nil
}
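A hedged sketch of the environment-driven web identity path above; the token file path and role ARN are placeholders, not values from this change (assumes the log, os, and session imports).

os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/token")  // placeholder path
os.Setenv("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/example") // placeholder ARN
os.Setenv("AWS_ROLE_SESSION_NAME", "example-session")

sess, err := session.NewSession()
if err != nil {
	log.Fatal(err)
}
_ = sess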
func resolveCredsFromProfile(cfg *aws.Config,
envCfg envConfig, sharedCfg sharedConfig,
handlers request.Handlers,
sessOpts Options,
) (creds *credentials.Credentials, err error) {
switch {
case sharedCfg.SourceProfile != nil:
// Assume IAM role with credentials source from a different profile.
creds, err = resolveCredsFromProfile(cfg, envCfg,
*sharedCfg.SourceProfile, handlers, sessOpts,
)
case sharedCfg.Creds.HasKeys():
// Static Credentials from Shared Config/Credentials file.
creds = credentials.NewStaticCredentialsFromCreds(
sharedCfg.Creds,
)
case len(sharedCfg.CredentialProcess) != 0:
// Get credentials from CredentialProcess
creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
case len(sharedCfg.CredentialSource) != 0:
creds, err = resolveCredsFromSource(cfg, envCfg,
sharedCfg, handlers, sessOpts,
)
case len(sharedCfg.WebIdentityTokenFile) != 0:
// Credentials from Assume Web Identity token require an IAM Role, and
// that role will be assumed. May be wrapped with another assume role
// via SourceProfile.
return assumeWebIdentity(cfg, handlers,
sharedCfg.WebIdentityTokenFile,
sharedCfg.RoleARN,
sharedCfg.RoleSessionName,
)
default:
// Fallback to default credentials provider, include mock errors for
// the credential chain so user can identify why credentials failed to
// be retrieved.
creds = credentials.NewCredentials(&credentials.ChainProvider{
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
Providers: []credentials.Provider{
&credProviderError{
Err: awserr.New("EnvAccessKeyNotFound",
"failed to find credentials in the environment.", nil),
},
&credProviderError{
Err: awserr.New("SharedCredsLoad",
fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil),
},
defaults.RemoteCredProvider(*cfg, handlers),
},
})
}
if err != nil {
return nil, err
}
if len(sharedCfg.RoleARN) > 0 {
cfgCp := *cfg
cfgCp.Credentials = creds
return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
}
return creds, nil
}
// valid credential source values
const (
credSourceEc2Metadata = "Ec2InstanceMetadata"
credSourceEnvironment = "Environment"
credSourceECSContainer = "EcsContainer"
)
func resolveCredsFromSource(cfg *aws.Config,
envCfg envConfig, sharedCfg sharedConfig,
handlers request.Handlers,
sessOpts Options,
) (creds *credentials.Credentials, err error) {
switch sharedCfg.CredentialSource {
case credSourceEc2Metadata:
p := defaults.RemoteCredProvider(*cfg, handlers)
creds = credentials.NewCredentials(p)
case credSourceEnvironment:
creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds)
case credSourceECSContainer:
if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
return nil, ErrSharedConfigECSContainerEnvVarEmpty
}
p := defaults.RemoteCredProvider(*cfg, handlers)
creds = credentials.NewCredentials(p)
default:
return nil, ErrSharedConfigInvalidCredSource
}
return creds, nil
}
func credsFromAssumeRole(cfg aws.Config,
handlers request.Handlers,
sharedCfg sharedConfig,
sessOpts Options,
) (*credentials.Credentials, error) {
if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil {
// AssumeRole Token provider is required if doing Assume Role
// with MFA.
return nil, AssumeRoleTokenProviderNotSetError{}
}
return stscreds.NewCredentials(
&Session{
Config: &cfg,
Handlers: handlers.Copy(),
},
sharedCfg.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.RoleSessionName
opt.Duration = sessOpts.AssumeRoleDuration
// Assume role with external ID
if len(sharedCfg.ExternalID) > 0 {
opt.ExternalID = aws.String(sharedCfg.ExternalID)
}
// Assume role with MFA
if len(sharedCfg.MFASerial) > 0 {
opt.SerialNumber = aws.String(sharedCfg.MFASerial)
opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
}
},
), nil
}
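A hedged sketch of satisfying the MFA requirement enforced above when assuming a role from shared config; the profile name and duration are assumptions, and the session, stscreds, and time imports are assumed.

sess := session.Must(session.NewSessionWithOptions(session.Options{
	SharedConfigState:       session.SharedConfigEnable,
	Profile:                 "role-with-mfa", // hypothetical profile with role_arn and mfa_serial
	AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
	AssumeRoleDuration:      30 * time.Minute,
}))
_ = sess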
// AssumeRoleTokenProviderNotSetError is an error returned when creating a
// session when the MFAToken option is not set and the shared config is
// configured to assume a role with an MFA token.
type AssumeRoleTokenProviderNotSetError struct{}
// Code is the short id of the error.
func (e AssumeRoleTokenProviderNotSetError) Code() string {
return "AssumeRoleTokenProviderNotSetError"
}
// Message is the description of the error
func (e AssumeRoleTokenProviderNotSetError) Message() string {
return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
}
// OrigErr is the underlying error that caused the failure.
func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
return nil
}
// Error satisfies the error interface.
func (e AssumeRoleTokenProviderNotSetError) Error() string {
return awserr.SprintError(e.Code(), e.Message(), "", nil)
}
type credProviderError struct {
Err error
}
func (c credProviderError) Retrieve() (credentials.Value, error) {
return credentials.Value{}, c.Err
}
func (c credProviderError) IsExpired() bool {
return true
}

View File

@ -1,97 +1,93 @@
/*
Package session provides configuration for the SDK's service clients.
Sessions can be shared across all service clients that share the same base
configuration. The Session is built from the SDK's default configuration and
request handlers.
Sessions should be cached when possible, because creating a new Session will
load all configuration values from the environment, and config files each time
the Session is created. Sharing the Session value across all of your service
clients will ensure the configuration is loaded the fewest number of times possible.
Concurrency
Package session provides configuration for the SDK's service clients. Sessions
can be shared across service clients that share the same base configuration.
Sessions are safe to use concurrently as long as the Session is not being
modified. The SDK will not modify the Session once the Session has been created.
Creating service clients concurrently from a shared Session is safe.
modified. Sessions should be cached when possible, because creating a new
Session will load all configuration values from the environment, and config
files each time the Session is created. Sharing the Session value across all of
your service clients will ensure the configuration is loaded the fewest number
of times possible.
Sessions from Shared Config
Sessions can be created using the method above that will only load the
additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
Alternatively you can explicitly create a Session with shared config enabled.
To do this you can use NewSessionWithOptions to configure how the Session will
be created. Using the NewSessionWithOptions with SharedConfigState set to
SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
environment variable was set.
Creating Sessions
When creating Sessions optional aws.Config values can be passed in that will
override the default, or loaded config values the Session is being created
with. This allows you to provide additional, or case based, configuration
as needed.
Sessions options from Shared Config
By default NewSession will only load credentials from the shared credentials
file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
set to a truthy value the Session will be created from the configuration
values from the shared config (~/.aws/config) and shared credentials
(~/.aws/credentials) files. See the section Sessions from Shared Config for
more information.
(~/.aws/credentials) files. Using the NewSessionWithOptions with
SharedConfigState set to SharedConfigEnable will create the session as if the
AWS_SDK_LOAD_CONFIG environment variable was set.
Create a Session with the default config and request handlers. With credentials
region, and profile loaded from the environment and shared config automatically.
Requires the AWS_PROFILE to be set, or "default" is used.
Credential and config loading order
The Session will attempt to load configuration and credentials from the
environment, configuration files, and other credential sources. The order
configuration is loaded in is:
* Environment Variables
* Shared Credentials file
* Shared Configuration file (if SharedConfig is enabled)
* EC2 Instance Metadata (credentials only)
The Environment variables for credentials will have precedence over shared
config even if SharedConfig is enabled. To override this behavior and use
shared config credentials instead, specify session.Options.Profile (e.g.
when using credential_source=Environment to assume a role).
sess, err := session.NewSessionWithOptions(session.Options{
Profile: "myProfile",
})
Creating Sessions
Creating a Session without additional options will load the credentials,
region, and profile from the environment and shared config automatically. See
the "Environment Variables" section for information on environment variables
used by Session.
// Create Session
sess := session.Must(session.NewSession())
sess, err := session.NewSession()
When creating Sessions optional aws.Config values can be passed in that will
override the default, or loaded, config values the Session is being created
with. This allows you to provide additional, or case based, configuration
as needed.
// Create a Session with a custom region
sess := session.Must(session.NewSession(&aws.Config{
Region: aws.String("us-east-1"),
}))
sess, err := session.NewSession(&aws.Config{
Region: aws.String("us-west-2"),
})
// Create a S3 client instance from a session
sess := session.Must(session.NewSession())
svc := s3.New(sess)
Create Session With Option Overrides
In addition to NewSession, Sessions can be created using NewSessionWithOptions.
This func allows you to control and override how the Session will be created
through code instead of being driven by environment variables only.
Use NewSessionWithOptions when you want to provide the config profile, or
override the shared config state (AWS_SDK_LOAD_CONFIG).
Use NewSessionWithOptions to provide additional configuration driving how the
Session's configuration will be loaded. Such as, specifying shared config
profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG).
// Equivalent to session.NewSession()
sess := session.Must(session.NewSessionWithOptions(session.Options{
sess, err := session.NewSessionWithOptions(session.Options{
// Options
}))
})
// Specify profile to load for the session's config
sess := session.Must(session.NewSessionWithOptions(session.Options{
Profile: "profile_name",
}))
sess, err := session.NewSessionWithOptions(session.Options{
// Specify profile to load for the session's config
Profile: "profile_name",
// Specify profile for config and region for requests
sess := session.Must(session.NewSessionWithOptions(session.Options{
Config: aws.Config{Region: aws.String("us-east-1")},
Profile: "profile_name",
}))
// Provide SDK Config options, such as Region.
Config: aws.Config{
Region: aws.String("us-west-2"),
},
// Force enable Shared Config support
sess := session.Must(session.NewSessionWithOptions(session.Options{
// Force enable Shared Config support
SharedConfigState: session.SharedConfigEnable,
}))
})
Adding Handlers
You can add handlers to a session for processing HTTP requests. All service
clients that use the session inherit the handlers. For example, the following
handler logs every request and its payload made by a service client:
You can add handlers to a session to decorate API operations (e.g. adding HTTP
headers). All clients that use the Session receive a copy of the Session's
handlers. For example, the following request handler added to the Session logs
every request made.
// Create a session, and add additional handlers for all service
// clients created with the Session to inherit. Adds logging handler.
@ -99,22 +95,15 @@ handler logs every request and its payload made by a service client:
sess.Handlers.Send.PushFront(func(r *request.Request) {
// Log every request made and its payload
logger.Printf("Request: %s/%s, Payload: %s",
logger.Printf("Request: %s/%s, Params: %s",
r.ClientInfo.ServiceName, r.Operation, r.Params)
})
Deprecated "New" function
The New session function has been deprecated because it does not provide good
way to return errors that occur when loading the configuration files and values.
Because of this, NewSession was created so errors can be retrieved when
creating a session fails.
Shared Config Fields
By default the SDK will only load the shared credentials file's (~/.aws/credentials)
credentials values, and all other config is provided by the environment variables,
SDK defaults, and user provided aws.Config values.
By default the SDK will only load the shared credentials file's
(~/.aws/credentials) credentials values, and all other config is provided by
the environment variables, SDK defaults, and user provided aws.Config values.
If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
option is used to create the Session the full shared config values will be
@ -125,24 +114,31 @@ files have the same format.
If both config files are present the configuration from both files will be
read. The Session will be created from configuration values from the shared
credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
credentials file (~/.aws/credentials) over those in the shared config file
(~/.aws/config).
Credentials are the values the SDK should use for authenticating requests with
AWS Services. They are from a configuration file will need to include both
aws_access_key_id and aws_secret_access_key must be provided together in the
same file to be considered valid. The values will be ignored if not a complete
group. aws_session_token is an optional field that can be provided if both of
the other two fields are also provided.
Credentials are the values the SDK uses to authenticate requests with AWS
Services. When specified in a file, both aws_access_key_id and
aws_secret_access_key must be provided together in the same file to be
considered valid. They will be ignored if both are not present.
aws_session_token is an optional field that can be provided in addition to the
other two fields.
aws_access_key_id = AKID
aws_secret_access_key = SECRET
aws_session_token = TOKEN
Assume Role values allow you to configure the SDK to assume an IAM role using
a set of credentials provided in a config file via the source_profile field.
Both "role_arn" and "source_profile" are required. The SDK supports assuming
a role with MFA token if the session option AssumeRoleTokenProvider
is set.
; region only supported if SharedConfigEnabled.
region = us-east-1
Assume Role configuration
The role_arn field allows you to configure the SDK to assume an IAM role using
a set of credentials from another source, such as static credentials paired
with the "source_profile", "credential_process", or "credential_source"
fields. If "role_arn" is provided, a source of credentials must also be
specified, such as "source_profile", "credential_source", or
"credential_process".
role_arn = arn:aws:iam::<account_number>:role/<role_name>
source_profile = profile_with_creds
@ -150,40 +146,16 @@ is set.
mfa_serial = <serial or mfa arn>
role_session_name = session_name
Region is the region the SDK should use for looking up AWS service endpoints
and signing requests.
region = us-east-1
Assume Role with MFA token
To create a session with support for assuming an IAM role with MFA set the
session option AssumeRoleTokenProvider to a function that will prompt for the
MFA token code when the SDK assumes the role and refreshes the role's credentials.
This allows you to configure the SDK via the shared config to assumea role
with MFA tokens.
In order for the SDK to assume a role with MFA the SharedConfigState
session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG
environment variable set.
The shared configuration instructs the SDK to assume an IAM role with MFA
when the mfa_serial configuration field is set in the shared config
(~/.aws/config) or shared credentials (~/.aws/credentials) file.
If mfa_serial is set in the configuration, the SDK will assume the role, and
the AssumeRoleTokenProvider session option is not set an an error will
be returned when creating the session.
The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you
must also set the Session Option.AssumeRoleTokenProvider. The Session will fail
to load if the AssumeRoleTokenProvider is not specified.
sess := session.Must(session.NewSessionWithOptions(session.Options{
AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
}))
// Create service client value configured for credentials
// from assumed role.
svc := s3.New(sess)
To setup assume role outside of a session see the stscreds.AssumeRoleProvider
To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider
documentation.
Environment Variables

View File

@ -1,12 +1,14 @@
package session
import (
"fmt"
"os"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/endpoints"
)
// EnvProviderName provides a name of the provider when config is loaded from environment.
@ -99,21 +101,55 @@ type envConfig struct {
CustomCABundle string
csmEnabled string
CSMEnabled bool
CSMEnabled *bool
CSMPort string
CSMHost string
CSMClientID string
enableEndpointDiscovery string
// Enables endpoint discovery via environment variables.
//
// AWS_ENABLE_ENDPOINT_DISCOVERY=true
EnableEndpointDiscovery *bool
enableEndpointDiscovery string
// Specifies the WebIdentity token the SDK should use to assume a role
// with.
//
// AWS_WEB_IDENTITY_TOKEN_FILE=file_path
WebIdentityTokenFilePath string
// Specifies the IAM role arn to use when assuming an role.
//
// AWS_ROLE_ARN=role_arn
RoleARN string
// Specifies the IAM role session name to use when assuming a role.
//
// AWS_ROLE_SESSION_NAME=session_name
RoleSessionName string
// Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint
// for a service.
//
// AWS_STS_REGIONAL_ENDPOINTS=regional
// This can take value as `regional` or `legacy`
STSRegionalEndpoint endpoints.STSRegionalEndpoint
// Specifies the S3 Regional Endpoint flag for the SDK to resolve the
// endpoint for a service.
//
// AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional
// This can take value as `regional` or `legacy`
S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
}
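A hedged sketch of driving the two new flags from the environment; values other than "regional" or "legacy" should cause session creation to return an error (assumes the log, os, and session imports).

os.Setenv("AWS_STS_REGIONAL_ENDPOINTS", "regional")
os.Setenv("AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", "regional")

sess, err := session.NewSession()
if err != nil {
	log.Fatal(err)
}
_ = sess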
var (
csmEnabledEnvKey = []string{
"AWS_CSM_ENABLED",
}
csmHostEnvKey = []string{
"AWS_CSM_HOST",
}
csmPortEnvKey = []string{
"AWS_CSM_PORT",
}
@ -150,6 +186,21 @@ var (
sharedConfigFileEnvKey = []string{
"AWS_CONFIG_FILE",
}
webIdentityTokenFilePathEnvKey = []string{
"AWS_WEB_IDENTITY_TOKEN_FILE",
}
roleARNEnvKey = []string{
"AWS_ROLE_ARN",
}
roleSessionNameEnvKey = []string{
"AWS_ROLE_SESSION_NAME",
}
stsRegionalEndpointKey = []string{
"AWS_STS_REGIONAL_ENDPOINTS",
}
s3UsEast1RegionalEndpoint = []string{
"AWS_S3_US_EAST_1_REGIONAL_ENDPOINT",
}
)
// loadEnvConfig retrieves the SDK's environment configuration.
@ -158,7 +209,7 @@ var (
// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
// the shared SDK config will be loaded in addition to the SDK's specific
// configuration values.
func loadEnvConfig() envConfig {
func loadEnvConfig() (envConfig, error) {
enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
return envConfigLoad(enableSharedConfig)
}
@ -169,30 +220,42 @@ func loadEnvConfig() envConfig {
// Loads the shared configuration in addition to the SDK's specific configuration.
// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
// environment variable is set.
func loadSharedEnvConfig() envConfig {
func loadSharedEnvConfig() (envConfig, error) {
return envConfigLoad(true)
}
func envConfigLoad(enableSharedConfig bool) envConfig {
func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
cfg := envConfig{}
cfg.EnableSharedConfig = enableSharedConfig
setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
// Static environment credentials
var creds credentials.Value
setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey)
setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey)
setFromEnvVal(&creds.SessionToken, credSessionEnvKey)
if creds.HasKeys() {
// Require logical grouping of credentials
creds.ProviderName = EnvProviderName
cfg.Creds = creds
}
// Role Metadata
setFromEnvVal(&cfg.RoleARN, roleARNEnvKey)
setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey)
// Web identity environment variables
setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey)
// CSM environment variables
setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
setFromEnvVal(&cfg.CSMHost, csmHostEnvKey)
setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
cfg.CSMEnabled = len(cfg.csmEnabled) > 0
// Require logical grouping of credentials
if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
cfg.Creds = credentials.Value{}
} else {
cfg.Creds.ProviderName = EnvProviderName
if len(cfg.csmEnabled) != 0 {
v, _ := strconv.ParseBool(cfg.csmEnabled)
cfg.CSMEnabled = &v
}
regionKeys := regionEnvKeys
@ -223,12 +286,33 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
return cfg
var err error
// STS Regional Endpoint variable
for _, k := range stsRegionalEndpointKey {
if v := os.Getenv(k); len(v) != 0 {
cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v)
if err != nil {
return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
}
}
}
// S3 Regional Endpoint variable
for _, k := range s3UsEast1RegionalEndpoint {
if v := os.Getenv(k); len(v) != 0 {
cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v)
if err != nil {
return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
}
}
}
return cfg, nil
}
func setFromEnvVal(dst *string, keys []string) {
for _, k := range keys {
if v := os.Getenv(k); len(v) > 0 {
if v := os.Getenv(k); len(v) != 0 {
*dst = v
break
}

View File

@ -8,19 +8,17 @@ import (
"io/ioutil"
"net/http"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/csm"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
const (
@ -75,7 +73,7 @@ type Session struct {
// func is called instead of waiting to receive an error until a request is made.
func New(cfgs ...*aws.Config) *Session {
// load initial config from environment
envCfg := loadEnvConfig()
envCfg, envErr := loadEnvConfig()
if envCfg.EnableSharedConfig {
var cfg aws.Config
@ -95,19 +93,28 @@ func New(cfgs ...*aws.Config) *Session {
// Session creation failed, need to report the error and prevent
// any requests from succeeding.
s = &Session{Config: defaults.Config()}
s.Config.MergeIn(cfgs...)
s.Config.Logger.Log("ERROR:", msg, "Error:", err)
s.Handlers.Validate.PushBack(func(r *request.Request) {
r.Error = err
})
s.logDeprecatedNewSessionError(msg, err, cfgs)
}
return s
}
s := deprecatedNewSession(cfgs...)
if envCfg.CSMEnabled {
enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
if envErr != nil {
msg := "failed to load env config"
s.logDeprecatedNewSessionError(msg, envErr, cfgs)
}
if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil {
if l := s.Config.Logger; l != nil {
l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
}
} else if csmCfg.Enabled {
err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
if err != nil {
msg := "failed to enable CSM"
s.logDeprecatedNewSessionError(msg, err, cfgs)
}
}
return s
@ -126,7 +133,7 @@ func New(cfgs ...*aws.Config) *Session {
// to be built with retrieving credentials with AssumeRole set in the config.
//
// See the NewSessionWithOptions func for information on how to override or
// control through code how the Session will be created. Such as specifying the
// control through code how the Session will be created, such as specifying the
// config profile, and controlling if shared config is enabled or not.
func NewSession(cfgs ...*aws.Config) (*Session, error) {
opts := Options{}
@ -210,6 +217,12 @@ type Options struct {
// the config enables assume role with MFA via the mfa_serial field.
AssumeRoleTokenProvider func() (string, error)
// When the SDK's shared config is configured to assume a role this option
// may be provided to set the expiry duration of the STS credentials.
// Defaults to 15 minutes if not set as documented in the
// stscreds.AssumeRoleProvider.
AssumeRoleDuration time.Duration
// Reader for a custom Credentials Authority (CA) bundle in PEM format that
// the SDK will use instead of the default system's root CA bundle. Use this
// only if you want to replace the CA bundle the SDK uses for TLS requests.
@ -224,6 +237,12 @@ type Options struct {
// to also enable this feature. CustomCABundle session option field has priority
// over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
CustomCABundle io.Reader
// The handlers that the session and all API clients will be created with.
// This must be a complete set of handlers. Use the defaults.Handlers()
// function to initialize this value before changing the handlers to be
// used by the SDK.
Handlers request.Handlers
}
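A hedged sketch of the new Options.Handlers override: start from the SDK defaults, add a handler, and create the Session with that complete set (assumes the defaults, log, request, and session imports).

h := defaults.Handlers()
h.Send.PushFront(func(r *request.Request) {
	log.Printf("sending %s/%s", r.ClientInfo.ServiceName, r.Operation.Name)
})

sess, err := session.NewSessionWithOptions(session.Options{
	Handlers: h,
})
if err != nil {
	log.Fatal(err)
}
_ = sess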
// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
@ -257,13 +276,20 @@ type Options struct {
// }))
func NewSessionWithOptions(opts Options) (*Session, error) {
var envCfg envConfig
var err error
if opts.SharedConfigState == SharedConfigEnable {
envCfg = loadSharedEnvConfig()
envCfg, err = loadSharedEnvConfig()
if err != nil {
return nil, fmt.Errorf("failed to load shared config, %v", err)
}
} else {
envCfg = loadEnvConfig()
envCfg, err = loadEnvConfig()
if err != nil {
return nil, fmt.Errorf("failed to load environment config, %v", err)
}
}
if len(opts.Profile) > 0 {
if len(opts.Profile) != 0 {
envCfg.Profile = opts.Profile
}
@ -329,27 +355,33 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
return s
}
func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) {
logger.Log("Enabling CSM")
if len(port) == 0 {
port = csm.DefaultPort
func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error {
if logger != nil {
logger.Log("Enabling CSM")
}
r, err := csm.Start(clientID, "127.0.0.1:"+port)
r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port))
if err != nil {
return
return err
}
r.InjectHandlers(handlers)
return nil
}
func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
cfg := defaults.Config()
handlers := defaults.Handlers()
handlers := opts.Handlers
if handlers.IsEmpty() {
handlers = defaults.Handlers()
}
// Get a merged version of the user provided config to determine if
// credentials were provided.
userCfg := &aws.Config{}
userCfg.MergeIn(cfgs...)
cfg.MergeIn(userCfg)
// Ordered config files will be loaded in with later files overwriting
// previous config file values.
@ -366,9 +398,17 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
}
// Load additional config from file(s)
sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
if err != nil {
return nil, err
if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
// Special case where the user has not explicitly specified an AWS_PROFILE,
// or session.Options.profile, shared config is not enabled, and the
// environment has credentials, allow the shared config file to fail to
// load since the user has already provided credentials, and nothing else
// needs to be read from the config file. GitHub(aws/aws-sdk-go#2455)
} else if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
return nil, err
}
}
if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
@ -381,8 +421,16 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
}
initHandlers(s)
if envCfg.CSMEnabled {
enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil {
if l := s.Config.Logger; l != nil {
l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
}
} else if csmCfg.Enabled {
err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
if err != nil {
return nil, err
}
}
// Setup HTTP client with custom cert bundle if enabled
@ -395,6 +443,46 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
return s, nil
}
type csmConfig struct {
Enabled bool
Host string
Port string
ClientID string
}
var csmProfileName = "aws_csm"
func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
if envCfg.CSMEnabled != nil {
if *envCfg.CSMEnabled {
return csmConfig{
Enabled: true,
ClientID: envCfg.CSMClientID,
Host: envCfg.CSMHost,
Port: envCfg.CSMPort,
}, nil
}
return csmConfig{}, nil
}
sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false)
if err != nil {
if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
return csmConfig{}, err
}
}
if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled {
return csmConfig{
Enabled: true,
ClientID: sharedCfg.CSMClientID,
Host: sharedCfg.CSMHost,
Port: sharedCfg.CSMPort,
}, nil
}
return csmConfig{}, nil
}
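A rough sketch of how this path is typically exercised; the AWS_CSM_* environment variable names are assumed here to be the ones read into envConfig, and the values are illustrative:

package main

import (
    "os"

    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // Setting these before the Session is built is what makes loadCSMConfig
    // report Enabled and enableCSM inject the metric reporter handlers.
    os.Setenv("AWS_CSM_ENABLED", "true")
    os.Setenv("AWS_CSM_PORT", "31000")
    os.Setenv("AWS_CSM_CLIENT_ID", "my-app")

    sess := session.Must(session.NewSession())
    _ = sess
}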
func loadCustomCABundle(s *Session, bundle io.Reader) error {
var t *http.Transport
switch v := s.Config.HTTPClient.Transport.(type) {
@ -443,9 +531,11 @@ func loadCertPool(r io.Reader) (*x509.CertPool, error) {
return p, nil
}
func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
// Merge in user provided configuration
cfg.MergeIn(userCfg)
func mergeConfigSrcs(cfg, userCfg *aws.Config,
envCfg envConfig, sharedCfg sharedConfig,
handlers request.Handlers,
sessOpts Options,
) error {
// Region if not already set by user
if len(aws.StringValue(cfg.Region)) == 0 {
@ -464,162 +554,51 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
}
}
// Configure credentials if not already set
// Regional Endpoint flag for STS endpoint resolving
mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{
userCfg.STSRegionalEndpoint,
envCfg.STSRegionalEndpoint,
sharedCfg.STSRegionalEndpoint,
endpoints.LegacySTSEndpoint,
})
// Regional Endpoint flag for S3 endpoint resolving
mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{
userCfg.S3UsEast1RegionalEndpoint,
envCfg.S3UsEast1RegionalEndpoint,
sharedCfg.S3UsEast1RegionalEndpoint,
endpoints.LegacyS3UsEast1Endpoint,
})
// Configure credentials if not already set by the user when creating the
// Session.
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
// inspect the profile to see if a credential source has been specified.
if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 {
// if both credential_source and source_profile have been set, return an error
// as this is undefined behavior.
if len(sharedCfg.AssumeRole.SourceProfile) > 0 {
return ErrSharedConfigSourceCollision
}
// valid credential source values
const (
credSourceEc2Metadata = "Ec2InstanceMetadata"
credSourceEnvironment = "Environment"
credSourceECSContainer = "EcsContainer"
)
switch sharedCfg.AssumeRole.CredentialSource {
case credSourceEc2Metadata:
cfgCp := *cfg
p := defaults.RemoteCredProvider(cfgCp, handlers)
cfgCp.Credentials = credentials.NewCredentials(p)
if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
// AssumeRole Token provider is required if doing Assume Role
// with MFA.
return AssumeRoleTokenProviderNotSetError{}
}
cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
case credSourceEnvironment:
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
envCfg.Creds,
)
case credSourceECSContainer:
if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
return ErrSharedConfigECSContainerEnvVarEmpty
}
cfgCp := *cfg
p := defaults.RemoteCredProvider(cfgCp, handlers)
creds := credentials.NewCredentials(p)
cfg.Credentials = creds
default:
return ErrSharedConfigInvalidCredSource
}
return nil
}
if len(envCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
envCfg.Creds,
)
} else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
cfgCp := *cfg
cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.AssumeRoleSource.Creds,
)
if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
// AssumeRole Token provider is required if doing Assume Role
// with MFA.
return AssumeRoleTokenProviderNotSetError{}
}
cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.Creds,
)
} else if len(sharedCfg.CredentialProcess) > 0 {
cfg.Credentials = processcreds.NewCredentials(
sharedCfg.CredentialProcess,
)
} else {
// Fallback to default credentials provider, include mock errors
// for the credential chain so user can identify why credentials
// failed to be retrieved.
cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
Providers: []credentials.Provider{
&credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
&credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
defaults.RemoteCredProvider(*cfg, handlers),
},
})
creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
if err != nil {
return err
}
cfg.Credentials = creds
}
return nil
}
func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials {
return stscreds.NewCredentials(
&Session{
Config: &cfg,
Handlers: handlers.Copy(),
},
sharedCfg.AssumeRole.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
// Assume role with external ID
if len(sharedCfg.AssumeRole.ExternalID) > 0 {
opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
}
// Assume role with MFA
if len(sharedCfg.AssumeRole.MFASerial) > 0 {
opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
}
},
)
func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) {
for _, v := range values {
if v != endpoints.UnsetSTSEndpoint {
cfg.STSRegionalEndpoint = v
break
}
}
}
// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the
// AssumeRoleTokenProvider option is not set and the shared config is configured to
// assume a role with an MFA token.
type AssumeRoleTokenProviderNotSetError struct{}
// Code is the short id of the error.
func (e AssumeRoleTokenProviderNotSetError) Code() string {
return "AssumeRoleTokenProviderNotSetError"
}
// Message is the description of the error
func (e AssumeRoleTokenProviderNotSetError) Message() string {
return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
}
// OrigErr is the underlying error that caused the failure.
func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
return nil
}
// Error satisfies the error interface.
func (e AssumeRoleTokenProviderNotSetError) Error() string {
return awserr.SprintError(e.Code(), e.Message(), "", nil)
}
type credProviderError struct {
Err error
}
var emptyCreds = credentials.Value{}
func (c credProviderError) Retrieve() (credentials.Value, error) {
return credentials.Value{}, c.Err
}
func (c credProviderError) IsExpired() bool {
return true
func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) {
for _, v := range values {
if v != endpoints.UnsetS3UsEast1Endpoint {
cfg.S3UsEast1RegionalEndpoint = v
break
}
}
}
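A short sketch of setting these flags at the highest-precedence source (the user config merged above); the field and constant names match the endpoints package referenced in this change:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/endpoints"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    // Opt in to regional STS and S3 us-east-1 endpoints; unset values fall
    // through to the environment, shared config, and finally the legacy defaults.
    sess := session.Must(session.NewSession(&aws.Config{
        STSRegionalEndpoint:       endpoints.RegionalSTSEndpoint,
        S3UsEast1RegionalEndpoint: endpoints.RegionalS3UsEast1Endpoint,
    }))
    _ = sess
}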
func initHandlers(s *Session) {
@ -630,7 +609,7 @@ func initHandlers(s *Session) {
}
}
// Copy creates and returns a copy of the current Session, coping the config
// Copy creates and returns a copy of the current Session, copying the config
// and handlers. If any additional configs are provided they will be merged
// on top of the Session's copied config.
//
@ -650,37 +629,15 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session {
// ClientConfig satisfies the client.ConfigProvider interface and is used to
// configure the service client instances. Passing the Session to the service
// client's constructor (New) will use this method to configure the client.
func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
// Backwards compatibility, the error will be eaten if user calls ClientConfig
// directly. All SDK services will use ClientconfigWithError.
cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
return cfg
}
func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config {
s = s.Copy(cfgs...)
var resolved endpoints.ResolvedEndpoint
var err error
region := aws.StringValue(s.Config.Region)
if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
resolved.SigningRegion = region
} else {
resolved, err = s.Config.EndpointResolver.EndpointFor(
serviceName, region,
func(opt *endpoints.Options) {
opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
// Support the condition where the service is modeled but its
// endpoint metadata is not available.
opt.ResolveUnknownService = true
},
)
resolved, err := s.resolveEndpoint(service, region, s.Config)
if err != nil && s.Config.Logger != nil {
s.Config.Logger.Log(fmt.Sprintf(
"ERROR: unable to resolve endpoint for service %q, region %q, err: %v",
service, region, err))
}
return client.Config{
@ -690,7 +647,42 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (
SigningRegion: resolved.SigningRegion,
SigningNameDerived: resolved.SigningNameDerived,
SigningName: resolved.SigningName,
}, err
}
}
func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) {
if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 {
return endpoints.ResolvedEndpoint{
URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)),
SigningRegion: region,
}, nil
}
resolved, err := cfg.EndpointResolver.EndpointFor(service, region,
func(opt *endpoints.Options) {
opt.DisableSSL = aws.BoolValue(cfg.DisableSSL)
opt.UseDualStack = aws.BoolValue(cfg.UseDualStack)
// Support for STSRegionalEndpoint where the STSRegionalEndpoint is
// provided in envConfig or sharedConfig with envConfig getting
// precedence.
opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint
// Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is
// provided in envConfig or sharedConfig with envConfig getting
// precedence.
opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint
// Support the condition where the service is modeled but its
// endpoint metadata is not available.
opt.ResolveUnknownService = true
},
)
if err != nil {
return endpoints.ResolvedEndpoint{}, err
}
return resolved, nil
}
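A minimal sketch of the short-circuit branch above, assuming a local endpoint such as a DynamoDB emulator; the printed values reflect how resolveEndpoint only adds a scheme and keeps the configured region for signing:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{
        Region:   aws.String("us-west-2"),
        Endpoint: aws.String("localhost:8000"), // assumed local emulator address
    }))
    cfg := sess.ClientConfig("dynamodb")
    fmt.Println(cfg.Endpoint, cfg.SigningRegion) // https://localhost:8000 us-west-2
}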
// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
@ -700,12 +692,9 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf
s = s.Copy(cfgs...)
var resolved endpoints.ResolvedEndpoint
region := aws.StringValue(s.Config.Region)
if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
resolved.SigningRegion = region
resolved.SigningRegion = aws.StringValue(s.Config.Region)
}
return client.Config{
@ -717,3 +706,14 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf
SigningName: resolved.SigningName,
}
}
// logDeprecatedNewSessionError logs the Session creation error and ensures requests made with the Session fail with it.
func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) {
// Session creation failed, need to report the error and prevent
// any requests from succeeding.
s.Config.MergeIn(cfgs...)
s.Config.Logger.Log("ERROR:", msg, "Error:", err)
s.Handlers.Validate.PushBack(func(r *request.Request) {
r.Error = err
})
}

View File

@ -5,7 +5,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/internal/ini"
)
@ -23,13 +23,29 @@ const (
mfaSerialKey = `mfa_serial` // optional
roleSessionNameKey = `role_session_name` // optional
// CSM options
csmEnabledKey = `csm_enabled`
csmHostKey = `csm_host`
csmPortKey = `csm_port`
csmClientIDKey = `csm_client_id`
// Additional Config fields
regionKey = `region`
// endpoint discovery group
enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
// External Credential Process
credentialProcessKey = `credential_process`
credentialProcessKey = `credential_process` // optional
// Web Identity Token File
webIdentityTokenFileKey = `web_identity_token_file` // optional
// Additional config fields for regional or legacy endpoints
stsRegionalEndpointSharedKey = `sts_regional_endpoints`
// Additional config fields for regional or legacy endpoints
s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint`
// DefaultSharedConfigProfile is the default profile to be used when
// loading configuration from the config files if another profile name
@ -37,36 +53,33 @@ const (
DefaultSharedConfigProfile = `default`
)
type assumeRoleConfig struct {
RoleARN string
SourceProfile string
CredentialSource string
ExternalID string
MFASerial string
RoleSessionName string
}
// sharedConfig represents the configuration fields of the SDK config files.
type sharedConfig struct {
// Credentials values from the config file. Both aws_access_key_id
// and aws_secret_access_key must be provided together in the same file
// to be considered valid. The values will be ignored if not a complete group.
// aws_session_token is an optional field that can be provided if both of the
// other two fields are also provided.
// Credentials values from the config file. Both aws_access_key_id and
// aws_secret_access_key must be provided together in the same file to be
// considered valid. The values will be ignored if not a complete group.
// aws_session_token is an optional field that can be provided if both of
// the other two fields are also provided.
//
// aws_access_key_id
// aws_secret_access_key
// aws_session_token
Creds credentials.Value
AssumeRole assumeRoleConfig
AssumeRoleSource *sharedConfig
CredentialSource string
CredentialProcess string
WebIdentityTokenFile string
// An external process to request credentials
CredentialProcess string
RoleARN string
RoleSessionName string
ExternalID string
MFASerial string
// Region is the region the SDK should use for looking up AWS service endpoints
// and signing requests.
SourceProfileName string
SourceProfile *sharedConfig
// Region is the region the SDK should use for looking up AWS service
// endpoints and signing requests.
//
// region
Region string
@ -76,6 +89,23 @@ type sharedConfig struct {
//
// endpoint_discovery_enabled = true
EnableEndpointDiscovery *bool
// CSM Options
CSMEnabled *bool
CSMHost string
CSMPort string
CSMClientID string
// Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
//
// sts_regional_endpoints = regional
// This can take the value `LegacySTSEndpoint` or `RegionalSTSEndpoint`
STSRegionalEndpoint endpoints.STSRegionalEndpoint
// Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
//
// s3_us_east_1_regional_endpoint = regional
// This can take the value `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint`
S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
}
type sharedConfigFile struct {
@ -83,17 +113,18 @@ type sharedConfigFile struct {
IniData ini.Sections
}
// loadSharedConfig retrieves the configuration from the list of files
// using the profile provided. The order the files are listed will determine
// loadSharedConfig retrieves the configuration from the list of files using
// the profile provided. The order the files are listed will determine
// precedence. Values in subsequent files will overwrite values defined in
// earlier files.
//
// For example, given two files A and B. Both define credentials. If the order
// of the files are A then B, B's credential values will be used instead of A's.
// of the files are A then B, B's credential values will be used instead of
// A's.
//
// See sharedConfig.setFromFile for information how the config files
// will be loaded.
func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) {
if len(profile) == 0 {
profile = DefaultSharedConfigProfile
}
@ -104,16 +135,11 @@ func loadSharedConfig(profile string, filenames []string) (sharedConfig, error)
}
cfg := sharedConfig{}
if err = cfg.setFromIniFiles(profile, files); err != nil {
profiles := map[string]struct{}{}
if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil {
return sharedConfig{}, err
}
if len(cfg.AssumeRole.SourceProfile) > 0 {
if err := cfg.setAssumeRoleSource(profile, files); err != nil {
return sharedConfig{}, err
}
}
return cfg, nil
}
@ -137,60 +163,88 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
return files, nil
}
func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
var assumeRoleSrc sharedConfig
if len(cfg.AssumeRole.CredentialSource) > 0 {
// setAssumeRoleSource is only called when source_profile is found.
// If both source_profile and credential_source are set, then
// ErrSharedConfigSourceCollision will be returned
return ErrSharedConfigSourceCollision
}
// Multiple level assume role chains are not support
if cfg.AssumeRole.SourceProfile == origProfile {
assumeRoleSrc = *cfg
assumeRoleSrc.AssumeRole = assumeRoleConfig{}
} else {
err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
if err != nil {
return err
}
}
if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
}
cfg.AssumeRoleSource = &assumeRoleSrc
return nil
}
func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
// Trim files from the list that don't exist.
var skippedFiles int
var profileNotFoundErr error
for _, f := range files {
if err := cfg.setFromIniFile(profile, f); err != nil {
if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
if _, ok := err.(SharedConfigProfileNotExistsError); ok {
// Ignore proviles missings
// Ignore profiles not defined in individual files.
profileNotFoundErr = err
skippedFiles++
continue
}
return err
}
}
if skippedFiles == len(files) {
// If all files were skipped because the profile is not found, return
// the original profile not found error.
return profileNotFoundErr
}
if _, ok := profiles[profile]; ok {
// if this is the second instance of the profile the Assume Role
// options must be cleared because they are only valid for the
// first reference of a profile. The self-linked instance of the
// profile only has credential provider options.
cfg.clearAssumeRoleOptions()
} else {
// First time a profile has been seen. It must either be an assume role
// or credentials profile. Assert that if the credential type requires a role ARN,
// the ARN is also set.
if err := cfg.validateCredentialsRequireARN(profile); err != nil {
return err
}
}
profiles[profile] = struct{}{}
if err := cfg.validateCredentialType(); err != nil {
return err
}
// Link source profiles for assume roles
if len(cfg.SourceProfileName) != 0 {
// Linked profile via source_profile ignore credential provider
// options, the source profile must provide the credentials.
cfg.clearCredentialOptions()
srcCfg := &sharedConfig{}
err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
if err != nil {
// SourceProfile that doesn't exist is an error in configuration.
if _, ok := err.(SharedConfigProfileNotExistsError); ok {
err = SharedConfigAssumeRoleError{
RoleARN: cfg.RoleARN,
SourceProfile: cfg.SourceProfileName,
}
}
return err
}
if !srcCfg.hasCredentials() {
return SharedConfigAssumeRoleError{
RoleARN: cfg.RoleARN,
SourceProfile: cfg.SourceProfileName,
}
}
cfg.SourceProfile = srcCfg
}
return nil
}
// setFromFile loads the configuration from the file using
// the profile provided. A sharedConfig pointer type value is used so that
// multiple config file loadings can be chained.
// setFromFile loads the configuration from the file using the profile
// provided. A sharedConfig pointer type value is used so that multiple config
// file loadings can be chained.
//
// Only loads complete logically grouped values, and will not set fields in cfg
// for incomplete grouped values in the config. Such as credentials. For example
// if a config file only includes aws_access_key_id but no aws_secret_access_key
// the aws_access_key_id will be ignored.
func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
// for incomplete grouped values in the config. Such as credentials. For
// example if a config file only includes aws_access_key_id but no
// aws_secret_access_key the aws_access_key_id will be ignored.
func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
section, ok := file.IniData.GetSection(profile)
if !ok {
// Fallback to the alternate profile name: profile <name>
@ -200,53 +254,160 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e
}
}
if exOpts {
// Assume Role Parameters
updateString(&cfg.RoleARN, section, roleArnKey)
updateString(&cfg.ExternalID, section, externalIDKey)
updateString(&cfg.MFASerial, section, mfaSerialKey)
updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
updateString(&cfg.SourceProfileName, section, sourceProfileKey)
updateString(&cfg.CredentialSource, section, credentialSourceKey)
updateString(&cfg.Region, section, regionKey)
if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
sre, err := endpoints.GetSTSRegionalEndpoint(v)
if err != nil {
return fmt.Errorf("failed to load %s from shared config, %s, %v",
stsRegionalEndpointSharedKey, file.Filename, err)
}
cfg.STSRegionalEndpoint = sre
}
if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 {
sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v)
if err != nil {
return fmt.Errorf("failed to load %s from shared config, %s, %v",
s3UsEast1RegionalSharedKey, file.Filename, err)
}
cfg.S3UsEast1RegionalEndpoint = sre
}
}
updateString(&cfg.CredentialProcess, section, credentialProcessKey)
updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey)
// Shared Credentials
akid := section.String(accessKeyIDKey)
secret := section.String(secretAccessKey)
if len(akid) > 0 && len(secret) > 0 {
cfg.Creds = credentials.Value{
AccessKeyID: akid,
SecretAccessKey: secret,
SessionToken: section.String(sessionTokenKey),
ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
}
creds := credentials.Value{
AccessKeyID: section.String(accessKeyIDKey),
SecretAccessKey: section.String(secretAccessKey),
SessionToken: section.String(sessionTokenKey),
ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
}
// Assume Role
roleArn := section.String(roleArnKey)
srcProfile := section.String(sourceProfileKey)
credentialSource := section.String(credentialSourceKey)
hasSource := len(srcProfile) > 0 || len(credentialSource) > 0
if len(roleArn) > 0 && hasSource {
cfg.AssumeRole = assumeRoleConfig{
RoleARN: roleArn,
SourceProfile: srcProfile,
CredentialSource: credentialSource,
ExternalID: section.String(externalIDKey),
MFASerial: section.String(mfaSerialKey),
RoleSessionName: section.String(roleSessionNameKey),
}
}
// `credential_process`
if credProc := section.String(credentialProcessKey); len(credProc) > 0 {
cfg.CredentialProcess = credProc
}
// Region
if v := section.String(regionKey); len(v) > 0 {
cfg.Region = v
if creds.HasKeys() {
cfg.Creds = creds
}
// Endpoint discovery
if section.Has(enableEndpointDiscoveryKey) {
v := section.Bool(enableEndpointDiscoveryKey)
cfg.EnableEndpointDiscovery = &v
updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
// CSM options
updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey)
updateString(&cfg.CSMHost, section, csmHostKey)
updateString(&cfg.CSMPort, section, csmPortKey)
updateString(&cfg.CSMClientID, section, csmClientIDKey)
return nil
}
func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error {
var credSource string
switch {
case len(cfg.SourceProfileName) != 0:
credSource = sourceProfileKey
case len(cfg.CredentialSource) != 0:
credSource = credentialSourceKey
case len(cfg.WebIdentityTokenFile) != 0:
credSource = webIdentityTokenFileKey
}
if len(credSource) != 0 && len(cfg.RoleARN) == 0 {
return CredentialRequiresARNError{
Type: credSource,
Profile: profile,
}
}
return nil
}
func (cfg *sharedConfig) validateCredentialType() error {
// Only one or no credential type can be defined.
if !oneOrNone(
len(cfg.SourceProfileName) != 0,
len(cfg.CredentialSource) != 0,
len(cfg.CredentialProcess) != 0,
len(cfg.WebIdentityTokenFile) != 0,
) {
return ErrSharedConfigSourceCollision
}
return nil
}
func (cfg *sharedConfig) hasCredentials() bool {
switch {
case len(cfg.SourceProfileName) != 0:
case len(cfg.CredentialSource) != 0:
case len(cfg.CredentialProcess) != 0:
case len(cfg.WebIdentityTokenFile) != 0:
case cfg.Creds.HasKeys():
default:
return false
}
return true
}
func (cfg *sharedConfig) clearCredentialOptions() {
cfg.CredentialSource = ""
cfg.CredentialProcess = ""
cfg.WebIdentityTokenFile = ""
cfg.Creds = credentials.Value{}
}
func (cfg *sharedConfig) clearAssumeRoleOptions() {
cfg.RoleARN = ""
cfg.ExternalID = ""
cfg.MFASerial = ""
cfg.RoleSessionName = ""
cfg.SourceProfileName = ""
}
func oneOrNone(bs ...bool) bool {
var count int
for _, b := range bs {
if b {
count++
if count > 1 {
return false
}
}
}
return true
}
// updateString will only update the dst with the value in the section key, if the
// key is present in the section.
func updateString(dst *string, section ini.Section, key string) {
if !section.Has(key) {
return
}
*dst = section.String(key)
}
// updateBoolPtr will only update the dst with the value in the section key, if the
// key is present in the section.
func updateBoolPtr(dst **bool, section ini.Section, key string) {
if !section.Has(key) {
return
}
*dst = new(bool)
**dst = section.Bool(key)
}
// SharedConfigLoadError is an error for the shared config file failed to load.
type SharedConfigLoadError struct {
Filename string
@ -304,7 +465,8 @@ func (e SharedConfigProfileNotExistsError) Error() string {
// profile contains assume role information, but that information is invalid
// or not complete.
type SharedConfigAssumeRoleError struct {
RoleARN string
RoleARN string
SourceProfile string
}
// Code is the short id of the error.
@ -314,8 +476,10 @@ func (e SharedConfigAssumeRoleError) Code() string {
// Message is the description of the error
func (e SharedConfigAssumeRoleError) Message() string {
return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
e.RoleARN)
return fmt.Sprintf(
"failed to load assume role for %s, source profile %s has no shared credentials",
e.RoleARN, e.SourceProfile,
)
}
// OrigErr is the underlying error that caused the failure.
@ -327,3 +491,36 @@ func (e SharedConfigAssumeRoleError) OrigErr() error {
func (e SharedConfigAssumeRoleError) Error() string {
return awserr.SprintError(e.Code(), e.Message(), "", nil)
}
// CredentialRequiresARNError provides the error for shared config credentials
// that are incorrectly configured in the shared config or credentials file.
type CredentialRequiresARNError struct {
// type of credentials that were configured.
Type string
// Profile name the credentials were in.
Profile string
}
// Code is the short id of the error.
func (e CredentialRequiresARNError) Code() string {
return "CredentialRequiresARNError"
}
// Message is the description of the error
func (e CredentialRequiresARNError) Message() string {
return fmt.Sprintf(
"credential type %s requires role_arn, profile %s",
e.Type, e.Profile,
)
}
// OrigErr is the underlying error that caused the failure.
func (e CredentialRequiresARNError) OrigErr() error {
return nil
}
// Error satisfies the error interface.
func (e CredentialRequiresARNError) Error() string {
return awserr.SprintError(e.Code(), e.Message(), "", nil)
}

View File

@ -687,7 +687,11 @@ func (ctx *signingCtx) buildBodyDigest() error {
if !aws.IsReaderSeekable(ctx.Body) {
return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
}
hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
hashBytes, err := makeSha256Reader(ctx.Body)
if err != nil {
return err
}
hash = hex.EncodeToString(hashBytes)
}
if includeSHA256Header {
@ -734,10 +738,16 @@ func makeSha256(data []byte) []byte {
return hash.Sum(nil)
}
func makeSha256Reader(reader io.ReadSeeker) []byte {
func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
hash := sha256.New()
start, _ := reader.Seek(0, sdkio.SeekCurrent)
defer reader.Seek(start, sdkio.SeekStart)
start, err := reader.Seek(0, sdkio.SeekCurrent)
if err != nil {
return nil, err
}
defer func() {
// ensure error is returned if unable to seek back to start of payload.
_, err = reader.Seek(start, sdkio.SeekStart)
}()
// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
@ -748,7 +758,7 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
io.CopyN(hash, reader, size)
}
return hash.Sum(nil)
return hash.Sum(nil), nil
}
const doubleSpace = " "

View File

@ -7,13 +7,18 @@ import (
"github.com/aws/aws-sdk-go/internal/sdkio"
)
// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should
// only be used with an io.Reader that is also an io.Seeker. Doing so may
// cause request signature errors, or request body's not sent for GET, HEAD
// and DELETE HTTP methods.
// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the
// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
// streaming payload API operations.
//
// Deprecated: Should only be used with io.ReadSeeker. If using for
// S3 PutObject to stream content use s3manager.Uploader instead.
// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
// operation's input will prevent that operation from being retried in the case of
// network errors, and cause operation requests to fail if the operation
// requires payload signing.
//
// Note: If using with S3 PutObject to stream an object upload, the SDK's S3
// Upload manager (s3manager.Uploader) provides support for streaming with the
// ability to retry network errors.
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
return ReaderSeekerCloser{r}
}
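A small sketch of wrapping a plain io.Reader, with the caveats from the comment above (no retries on network errors, and unusable where payload signing is required):

package main

import (
    "io"
    "strings"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    // io.LimitReader is not an io.Seeker; wrapping it lets it be passed to
    // API input fields that expect an io.ReadSeeker, subject to the caveats
    // in the doc comment above.
    var r io.Reader = io.LimitReader(strings.NewReader("example payload"), 7)
    body := aws.ReadSeekCloser(r)
    _ = body
}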
@ -43,7 +48,8 @@ func IsReaderSeekable(r io.Reader) bool {
// Read reads from the reader up to size of p. The number of bytes read, and
// error if it occurred will be returned.
//
// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
// If the reader is not an io.Reader zero bytes read, and nil error will be
// returned.
//
// Performs the same functionality as io.Reader Read
func (r ReaderSeekerCloser) Read(p []byte) (int, error) {

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.19.39"
const SDKVersion = "1.25.41"

View File

@ -162,7 +162,7 @@ loop:
if len(tokens) == 0 {
break loop
}
// if should skip is true, we skip the tokens until should skip is set to false.
step = SkipTokenState
}
@ -218,7 +218,7 @@ loop:
// S -> equal_expr' expr_stmt'
switch k.Kind {
case ASTKindEqualExpr:
// assiging a value to some key
// assigning a value to some key
k.AppendChild(newExpression(tok))
stack.Push(newExprStatement(k))
case ASTKindExpr:
@ -250,6 +250,13 @@ loop:
if !runeCompare(tok.Raw(), openBrace) {
return nil, NewParseError("expected '['")
}
// If OpenScopeState is not at the start, we must mark the previous ast as complete
//
// for example: if previous ast was a skip statement;
// we should mark it as complete before we create a new statement
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
stmt := newStatement()
stack.Push(stmt)
@ -304,7 +311,9 @@ loop:
stmt := newCommentStatement(tok)
stack.Push(stmt)
default:
return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok))
return nil, NewParseError(
fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
k, tok.Type()))
}
if len(tokens) > 0 {
@ -314,7 +323,7 @@ loop:
// this occurs when a statement has not been completed
if stack.top > 1 {
return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container))
return nil, NewParseError("incomplete ini expression")
}
// returns a sublist which excludes the start symbol

View File

@ -22,24 +22,24 @@ func newSkipper() skipper {
}
func (s *skipper) ShouldSkip(tok Token) bool {
// should skip state will be modified only if previous token was new line (NL);
// and the current token is not WhiteSpace (WS).
if s.shouldSkip &&
s.prevTok.Type() == TokenNL &&
tok.Type() != TokenWS {
s.Continue()
return false
}
s.prevTok = tok
return s.shouldSkip
}
func (s *skipper) Skip() {
s.shouldSkip = true
s.prevTok = emptyToken
}
func (s *skipper) Continue() {
s.shouldSkip = false
// empty token is assigned as we return to default state, when should skip is false
s.prevTok = emptyToken
}

View File

@ -0,0 +1,12 @@
package sdkio
const (
// Byte is 8 bits
Byte int64 = 1
// KibiByte (KiB) is 1024 Bytes
KibiByte = Byte * 1024
// MebiByte (MiB) is 1024 KiB
MebiByte = KibiByte * 1024
// GibiByte (GiB) is 1024 MiB
GibiByte = MebiByte * 1024
)

View File

@ -0,0 +1,15 @@
// +build go1.10
package sdkmath
import "math"
// Round returns the nearest integer, rounding half away from zero.
//
// Special cases are:
// Round(±0) = ±0
// Round(±Inf) = ±Inf
// Round(NaN) = NaN
func Round(x float64) float64 {
return math.Round(x)
}

View File

@ -0,0 +1,56 @@
// +build !go1.10
package sdkmath
import "math"
// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
// Go versions prior to Go 1.10.
const (
uvone = 0x3FF0000000000000
mask = 0x7FF
shift = 64 - 11 - 1
bias = 1023
signMask = 1 << 63
fracMask = 1<<shift - 1
)
// Round returns the nearest integer, rounding half away from zero.
//
// Special cases are:
// Round(±0) = ±0
// Round(±Inf) = ±Inf
// Round(NaN) = NaN
//
// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
// Go versions prior to Go 1.10.
func Round(x float64) float64 {
// Round is a faster implementation of:
//
// func Round(x float64) float64 {
// t := Trunc(x)
// if Abs(x-t) >= 0.5 {
// return t + Copysign(1, x)
// }
// return t
// }
bits := math.Float64bits(x)
e := uint(bits>>shift) & mask
if e < bias {
// Round abs(x) < 1 including denormals.
bits &= signMask // +-0
if e == bias-1 {
bits |= uvone // +-1
}
} else if e < bias+shift {
// Round any abs(x) >= 1 containing a fractional component [0,1).
//
// Numbers with larger exponents are returned unchanged since they
// must be either an integer, infinity, or NaN.
const half = 1 << (shift - 1)
e -= bias
bits += half >> e
bits &^= fracMask >> e
}
return math.Float64frombits(bits)
}
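For a quick check of the half-away-from-zero semantics (using the standard library's math.Round, which the backfill above is meant to mirror on older Go versions):

package main

import (
    "fmt"
    "math"
)

func main() {
    fmt.Println(math.Round(0.5), math.Round(-0.5), math.Round(2.4), math.Round(2.5))
    // Output: 1 -1 2 3
}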

View File

@ -0,0 +1,11 @@
// +build go1.6
package sdkrand
import "math/rand"
// Read provides the stub for the rand.Rand.Read method support for Go versions
// 1.6 and greater.
func Read(r *rand.Rand, p []byte) (int, error) {
return r.Read(p)
}

View File

@ -0,0 +1,24 @@
// +build !go1.6
package sdkrand
import "math/rand"
// Read backfills Go 1.6's rand.Rand.Read for Go 1.5
func Read(r *rand.Rand, p []byte) (n int, err error) {
// Copy of Go standard libraries math package's read function not added to
// standard library until Go 1.6.
var pos int8
var val int64
for n = 0; n < len(p); n++ {
if pos == 0 {
val = r.Int63()
pos = 7
}
p[n] = byte(val)
val >>= 8
pos--
}
return n, err
}

View File

@ -146,6 +146,9 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) {
}
func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
if len(headers) == 0 {
return nil
}
switch r.Interface().(type) {
case map[string]*string: // we only support string map value types
out := map[string]*string{}
@ -155,19 +158,28 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err
out[k[len(prefix):]] = &v[0]
}
}
r.Set(reflect.ValueOf(out))
if len(out) != 0 {
r.Set(reflect.ValueOf(out))
}
}
return nil
}
func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
isJSONValue := tag.Get("type") == "jsonvalue"
if isJSONValue {
switch tag.Get("type") {
case "jsonvalue":
if len(header) == 0 {
return nil
}
} else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
return nil
case "blob":
if len(header) == 0 {
return nil
}
default:
if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
return nil
}
}
switch v.Interface().(type) {
@ -178,7 +190,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
if err != nil {
return err
}
v.Set(reflect.ValueOf(&b))
v.Set(reflect.ValueOf(b))
case *bool:
b, err := strconv.ParseBool(header)
if err != nil {

View File

@ -39,7 +39,7 @@ func Build(r *request.Request) {
r.Error = awserr.NewRequestFailure(
awserr.New(request.ErrCodeSerialization,
"failed to encode rest XML request", err),
r.HTTPResponse.StatusCode,
0,
r.RequestID,
)
return

View File

@ -1,8 +1,11 @@
package protocol
import (
"math"
"strconv"
"time"
"github.com/aws/aws-sdk-go/internal/sdkmath"
)
// Names of time formats supported by the SDK
@ -13,12 +16,19 @@ const (
)
// Time formats supported by the SDK
// Output time is intended to not contain decimals
const (
// RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT
RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
// This format is used for output time without seconds precision
RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
ISO8601TimeFormat = "2006-01-02T15:04:05Z"
ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
// This format is used for output time without seconds precision
ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z"
)
// IsKnownTimestampFormat returns if the timestamp format name
@ -42,9 +52,9 @@ func FormatTime(name string, t time.Time) string {
switch name {
case RFC822TimeFormatName:
return t.Format(RFC822TimeFormat)
return t.Format(RFC822OutputTimeFormat)
case ISO8601TimeFormatName:
return t.Format(ISO8601TimeFormat)
return t.Format(ISO8601OutputTimeFormat)
case UnixTimeFormatName:
return strconv.FormatInt(t.Unix(), 10)
default:
@ -62,10 +72,12 @@ func ParseTime(formatName, value string) (time.Time, error) {
return time.Parse(ISO8601TimeFormat, value)
case UnixTimeFormatName:
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return time.Time{}, err
}
_, dec := math.Modf(v)
dec = sdkmath.Round(dec*1e3) / 1e3 // Rounds 0.1229999 to 0.123
return time.Unix(int64(v), 0), nil
return time.Unix(int64(v), int64(dec*(1e9))), nil
default:
panic("unknown timestamp format name, " + formatName)
}
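A small sketch of the new fractional-second handling; the constant and function are exported from the protocol package shown in this hunk, and the timestamp value is illustrative:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
    // Fractional unix timestamps now keep millisecond precision instead of
    // being truncated to whole seconds.
    t, err := protocol.ParseTime(protocol.UnixTimeFormatName, "1257894000.123")
    if err != nil {
        panic(err)
    }
    fmt.Println(t.UTC()) // 2009-11-10 23:00:00.123 +0000 UTC
}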

View File

@ -0,0 +1,32 @@
package xmlutil
import (
"encoding/xml"
"strings"
)
type xmlAttrSlice []xml.Attr
func (x xmlAttrSlice) Len() int {
return len(x)
}
func (x xmlAttrSlice) Less(i, j int) bool {
spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space
localI, localJ := x[i].Name.Local, x[j].Name.Local
valueI, valueJ := x[i].Value, x[j].Value
spaceCmp := strings.Compare(spaceI, spaceJ)
localCmp := strings.Compare(localI, localJ)
valueCmp := strings.Compare(valueI, valueJ)
if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) {
return true
}
return false
}
func (x xmlAttrSlice) Swap(i, j int) {
x[i], x[j] = x[j], x[i]
}

View File

@ -119,7 +119,18 @@ func (n *XMLNode) findElem(name string) (string, bool) {
// StructToXML writes an XMLNode to a xml.Encoder as tokens.
func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
// Sort Attributes
attrs := node.Attr
if sorted {
sortedAttrs := make([]xml.Attr, len(attrs))
copy(sortedAttrs, attrs)
sort.Sort(xmlAttrSlice(sortedAttrs))
attrs = sortedAttrs
}
e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs})
if node.Text != "" {
e.EncodeToken(xml.CharData([]byte(node.Text)))

File diff suppressed because it is too large Load Diff

View File

@ -5,7 +5,6 @@ import (
"hash/crc32"
"io"
"io/ioutil"
"math"
"strconv"
"time"
@ -15,15 +14,6 @@ import (
"github.com/aws/aws-sdk-go/aws/request"
)
type retryer struct {
client.DefaultRetryer
}
func (d retryer) RetryRules(r *request.Request) time.Duration {
delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 50
return delay * time.Millisecond
}
func init() {
initClient = func(c *client.Client) {
if c.Config.Retryer == nil {
@ -43,10 +33,9 @@ func setCustomRetryer(c *client.Client) {
maxRetries = 10
}
c.Retryer = retryer{
DefaultRetryer: client.DefaultRetryer{
NumMaxRetries: maxRetries,
},
c.Retryer = client.DefaultRetryer{
NumMaxRetries: maxRetries,
MinRetryDelay: 50 * time.Millisecond,
}
}
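Since the customization above only applies when no retryer is configured, a caller-supplied retryer still wins; a minimal sketch, with the retry count chosen arbitrarily:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
    sess := session.Must(session.NewSession())
    // Supplying a retryer on the config means the service's default retry
    // customization is not applied.
    svc := dynamodb.New(sess, &aws.Config{
        Retryer: client.DefaultRetryer{NumMaxRetries: 5},
    })
    _ = svc
}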

View File

@ -3,7 +3,7 @@ AttributeValue Marshaling and Unmarshaling Helpers
Utility helpers to marshal and unmarshal AttributeValue to and
from Go types can be found in the dynamodbattribute sub package. This package
provides has specialized functions for the common ways of working with
provides specialized functions for the common ways of working with
AttributeValues. Such as map[string]*AttributeValue, []*AttributeValue, and
directly with *AttributeValue. This is helpful for marshaling Go types for API
operations such as PutItem, and unmarshaling Query and Scan APIs' responses.

View File

@ -7,6 +7,7 @@ import (
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
@ -155,6 +156,7 @@ var byteSliceType = reflect.TypeOf([]byte(nil))
var byteSliceSlicetype = reflect.TypeOf([][]byte(nil))
var numberType = reflect.TypeOf(Number(""))
var timeType = reflect.TypeOf(time.Time{})
var ptrStringType = reflect.TypeOf(aws.String(""))
func (d *Decoder) decode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
var u Unmarshaler
@ -172,23 +174,23 @@ func (d *Decoder) decode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag
}
switch {
case len(av.B) != 0:
case len(av.B) != 0 || (av.B != nil && d.EnableEmptyCollections):
return d.decodeBinary(av.B, v)
case av.BOOL != nil:
return d.decodeBool(av.BOOL, v)
case len(av.BS) != 0:
case len(av.BS) != 0 || (av.BS != nil && d.EnableEmptyCollections):
return d.decodeBinarySet(av.BS, v)
case len(av.L) != 0:
case len(av.L) != 0 || (av.L != nil && d.EnableEmptyCollections):
return d.decodeList(av.L, v)
case len(av.M) != 0:
case len(av.M) != 0 || (av.M != nil && d.EnableEmptyCollections):
return d.decodeMap(av.M, v)
case av.N != nil:
return d.decodeNumber(av.N, v, fieldTag)
case len(av.NS) != 0:
case len(av.NS) != 0 || (av.NS != nil && d.EnableEmptyCollections):
return d.decodeNumberSet(av.NS, v)
case av.S != nil:
case av.S != nil: // DynamoDB does not allow for empty strings, so we do not consider the length or EnableEmptyCollections flag here
return d.decodeString(av.S, v, fieldTag)
case len(av.SS) != 0:
case len(av.SS) != 0 || (av.SS != nil && d.EnableEmptyCollections):
return d.decodeStringSet(av.SS, v)
}
@ -487,7 +489,8 @@ func (d *Decoder) decodeMap(avMap map[string]*dynamodb.AttributeValue, v reflect
if v.Kind() == reflect.Map {
for k, av := range avMap {
key := reflect.ValueOf(k)
key := reflect.New(v.Type().Key()).Elem()
key.SetString(k)
elem := reflect.New(v.Type().Elem()).Elem()
if err := d.decode(av, elem, tag{}); err != nil {
return err

View File

@ -194,6 +194,13 @@ type MarshalOptions struct {
// Note that values provided with a custom TagKey must also be supported
// by the (un)marshalers in this package.
TagKey string
// EnableEmptyCollections modifies how structures, maps, and slices are (un)marshalled.
// When set to true, empty collection values will be preserved as their respective
// empty DynamoDB AttributeValue type.
//
// Disabled by default.
EnableEmptyCollections bool
}
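A sketch of opting in, assuming the option is reachable through the Encoder's embedded MarshalOptions as in the rest of this package:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)

func main() {
    enc := dynamodbattribute.NewEncoder(func(e *dynamodbattribute.Encoder) {
        e.EnableEmptyCollections = true
    })
    // With the flag set, the empty map is encoded as an empty M attribute
    // value rather than a NULL attribute value.
    av, err := enc.Encode(struct{ Tags map[string]string }{Tags: map[string]string{}})
    if err != nil {
        panic(err)
    }
    fmt.Println(av)
}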
// An Encoder provides marshaling Go value types to AttributeValues.
@ -255,7 +262,7 @@ func fieldByIndex(v reflect.Value, index []int,
func (e *Encoder) encode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
// We should check for omitted values first before dereferencing.
if fieldTag.OmitEmpty && emptyValue(v) {
if fieldTag.OmitEmpty && emptyValue(v, e.EnableEmptyCollections) {
encodeNull(av)
return nil
}
@ -330,7 +337,7 @@ func (e *Encoder) encodeStruct(av *dynamodb.AttributeValue, v reflect.Value, fie
av.M[f.Name] = elem
}
if len(av.M) == 0 {
if len(av.M) == 0 && !e.EnableEmptyCollections {
encodeNull(av)
}
@ -357,7 +364,8 @@ func (e *Encoder) encodeMap(av *dynamodb.AttributeValue, v reflect.Value, fieldT
av.M[keyName] = elem
}
if len(av.M) == 0 {
if v.IsNil() || (len(av.M) == 0 && !e.EnableEmptyCollections) {
encodeNull(av)
}
@ -365,13 +373,18 @@ func (e *Encoder) encodeMap(av *dynamodb.AttributeValue, v reflect.Value, fieldT
}
func (e *Encoder) encodeSlice(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
if v.Kind() == reflect.Array && v.Len() == 0 && e.EnableEmptyCollections && fieldTag.OmitEmpty {
encodeNull(av)
return nil
}
switch v.Type().Elem().Kind() {
case reflect.Uint8:
slice := reflect.MakeSlice(byteSliceType, v.Len(), v.Len())
reflect.Copy(slice, v)
b := slice.Bytes()
if len(b) == 0 {
if (v.Kind() == reflect.Slice && v.IsNil()) || (len(b) == 0 && !e.EnableEmptyCollections) {
encodeNull(av)
return nil
}
@ -416,7 +429,7 @@ func (e *Encoder) encodeSlice(av *dynamodb.AttributeValue, v reflect.Value, fiel
if n, err := e.encodeList(v, fieldTag, elemFn); err != nil {
return err
} else if n == 0 {
} else if (v.Kind() == reflect.Slice && v.IsNil()) || (n == 0 && !e.EnableEmptyCollections) {
encodeNull(av)
}
}
@ -489,8 +502,10 @@ func (e *Encoder) encodeNumber(av *dynamodb.AttributeValue, v reflect.Value) err
out = encodeInt(v.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
out = encodeUint(v.Uint())
case reflect.Float32, reflect.Float64:
out = encodeFloat(v.Float())
case reflect.Float32:
out = encodeFloat(v.Float(), 32)
case reflect.Float64:
out = encodeFloat(v.Float(), 64)
default:
return &unsupportedMarshalTypeError{Type: v.Type()}
}
@ -526,8 +541,8 @@ func encodeInt(i int64) string {
func encodeUint(u uint64) string {
return strconv.FormatUint(u, 10)
}
func encodeFloat(f float64) string {
return strconv.FormatFloat(f, 'f', -1, 64)
func encodeFloat(f float64, bitSize int) string {
return strconv.FormatFloat(f, 'f', -1, bitSize)
}
func encodeNull(av *dynamodb.AttributeValue) {
t := true
@ -545,9 +560,13 @@ func valueElem(v reflect.Value) reflect.Value {
return v
}
func emptyValue(v reflect.Value) bool {
func emptyValue(v reflect.Value, emptyCollections bool) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
case reflect.Array:
return v.Len() == 0 && !emptyCollections
case reflect.Map, reflect.Slice:
return v.IsNil() || (v.Len() == 0 && !emptyCollections)
case reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()

View File

@ -124,8 +124,8 @@ const (
// "RequestLimitExceeded".
//
// Throughput exceeds the current throughput limit for your account. Please
// contact AWS Support at AWS Support (https://docs.aws.amazon.com/https:/aws.amazon.com/support)
// to request a limit increase.
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
ErrCodeRequestLimitExceeded = "RequestLimitExceeded"
// ErrCodeResourceInUseException for service response error code
@ -165,9 +165,9 @@ const (
// ErrCodeTransactionCanceledException for service response error code
// "TransactionCanceledException".
//
// The entire transaction request was rejected.
// The entire transaction request was canceled.
//
// DynamoDB rejects a TransactWriteItems request under the following circumstances:
// DynamoDB cancels a TransactWriteItems request under the following circumstances:
//
// * A condition in one of the condition expressions is not met.
//
@ -186,7 +186,7 @@ const (
//
// * There is a user error, such as an invalid data format.
//
// DynamoDB rejects a TransactGetItems request under the following circumstances:
// DynamoDB cancels a TransactGetItems request under the following circumstances:
//
// * There is an ongoing TransactGetItems operation that conflicts with a
// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
@ -199,6 +199,57 @@ const (
// completed.
//
// * There is a user error, such as an invalid data format.
//
// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
// property. This property is not set for other languages. Transaction cancellation
// reasons are ordered in the order of requested items, if an item has no error
// it will have NONE code and Null message.
//
// Cancellation reason codes and possible error messages:
//
// * No Errors: Code: NONE Message: null
//
// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
// conditional request failed.
//
// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
// Message: Collection size exceeded.
//
// * Transaction Conflict: Code: TransactionConflict Message: Transaction
// is ongoing for the item.
//
// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
// Messages: The level of configured provisioned throughput for the table
// was exceeded. Consider increasing your provisioning level with the UpdateTable
// API. This message is returned when provisioned throughput is exceeded
// on a provisioned DynamoDB table. The level of configured provisioned
// throughput for one or more global secondary indexes of the table was exceeded.
// Consider increasing your provisioning level for the under-provisioned
// global secondary indexes with the UpdateTable API. This message is returned
// when provisioned throughput is exceeded on a provisioned GSI.
//
// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
// the current capacity of your table or index. DynamoDB is automatically
// scaling your table or index so please try again shortly. If exceptions
// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
// This message is returned when writes get throttled on an On-Demand table
// as DynamoDB is automatically scaling the table. Throughput exceeds the
// current capacity for one or more global secondary indexes. DynamoDB is
// automatically scaling your index so please try again shortly. This message
// is returned when writes get throttled on an On-Demand GSI as DynamoDB
// is automatically scaling the GSI.
//
// * Validation Error: Code: ValidationError Messages: One or more parameter
// values were invalid. The update expression attempted to update the secondary
// index key beyond allowed size limits. The update expression attempted
// to update the secondary index key to unsupported type. An operand in the
// update expression has an incorrect data type. Item size to update has
// exceeded the maximum allowed size. Number overflow. Attempting to store
// a number with magnitude larger than supported range. Type mismatch for
// attribute to update. Nesting Levels have exceeded supported limits. The
// document path provided in the update expression is invalid for update.
// The provided expression refers to an attribute that does not exist in
// the item.
ErrCodeTransactionCanceledException = "TransactionCanceledException"
// ErrCodeTransactionConflictException for service response error code

View File

@ -41,6 +41,8 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a DynamoDB client from just a session.
// svc := dynamodb.New(mySession)
//
@ -48,11 +50,11 @@ const (
// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DynamoDB {
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DynamoDB {
svc := &DynamoDB{
Client: client.New(
cfg,
@ -61,6 +63,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2012-08-10",
JSONVersion: "1.0",

File diff suppressed because it is too large Load Diff

View File

@ -8,65 +8,32 @@ import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkrand"
)
type retryer struct {
client.DefaultRetryer
}
const (
// customRetryerMinRetryDelay sets min retry delay
customRetryerMinRetryDelay = 1 * time.Second
func (d retryer) RetryRules(r *request.Request) time.Duration {
switch r.Operation.Name {
case opModifyNetworkInterfaceAttribute:
fallthrough
case opAssignPrivateIpAddresses:
return customRetryRule(r)
default:
return d.DefaultRetryer.RetryRules(r)
}
}
func customRetryRule(r *request.Request) time.Duration {
retryTimes := []time.Duration{
time.Second,
3 * time.Second,
5 * time.Second,
}
count := r.RetryCount
if count >= len(retryTimes) {
count = len(retryTimes) - 1
}
minTime := int(retryTimes[count])
return time.Duration(sdkrand.SeededRand.Intn(minTime) + minTime)
}
func setCustomRetryer(c *client.Client) {
maxRetries := aws.IntValue(c.Config.MaxRetries)
if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
maxRetries = 3
}
c.Retryer = retryer{
DefaultRetryer: client.DefaultRetryer{
NumMaxRetries: maxRetries,
},
}
}
// customRetryerMaxRetryDelay sets max retry delay
customRetryerMaxRetryDelay = 8 * time.Second
)
func init() {
initClient = func(c *client.Client) {
if c.Config.Retryer == nil {
// Only override the retryer with a custom one if the config
// does not already contain a retryer
setCustomRetryer(c)
}
}
initRequest = func(r *request.Request) {
if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter
r.Handlers.Build.PushFront(fillPresignedURL)
}
// only set the retryer on request if config doesn't have a retryer
if r.Config.Retryer == nil && (r.Operation.Name == opModifyNetworkInterfaceAttribute || r.Operation.Name == opAssignPrivateIpAddresses) {
r.Retryer = client.DefaultRetryer{
NumMaxRetries: client.DefaultRetryerMaxNumRetries,
MinRetryDelay: customRetryerMinRetryDelay,
MinThrottleDelay: customRetryerMinRetryDelay,
MaxRetryDelay: customRetryerMaxRetryDelay,
MaxThrottleDelay: customRetryerMaxRetryDelay,
}
}
}
}
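The request hook above only installs the custom per-operation retryer when the config does not already carry one. A hedged sketch of supplying your own retryer through aws.Config so those per-operation defaults are skipped; the delay values here are arbitrary:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession())
	// Because Config.Retryer is set, the package's per-operation retry
	// delays are not applied to this client's requests.
	svc := ec2.New(sess, &aws.Config{
		Retryer: client.DefaultRetryer{
			NumMaxRetries:    5,
			MinRetryDelay:    500 * time.Millisecond,
			MinThrottleDelay: 500 * time.Millisecond,
			MaxRetryDelay:    10 * time.Second,
			MaxThrottleDelay: 10 * time.Second,
		},
	})
	_ = svc
}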

View File

@ -7,18 +7,19 @@
// capacity in the AWS cloud. Using Amazon EC2 eliminates the need to invest
// in hardware up front, so you can develop and deploy applications faster.
//
// To learn more about Amazon EC2, Amazon EBS, and Amazon VPC, see the following
// resources:
// To learn more, see the following resources:
//
// * Amazon EC2 product page (http://aws.amazon.com/ec2)
// * Amazon EC2: Amazon EC2 product page (http://aws.amazon.com/ec2), Amazon
// EC2 documentation (http://aws.amazon.com/documentation/ec2)
//
// * Amazon EC2 documentation (http://aws.amazon.com/documentation/ec2)
// * Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon
// EBS documentation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html)
//
// * Amazon EBS product page (http://aws.amazon.com/ebs)
// * Amazon VPC: Amazon VPC product page (http://aws.amazon.com/vpc), Amazon
// VPC documentation (http://aws.amazon.com/documentation/vpc)
//
// * Amazon VPC product page (http://aws.amazon.com/vpc)
//
// * Amazon VPC documentation (http://aws.amazon.com/documentation/vpc)
// * AWS VPN: AWS VPN product page (http://aws.amazon.com/vpn), AWS VPN documentation
// (http://aws.amazon.com/documentation/vpn)
//
// See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service.
//

View File

@ -39,6 +39,8 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a EC2 client from just a session.
// svc := ec2.New(mySession)
//
@ -46,11 +48,11 @@ const (
// svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EC2 {
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EC2 {
svc := &EC2{
Client: client.New(
cfg,
@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2016-11-15",
},

View File

@ -952,6 +952,57 @@ func (c *EC2) WaitUntilPasswordDataAvailableWithContext(ctx aws.Context, input *
return w.WaitWithContext(ctx)
}
// WaitUntilSecurityGroupExists uses the Amazon EC2 API operation
// DescribeSecurityGroups to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *EC2) WaitUntilSecurityGroupExists(input *DescribeSecurityGroupsInput) error {
return c.WaitUntilSecurityGroupExistsWithContext(aws.BackgroundContext(), input)
}
// WaitUntilSecurityGroupExistsWithContext is an extended version of WaitUntilSecurityGroupExists.
// With the support for passing in a context and options to configure the
// Waiter and the underlying request options.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *EC2) WaitUntilSecurityGroupExistsWithContext(ctx aws.Context, input *DescribeSecurityGroupsInput, opts ...request.WaiterOption) error {
w := request.Waiter{
Name: "WaitUntilSecurityGroupExists",
MaxAttempts: 6,
Delay: request.ConstantWaiterDelay(5 * time.Second),
Acceptors: []request.WaiterAcceptor{
{
State: request.SuccessWaiterState,
Matcher: request.PathWaiterMatch, Argument: "length(SecurityGroups[].GroupId) > `0`",
Expected: true,
},
{
State: request.RetryWaiterState,
Matcher: request.ErrorWaiterMatch,
Expected: "InvalidGroupNotFound",
},
},
Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) {
var inCpy *DescribeSecurityGroupsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeSecurityGroupsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
w.ApplyOptions(opts...)
return w.WaitWithContext(ctx)
}
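A short usage sketch for the new waiter; the group ID and the pre-built svc client are assumptions for illustration:

func waitForGroup(svc *ec2.EC2, groupID string) error {
	// Polls DescribeSecurityGroups every 5 seconds, up to 6 attempts,
	// retrying while the API still reports InvalidGroupNotFound.
	return svc.WaitUntilSecurityGroupExists(&ec2.DescribeSecurityGroupsInput{
		GroupIds: []*string{aws.String(groupID)},
	})
}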
// WaitUntilSnapshotCompleted uses the Amazon EC2 API operation
// DescribeSnapshots to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will

File diff suppressed because it is too large

View File

@ -162,6 +162,13 @@ const (
// to the service-linked role for that service.
ErrCodePolicyNotAttachableException = "PolicyNotAttachable"
// ErrCodeReportGenerationLimitExceededException for service response error code
// "ReportGenerationLimitExceeded".
//
// The request failed because the maximum number of concurrent requests for
// this account are already running.
ErrCodeReportGenerationLimitExceededException = "ReportGenerationLimitExceeded"
// ErrCodeServiceFailureException for service response error code
// "ServiceFailure".
//

View File

@ -260,6 +260,10 @@ type IAMAPI interface {
GenerateCredentialReportWithContext(aws.Context, *iam.GenerateCredentialReportInput, ...request.Option) (*iam.GenerateCredentialReportOutput, error)
GenerateCredentialReportRequest(*iam.GenerateCredentialReportInput) (*request.Request, *iam.GenerateCredentialReportOutput)
GenerateOrganizationsAccessReport(*iam.GenerateOrganizationsAccessReportInput) (*iam.GenerateOrganizationsAccessReportOutput, error)
GenerateOrganizationsAccessReportWithContext(aws.Context, *iam.GenerateOrganizationsAccessReportInput, ...request.Option) (*iam.GenerateOrganizationsAccessReportOutput, error)
GenerateOrganizationsAccessReportRequest(*iam.GenerateOrganizationsAccessReportInput) (*request.Request, *iam.GenerateOrganizationsAccessReportOutput)
GenerateServiceLastAccessedDetails(*iam.GenerateServiceLastAccessedDetailsInput) (*iam.GenerateServiceLastAccessedDetailsOutput, error)
GenerateServiceLastAccessedDetailsWithContext(aws.Context, *iam.GenerateServiceLastAccessedDetailsInput, ...request.Option) (*iam.GenerateServiceLastAccessedDetailsOutput, error)
GenerateServiceLastAccessedDetailsRequest(*iam.GenerateServiceLastAccessedDetailsInput) (*request.Request, *iam.GenerateServiceLastAccessedDetailsOutput)
@ -318,6 +322,10 @@ type IAMAPI interface {
GetOpenIDConnectProviderWithContext(aws.Context, *iam.GetOpenIDConnectProviderInput, ...request.Option) (*iam.GetOpenIDConnectProviderOutput, error)
GetOpenIDConnectProviderRequest(*iam.GetOpenIDConnectProviderInput) (*request.Request, *iam.GetOpenIDConnectProviderOutput)
GetOrganizationsAccessReport(*iam.GetOrganizationsAccessReportInput) (*iam.GetOrganizationsAccessReportOutput, error)
GetOrganizationsAccessReportWithContext(aws.Context, *iam.GetOrganizationsAccessReportInput, ...request.Option) (*iam.GetOrganizationsAccessReportOutput, error)
GetOrganizationsAccessReportRequest(*iam.GetOrganizationsAccessReportInput) (*request.Request, *iam.GetOrganizationsAccessReportOutput)
GetPolicy(*iam.GetPolicyInput) (*iam.GetPolicyOutput, error)
GetPolicyWithContext(aws.Context, *iam.GetPolicyInput, ...request.Option) (*iam.GetPolicyOutput, error)
GetPolicyRequest(*iam.GetPolicyInput) (*request.Request, *iam.GetPolicyOutput)
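The iamiface additions above are what make the new operations mockable in tests. A minimal test-double sketch; mockIAM and its canned output are invented names for illustration:

import (
	"github.com/aws/aws-sdk-go/service/iam"
	"github.com/aws/aws-sdk-go/service/iam/iamiface"
)

// mockIAM satisfies IAMAPI by embedding the interface and overriding
// only the method under test.
type mockIAM struct {
	iamiface.IAMAPI
	out *iam.GetOrganizationsAccessReportOutput
}

func (m *mockIAM) GetOrganizationsAccessReport(in *iam.GetOrganizationsAccessReportInput) (*iam.GetOrganizationsAccessReportOutput, error) {
	return m.out, nil
}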

View File

@ -39,6 +39,8 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a IAM client from just a session.
// svc := iam.New(mySession)
//
@ -46,11 +48,11 @@ const (
// svc := iam.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *IAM {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IAM {
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IAM {
svc := &IAM{
Client: client.New(
cfg,
@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2010-05-08",
},

View File

@ -3404,10 +3404,12 @@ func (c *KMS) ListAliasesPagesWithContext(ctx aws.Context, input *ListAliasesInp
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*ListAliasesOutput), !p.HasNextPage())
for p.Next() {
if !fn(p.Page().(*ListAliasesOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
@ -3567,10 +3569,12 @@ func (c *KMS) ListGrantsPagesWithContext(ctx aws.Context, input *ListGrantsInput
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*ListGrantsResponse), !p.HasNextPage())
for p.Next() {
if !fn(p.Page().(*ListGrantsResponse), !p.HasNextPage()) {
break
}
}
return p.Err()
}
@ -3726,10 +3730,12 @@ func (c *KMS) ListKeyPoliciesPagesWithContext(ctx aws.Context, input *ListKeyPol
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*ListKeyPoliciesOutput), !p.HasNextPage())
for p.Next() {
if !fn(p.Page().(*ListKeyPoliciesOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
@ -3871,10 +3877,12 @@ func (c *KMS) ListKeysPagesWithContext(ctx aws.Context, input *ListKeysInput, fn
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*ListKeysOutput), !p.HasNextPage())
for p.Next() {
if !fn(p.Page().(*ListKeysOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
}
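The pagination helpers above now stop as soon as the callback returns false instead of draining the remaining pages. A small usage sketch; the client construction and the decision to stop after the first page are illustrative:

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func listSomeKeys() error {
	svc := kms.New(session.Must(session.NewSession()))
	return svc.ListKeysPages(&kms.ListKeysInput{},
		func(page *kms.ListKeysOutput, lastPage bool) bool {
			for _, k := range page.Keys {
				fmt.Println(aws.StringValue(k.KeyId))
			}
			// Returning false ends pagination without fetching further pages.
			return false
		})
}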

View File

@ -39,6 +39,8 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a KMS client from just a session.
// svc := kms.New(mySession)
//
@ -46,11 +48,11 @@ const (
// svc := kms.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *KMS {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KMS {
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KMS {
svc := &KMS{
Client: client.New(
cfg,
@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2014-11-01",
JSONVersion: "1.1",

File diff suppressed because it is too large

View File

@ -63,6 +63,20 @@
// See the s3manager package's Downloader type documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
//
// Automatic URI cleaning
//
// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
// used by the service client.
//
// svc := s3.New(sess, &aws.Config{
// DisableRestProtocolURICleaning: aws.Bool(true),
// })
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("//foo//bar//moo"),
// })
//
// Get Bucket Region
//
// GetBucketRegion will attempt to get the region for a bucket using a region

View File

@ -13,6 +13,12 @@ const (
// ErrCodeBucketAlreadyOwnedByYou for service response error code
// "BucketAlreadyOwnedByYou".
//
// The bucket you tried to create already exists, and you own it. Amazon S3
// returns this error in all AWS Regions except in the North Virginia region.
// For legacy compatibility, if you re-create an existing bucket that you already
// own in the North Virginia region, Amazon S3 returns 200 OK and resets the
// bucket access control lists (ACLs).
ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
// ErrCodeNoSuchBucket for service response error code
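A hedged sketch of checking for the BucketAlreadyOwnedByYou code after CreateBucket; the helper name, bucket name, and pre-built client are placeholders:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

func ensureBucket(svc *s3.S3, name string) error {
	_, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: aws.String(name)})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeBucketAlreadyOwnedByYou {
		// The bucket already exists and we own it; treat as success.
		return nil
	}
	return err
}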

View File

@ -39,6 +39,8 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a S3 client from just a session.
// svc := s3.New(mySession)
//
@ -46,11 +48,11 @@ const (
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 {
svc := &S3{
Client: client.New(
cfg,
@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2006-03-01",
},

File diff suppressed because it is too large

View File

@ -0,0 +1,11 @@
package sts
import "github.com/aws/aws-sdk-go/aws/request"
func init() {
initRequest = customizeRequest
}
func customizeRequest(r *request.Request) {
r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException)
}
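The same mechanism is available to callers: a build handler can append further codes to a request's retryable set. A sketch under the assumption that you have your own transient error code to retry; "SomeTransientError" is hypothetical:

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func newSTSWithExtraRetry() *sts.STS {
	svc := sts.New(session.Must(session.NewSession()))
	svc.Handlers.Build.PushBack(func(r *request.Request) {
		// "SomeTransientError" is a hypothetical code, shown only to
		// illustrate extending the per-request retryable error codes.
		r.RetryErrorCodes = append(r.RetryErrorCodes, "SomeTransientError")
	})
	return svc
}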

View File

@ -14,11 +14,11 @@ const (
// ErrCodeIDPCommunicationErrorException for service response error code
// "IDPCommunicationError".
//
// The request could not be fulfilled because the non-AWS identity provider
// (IDP) that was asked to verify the incoming identity token could not be reached.
// This is often a transient error caused by network conditions. Retry the request
// The request could not be fulfilled because the identity provider (IDP) that
// was asked to verify the incoming identity token could not be reached. This
// is often a transient error caused by network conditions. Retry the request
// a limited number of times so that you don't exceed the request rate. If the
// error persists, the non-AWS identity provider might be down or not responding.
// error persists, the identity provider might be down or not responding.
ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
// ErrCodeIDPRejectedClaimException for service response error code
@ -56,9 +56,18 @@ const (
// ErrCodePackedPolicyTooLargeException for service response error code
// "PackedPolicyTooLarge".
//
// The request was rejected because the policy document was too large. The error
// message describes how big the policy document is, in packed form, as a percentage
// of what the API allows.
// The request was rejected because the total packed size of the session policies
// and session tags combined was too large. An AWS conversion compresses the
// session policy document, session policy ARNs, and session tags into a packed
// binary format that has a separate limit. The error message indicates by percentage
// how close the policies and tags are to the upper size limit. For more information,
// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
// You could receive this error even though you meet other defined session policy
// and session tag limits. For more information, see IAM and STS Entity Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
// ErrCodeRegionDisabledException for service response error code

View File

@ -39,6 +39,8 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a STS client from just a session.
// svc := sts.New(mySession)
//
@ -46,11 +48,11 @@ const (
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS {
svc := &STS{
Client: client.New(
cfg,
@ -59,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2011-06-15",
},

View File

@ -76,6 +76,10 @@ type STSAPI interface {
DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error)
GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput)
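GetAccessKeyInfo is newly added to the vendored STSAPI interface above. A short sketch of calling it; the access key ID is a placeholder:

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

func whoOwnsKey(svc *sts.STS) error {
	// "AKIAEXAMPLE" is a placeholder access key ID.
	out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAEXAMPLE"),
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.Account)) // account that owns the key
	return nil
}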

3
vendor/modules.txt vendored
View File

@ -95,7 +95,7 @@ github.com/armon/go-proxyproto
github.com/armon/go-radix
# github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf
github.com/asaskevich/govalidator
# github.com/aws/aws-sdk-go v1.19.39
# github.com/aws/aws-sdk-go v1.25.41
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/credentials
github.com/aws/aws-sdk-go/aws/credentials/stscreds
@ -141,6 +141,7 @@ github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi
github.com/aws/aws-sdk-go/private/protocol/rest
github.com/aws/aws-sdk-go/private/protocol/restxml
github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
github.com/aws/aws-sdk-go/internal/sdkmath
github.com/aws/aws-sdk-go/private/protocol/query/queryutil
github.com/aws/aws-sdk-go/private/protocol/json/jsonutil
# github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973