Minor dep sync

parent a379989da4
commit a371bd7e7b
@ -612,7 +612,7 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
|
|||
|
||||
// If returns a new BucketHandle that applies a set of preconditions.
|
||||
// Preconditions already set on the BucketHandle are ignored.
|
||||
// Operations on the new handle will only occur if the preconditions are
|
||||
// Operations on the new handle will return an error if the preconditions are not
|
||||
// satisfied. The only valid preconditions for buckets are MetagenerationMatch
|
||||
// and MetagenerationNotMatch.
|
||||
func (b *BucketHandle) If(conds BucketConditions) *BucketHandle {
|
||||
|
|
|
@ -346,7 +346,7 @@ func (o *ObjectHandle) Generation(gen int64) *ObjectHandle {
|
|||
|
||||
// If returns a new ObjectHandle that applies a set of preconditions.
|
||||
// Preconditions already set on the ObjectHandle are ignored.
|
||||
// Operations on the new handle will only occur if the preconditions are
|
||||
// Operations on the new handle will return an error if the preconditions are not
|
||||
// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions
|
||||
// for more details.
|
||||
func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
|
||||
|
|
|
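Both storage hunks above make the same doc fix: operations on a handle returned by If return an error when the preconditions are not satisfied, rather than simply not occurring. A minimal sketch of how that reads in client code, assuming a reachable bucket; the bucket and object names below are placeholders and not part of this commit:

package main

import (
    "context"
    "log"

    "cloud.google.com/go/storage"
)

func main() {
    ctx := context.Background()
    client, err := storage.NewClient(ctx)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // Write only if the object does not already exist; a failed precondition
    // surfaces as an error from the write, not as a silent no-op.
    obj := client.Bucket("my-bucket").Object("my-object")
    w := obj.If(storage.Conditions{DoesNotExist: true}).NewWriter(ctx)
    if _, err := w.Write([]byte("hello")); err != nil {
        log.Fatal(err)
    }
    if err := w.Close(); err != nil {
        log.Printf("precondition not satisfied (or other error): %v", err)
    }

    // The same pattern applies to buckets, where only metageneration
    // preconditions are valid.
    attrs, err := client.Bucket("my-bucket").Attrs(ctx)
    if err != nil {
        log.Fatal(err)
    }
    _, err = client.Bucket("my-bucket").
        If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).
        Update(ctx, storage.BucketAttrsToUpdate{VersioningEnabled: true})
    if err != nil {
        log.Printf("bucket precondition not satisfied (or other error): %v", err)
    }
}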
@ -5,4 +5,4 @@ package aws
|
|||
const SDKName = "aws-sdk-go"
|
||||
|
||||
// SDKVersion is the version of this SDK
|
||||
const SDKVersion = "1.14.21"
|
||||
const SDKVersion = "1.14.24"
|
||||
|
|
|
@ -2587,7 +2587,7 @@ func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Reques
|
|||
// To copy an encrypted snapshot that has been shared from another account,
|
||||
// you must have permissions for the CMK used to encrypt the snapshot.
|
||||
//
|
||||
// Snapshots created by the CopySnapshot action have an arbitrary volume ID
|
||||
// Snapshots created by copying another snapshot have an arbitrary volume ID
|
||||
// that should not be used for any purpose.
|
||||
//
|
||||
// For more information, see Copying an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html)
|
||||
|
@ -4728,7 +4728,8 @@ func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Re
|
|||
// protected.
|
||||
//
|
||||
// You can tag your snapshots during creation. For more information, see Tagging
|
||||
// Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html).
|
||||
// Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
//
|
||||
// For more information, see Amazon Elastic Block Store (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html)
|
||||
// and Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html)
|
||||
|
@ -5082,7 +5083,8 @@ func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Reques
|
|||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
//
|
||||
// You can tag your volumes during creation. For more information, see Tagging
|
||||
// Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html).
|
||||
// Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
//
|
||||
// For more information, see Creating an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
|
@ -7634,7 +7636,7 @@ func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Reques
|
|||
// Deletes the specified EBS volume. The volume must be in the available state
|
||||
// (not attached to an instance).
|
||||
//
|
||||
// The volume may remain in the deleting state for several minutes.
|
||||
// The volume can remain in the deleting state for several minutes.
|
||||
//
|
||||
// For more information, see Deleting an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
|
@ -14276,8 +14278,9 @@ func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req
|
|||
// status of the volume is ok. If the check fails, the overall status is impaired.
|
||||
// If the status is insufficient-data, then the checks may still be taking place
|
||||
// on your volume at the time. We recommend that you retry the request. For
|
||||
// more information on volume status, see Monitoring the Status of Your Volumes
|
||||
// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html).
|
||||
// more information about volume status, see Monitoring the Status of Your Volumes
|
||||
// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
//
|
||||
// Events: Reflect the cause of a volume status and may require you to take
|
||||
// action. For example, if your volume returns an impaired status, then the
|
||||
|
@ -14569,7 +14572,8 @@ func (c *EC2) DescribeVolumesModificationsRequest(input *DescribeVolumesModifica
|
|||
// You can also use CloudWatch Events to check the status of a modification
|
||||
// to an EBS volume. For information about CloudWatch Events, see the Amazon
|
||||
// CloudWatch Events User Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/events/).
|
||||
// For more information, see Monitoring Volume Modifications" (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods).
|
||||
// For more information, see Monitoring Volume Modifications" (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
|
@ -18858,7 +18862,7 @@ func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput
|
|||
// be made public. Snapshots encrypted with your default CMK cannot be shared
|
||||
// with other accounts.
|
||||
//
|
||||
// For more information on modifying snapshot permissions, see Sharing Snapshots
|
||||
// For more information about modifying snapshot permissions, see Sharing Snapshots
|
||||
// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
//
|
||||
|
@ -19131,10 +19135,9 @@ func (c *EC2) ModifyVolumeRequest(input *ModifyVolumeInput) (req *request.Reques
|
|||
//
|
||||
// With previous-generation instance types, resizing an EBS volume may require
|
||||
// detaching and reattaching the volume or stopping and restarting the instance.
|
||||
// For more information about modifying an EBS volume running Linux, see Modifying
|
||||
// the Size, IOPS, or Type of an EBS Volume on Linux (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html).
|
||||
// For more information about modifying an EBS volume running Windows, see Modifying
|
||||
// the Size, IOPS, or Type of an EBS Volume on Windows (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html).
|
||||
// For more information, see Modifying the Size, IOPS, or Type of an EBS Volume
|
||||
// on Linux (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html)
|
||||
// and Modifying the Size, IOPS, or Type of an EBS Volume on Windows (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html).
|
||||
//
|
||||
// If you reach the maximum volume modification rate per volume limit, you will
|
||||
// need to wait at least six hours before applying further modifications to
|
||||
|
@ -21751,7 +21754,7 @@ func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput)
|
|||
//
|
||||
// Resets permission settings for the specified snapshot.
|
||||
//
|
||||
// For more information on modifying snapshot permissions, see Sharing Snapshots
|
||||
// For more information about modifying snapshot permissions, see Sharing Snapshots
|
||||
// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
//
|
||||
|
@ -27034,10 +27037,10 @@ type CopySnapshotInput struct {
|
|||
// copy operation. This parameter is only valid for specifying the destination
|
||||
// region in a PresignedUrl parameter, where it is required.
|
||||
//
|
||||
// CopySnapshot sends the snapshot copy to the regional endpoint that you send
|
||||
// the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS CLI,
|
||||
// this is specified with the --region parameter or the default region in your
|
||||
// AWS configuration file).
|
||||
// The snapshot copy is sent to the regional endpoint that you sent the HTTP
|
||||
// request to (for example, ec2.us-east-1.amazonaws.com). With the AWS CLI,
|
||||
// this is specified using the --region parameter or the default region in your
|
||||
// AWS configuration file.
|
||||
DestinationRegion *string `locationName:"destinationRegion" type:"string"`
|
||||
|
||||
// Checks whether you have the required permissions for the action, without
|
||||
|
@ -29166,16 +29169,6 @@ func (s *CreateNetworkInterfaceInput) Validate() error {
|
|||
if s.SubnetId == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("SubnetId"))
|
||||
}
|
||||
if s.PrivateIpAddresses != nil {
|
||||
for i, v := range s.PrivateIpAddresses {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PrivateIpAddresses", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
|
@ -30269,7 +30262,8 @@ type CreateVolumeInput struct {
|
|||
|
||||
// The number of I/O operations per second (IOPS) to provision for the volume,
|
||||
// with a maximum ratio of 50 IOPS/GiB. Range is 100 to 32000 IOPS for volumes
|
||||
// in most regions. For exceptions, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
|
||||
// in most regions. For exceptions, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
//
|
||||
// This parameter is valid only for Provisioned IOPS SSD (io1) volumes.
|
||||
Iops *int64 `type:"integer"`
|
||||
|
@ -50732,26 +50726,6 @@ func (s InstanceNetworkInterfaceSpecification) GoString() string {
|
|||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *InstanceNetworkInterfaceSpecification) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "InstanceNetworkInterfaceSpecification"}
|
||||
if s.PrivateIpAddresses != nil {
|
||||
for i, v := range s.PrivateIpAddresses {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PrivateIpAddresses", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value.
|
||||
func (s *InstanceNetworkInterfaceSpecification) SetAssociatePublicIpAddress(v bool) *InstanceNetworkInterfaceSpecification {
|
||||
s.AssociatePublicIpAddress = &v
|
||||
|
@ -51936,6 +51910,75 @@ func (s *LaunchTemplateConfig) SetOverrides(v []*LaunchTemplateOverrides) *Launc
|
|||
return s
|
||||
}
|
||||
|
||||
// The CPU options for the instance.
|
||||
type LaunchTemplateCpuOptions struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The number of CPU cores for the instance.
|
||||
CoreCount *int64 `locationName:"coreCount" type:"integer"`
|
||||
|
||||
// The number of threads per CPU core.
|
||||
ThreadsPerCore *int64 `locationName:"threadsPerCore" type:"integer"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s LaunchTemplateCpuOptions) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s LaunchTemplateCpuOptions) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// SetCoreCount sets the CoreCount field's value.
|
||||
func (s *LaunchTemplateCpuOptions) SetCoreCount(v int64) *LaunchTemplateCpuOptions {
|
||||
s.CoreCount = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetThreadsPerCore sets the ThreadsPerCore field's value.
|
||||
func (s *LaunchTemplateCpuOptions) SetThreadsPerCore(v int64) *LaunchTemplateCpuOptions {
|
||||
s.ThreadsPerCore = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// The CPU options for the instance. Both the core count and threads per core
|
||||
// must be specified in the request.
|
||||
type LaunchTemplateCpuOptionsRequest struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The number of CPU cores for the instance.
|
||||
CoreCount *int64 `type:"integer"`
|
||||
|
||||
// The number of threads per CPU core. To disable Intel Hyper-Threading Technology
|
||||
// for the instance, specify a value of 1. Otherwise, specify the default value
|
||||
// of 2.
|
||||
ThreadsPerCore *int64 `type:"integer"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s LaunchTemplateCpuOptionsRequest) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s LaunchTemplateCpuOptionsRequest) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// SetCoreCount sets the CoreCount field's value.
|
||||
func (s *LaunchTemplateCpuOptionsRequest) SetCoreCount(v int64) *LaunchTemplateCpuOptionsRequest {
|
||||
s.CoreCount = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetThreadsPerCore sets the ThreadsPerCore field's value.
|
||||
func (s *LaunchTemplateCpuOptionsRequest) SetThreadsPerCore(v int64) *LaunchTemplateCpuOptionsRequest {
|
||||
s.ThreadsPerCore = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// Describes a block device for an EBS volume.
|
||||
type LaunchTemplateEbsBlockDevice struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
@ -52416,26 +52459,6 @@ func (s LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) GoString() s
|
|||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "LaunchTemplateInstanceNetworkInterfaceSpecificationRequest"}
|
||||
if s.PrivateIpAddresses != nil {
|
||||
for i, v := range s.PrivateIpAddresses {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PrivateIpAddresses", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value.
|
||||
func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetAssociatePublicIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest {
|
||||
s.AssociatePublicIpAddress = &v
|
||||
|
@ -54621,9 +54644,8 @@ func (s *ModifyReservedInstancesOutput) SetReservedInstancesModificationId(v str
|
|||
type ModifySnapshotAttributeInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The snapshot attribute to modify.
|
||||
//
|
||||
// Only volume creation permissions may be modified at the customer level.
|
||||
// The snapshot attribute to modify. Only volume creation permissions can be
|
||||
// modified.
|
||||
Attribute *string `type:"string" enum:"SnapshotAttributeName"`
|
||||
|
||||
// A JSON representation of the snapshot attribute modification.
|
||||
|
@ -54976,19 +54998,17 @@ type ModifyVolumeInput struct {
|
|||
// it is UnauthorizedOperation.
|
||||
DryRun *bool `type:"boolean"`
|
||||
|
||||
// Target IOPS rate of the volume to be modified.
|
||||
// The target IOPS rate of the volume.
|
||||
//
|
||||
// Only valid for Provisioned IOPS SSD (io1) volumes. For more information about
|
||||
// io1 IOPS configuration, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops
|
||||
// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops).
|
||||
// This is only valid for Provisioned IOPS SSD (io1) volumes. For more information,
|
||||
// see Provisioned IOPS SSD (io1) Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops).
|
||||
//
|
||||
// Default: If no IOPS value is specified, the existing value is retained.
|
||||
Iops *int64 `type:"integer"`
|
||||
|
||||
// Target size in GiB of the volume to be modified. Target volume size must
|
||||
// be greater than or equal to than the existing size of the volume. For information
|
||||
// about available EBS volume sizes, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
|
||||
// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
|
||||
// The target size of the volume, in GiB. The target volume size must be greater
|
||||
// than or equal to the existing size of the volume. For information about
|
||||
// available EBS volume sizes, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
|
||||
//
|
||||
// Default: If no size is specified, the existing size is retained.
|
||||
Size *int64 `type:"integer"`
|
||||
|
@ -54998,10 +55018,7 @@ type ModifyVolumeInput struct {
|
|||
// VolumeId is a required field
|
||||
VolumeId *string `type:"string" required:"true"`
|
||||
|
||||
// Target EBS volume type of the volume to be modified
|
||||
//
|
||||
// The API does not support modifications for volume type standard. You also
|
||||
// cannot change the type of a volume to standard.
|
||||
// The target EBS volume type of the volume.
|
||||
//
|
||||
// Default: If no type is specified, the existing type is retained.
|
||||
VolumeType *string `type:"string" enum:"VolumeType"`
|
||||
|
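The reworded ModifyVolumeInput field docs above describe the target Iops, Size, and VolumeType parameters. A hedged sketch of a ModifyVolume call using them; the volume ID and values are made up for illustration:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    svc := ec2.New(session.Must(session.NewSession()))

    // Grow an io1 volume and raise its provisioned IOPS; omitted fields
    // keep their existing values, per the defaults documented above.
    out, err := svc.ModifyVolume(&ec2.ModifyVolumeInput{
        VolumeId:   aws.String("vol-0123456789abcdef0"),
        Size:       aws.Int64(200), // GiB
        VolumeType: aws.String("io1"),
        Iops:       aws.Int64(10000),
    })
    if err != nil {
        log.Fatal(err)
    }
    // Progress can then be tracked via DescribeVolumesModifications.
    fmt.Println(aws.StringValue(out.VolumeModification.ModificationState))
}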
@ -55063,7 +55080,7 @@ func (s *ModifyVolumeInput) SetVolumeType(v string) *ModifyVolumeInput {
|
|||
type ModifyVolumeOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// A VolumeModification object.
|
||||
// Information about the volume modification.
|
||||
VolumeModification *VolumeModification `locationName:"volumeModification" type:"structure"`
|
||||
}
|
||||
|
||||
|
@ -57553,9 +57570,7 @@ type PrivateIpAddressSpecification struct {
|
|||
Primary *bool `locationName:"primary" type:"boolean"`
|
||||
|
||||
// The private IPv4 addresses.
|
||||
//
|
||||
// PrivateIpAddress is a required field
|
||||
PrivateIpAddress *string `locationName:"privateIpAddress" type:"string" required:"true"`
|
||||
PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
|
@ -57568,19 +57583,6 @@ func (s PrivateIpAddressSpecification) GoString() string {
|
|||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *PrivateIpAddressSpecification) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "PrivateIpAddressSpecification"}
|
||||
if s.PrivateIpAddress == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("PrivateIpAddress"))
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPrimary sets the Primary field's value.
|
||||
func (s *PrivateIpAddressSpecification) SetPrimary(v bool) *PrivateIpAddressSpecification {
|
||||
s.Primary = &v
|
||||
|
@ -59549,6 +59551,11 @@ type RequestLaunchTemplateData struct {
|
|||
// cannot be changed using this action.
|
||||
BlockDeviceMappings []*LaunchTemplateBlockDeviceMappingRequest `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"`
|
||||
|
||||
// The CPU options for the instance. For more information, see Optimizing CPU
|
||||
// Options (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
CpuOptions *LaunchTemplateCpuOptionsRequest `type:"structure"`
|
||||
|
||||
// The credit option for CPU usage of the instance. Valid for T2 instances only.
|
||||
CreditSpecification *CreditSpecificationRequest `type:"structure"`
|
||||
|
||||
|
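The new LaunchTemplateCpuOptionsRequest type added earlier plugs into RequestLaunchTemplateData through the CpuOptions field added in this hunk. A hedged sketch of how it might be used; the template name, AMI ID, and instance type are placeholder values:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
    svc := ec2.New(session.Must(session.NewSession()))

    _, err := svc.CreateLaunchTemplate(&ec2.CreateLaunchTemplateInput{
        LaunchTemplateName: aws.String("cpu-options-example"),
        LaunchTemplateData: &ec2.RequestLaunchTemplateData{
            ImageId:      aws.String("ami-0123456789abcdef0"),
            InstanceType: aws.String("c5.xlarge"),
            CpuOptions: &ec2.LaunchTemplateCpuOptionsRequest{
                CoreCount:      aws.Int64(2),
                ThreadsPerCore: aws.Int64(1), // 1 disables Hyper-Threading, per the field docs
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}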
@ -59666,16 +59673,6 @@ func (s *RequestLaunchTemplateData) Validate() error {
|
|||
}
|
||||
}
|
||||
}
|
||||
if s.NetworkInterfaces != nil {
|
||||
for i, v := range s.NetworkInterfaces {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
|
@ -59689,6 +59686,12 @@ func (s *RequestLaunchTemplateData) SetBlockDeviceMappings(v []*LaunchTemplateBl
|
|||
return s
|
||||
}
|
||||
|
||||
// SetCpuOptions sets the CpuOptions field's value.
|
||||
func (s *RequestLaunchTemplateData) SetCpuOptions(v *LaunchTemplateCpuOptionsRequest) *RequestLaunchTemplateData {
|
||||
s.CpuOptions = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetCreditSpecification sets the CreditSpecification field's value.
|
||||
func (s *RequestLaunchTemplateData) SetCreditSpecification(v *CreditSpecificationRequest) *RequestLaunchTemplateData {
|
||||
s.CreditSpecification = v
|
||||
|
@ -60181,16 +60184,6 @@ func (s *RequestSpotLaunchSpecification) Validate() error {
|
|||
invalidParams.AddNested("Monitoring", err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
if s.NetworkInterfaces != nil {
|
||||
for i, v := range s.NetworkInterfaces {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
|
@ -61567,6 +61560,11 @@ type ResponseLaunchTemplateData struct {
|
|||
// The block device mappings.
|
||||
BlockDeviceMappings []*LaunchTemplateBlockDeviceMapping `locationName:"blockDeviceMappingSet" locationNameList:"item" type:"list"`
|
||||
|
||||
// The CPU options for the instance. For more information, see Optimizing CPU
|
||||
// Options (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
CpuOptions *LaunchTemplateCpuOptions `locationName:"cpuOptions" type:"structure"`
|
||||
|
||||
// The credit option for CPU usage of the instance.
|
||||
CreditSpecification *CreditSpecification `locationName:"creditSpecification" type:"structure"`
|
||||
|
||||
|
@ -61643,6 +61641,12 @@ func (s *ResponseLaunchTemplateData) SetBlockDeviceMappings(v []*LaunchTemplateB
|
|||
return s
|
||||
}
|
||||
|
||||
// SetCpuOptions sets the CpuOptions field's value.
|
||||
func (s *ResponseLaunchTemplateData) SetCpuOptions(v *LaunchTemplateCpuOptions) *ResponseLaunchTemplateData {
|
||||
s.CpuOptions = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetCreditSpecification sets the CreditSpecification field's value.
|
||||
func (s *ResponseLaunchTemplateData) SetCreditSpecification(v *CreditSpecification) *ResponseLaunchTemplateData {
|
||||
s.CreditSpecification = v
|
||||
|
@ -62597,16 +62601,6 @@ func (s *RunInstancesInput) Validate() error {
|
|||
invalidParams.AddNested("Monitoring", err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
if s.NetworkInterfaces != nil {
|
||||
for i, v := range s.NetworkInterfaces {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
|
@ -65057,26 +65051,6 @@ func (s SpotFleetLaunchSpecification) GoString() string {
|
|||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *SpotFleetLaunchSpecification) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "SpotFleetLaunchSpecification"}
|
||||
if s.NetworkInterfaces != nil {
|
||||
for i, v := range s.NetworkInterfaces {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NetworkInterfaces", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAddressingType sets the AddressingType field's value.
|
||||
func (s *SpotFleetLaunchSpecification) SetAddressingType(v string) *SpotFleetLaunchSpecification {
|
||||
s.AddressingType = &v
|
||||
|
@ -65399,16 +65373,6 @@ func (s *SpotFleetRequestConfigData) Validate() error {
|
|||
if s.TargetCapacity == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("TargetCapacity"))
|
||||
}
|
||||
if s.LaunchSpecifications != nil {
|
||||
for i, v := range s.LaunchSpecifications {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchSpecifications", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
if s.LaunchTemplateConfigs != nil {
|
||||
for i, v := range s.LaunchTemplateConfigs {
|
||||
if v == nil {
|
||||
|
@ -68119,8 +68083,9 @@ type Volume struct {
|
|||
// For Provisioned IOPS SSD volumes, this represents the number of IOPS that
|
||||
// are provisioned for the volume. For General Purpose SSD volumes, this represents
|
||||
// the baseline performance of the volume and the rate at which the volume accumulates
|
||||
// I/O credits for bursting. For more information on General Purpose SSD baseline
|
||||
// performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
|
||||
// I/O credits for bursting. For more information about General Purpose SSD
|
||||
// baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types
|
||||
// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
//
|
||||
// Constraint: Range is 100-32000 IOPS for io1 volumes and 100-10000 IOPS for
|
||||
|
@ -68351,41 +68316,41 @@ func (s *VolumeDetail) SetSize(v int64) *VolumeDetail {
|
|||
type VolumeModification struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Modification completion or failure time.
|
||||
// The modification completion or failure time.
|
||||
EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"`
|
||||
|
||||
// Current state of modification. Modification state is null for unmodified
|
||||
// The current modification state. The modification state is null for unmodified
|
||||
// volumes.
|
||||
ModificationState *string `locationName:"modificationState" type:"string" enum:"VolumeModificationState"`
|
||||
|
||||
// Original IOPS rate of the volume being modified.
|
||||
// The original IOPS rate of the volume.
|
||||
OriginalIops *int64 `locationName:"originalIops" type:"integer"`
|
||||
|
||||
// Original size of the volume being modified.
|
||||
// The original size of the volume.
|
||||
OriginalSize *int64 `locationName:"originalSize" type:"integer"`
|
||||
|
||||
// Original EBS volume type of the volume being modified.
|
||||
// The original EBS volume type of the volume.
|
||||
OriginalVolumeType *string `locationName:"originalVolumeType" type:"string" enum:"VolumeType"`
|
||||
|
||||
// Modification progress from 0 to 100%.
|
||||
// The modification progress, from 0 to 100 percent complete.
|
||||
Progress *int64 `locationName:"progress" type:"long"`
|
||||
|
||||
// Modification start time
|
||||
// The modification start time.
|
||||
StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"`
|
||||
|
||||
// Generic status message on modification progress or failure.
|
||||
// A status message about the modification progress or failure.
|
||||
StatusMessage *string `locationName:"statusMessage" type:"string"`
|
||||
|
||||
// Target IOPS rate of the volume being modified.
|
||||
// The target IOPS rate of the volume.
|
||||
TargetIops *int64 `locationName:"targetIops" type:"integer"`
|
||||
|
||||
// Target size of the volume being modified.
|
||||
// The target size of the volume, in GiB.
|
||||
TargetSize *int64 `locationName:"targetSize" type:"integer"`
|
||||
|
||||
// Target EBS volume type of the volume being modified.
|
||||
// The target EBS volume type of the volume.
|
||||
TargetVolumeType *string `locationName:"targetVolumeType" type:"string" enum:"VolumeType"`
|
||||
|
||||
// ID of the volume being modified.
|
||||
// The ID of the volume.
|
||||
VolumeId *string `locationName:"volumeId" type:"string"`
|
||||
}
|
||||
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
// +build go1.6
|
||||
|
||||
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
@ -32,7 +34,7 @@ const (
|
|||
|
||||
// Maximum allowed depth when recursively substituting variable names.
|
||||
_DEPTH_VALUES = 99
|
||||
_VERSION = "1.37.0"
|
||||
_VERSION = "1.38.0"
|
||||
)
|
||||
|
||||
// Version returns current package version literal.
|
||||
|
@ -132,6 +134,8 @@ type LoadOptions struct {
|
|||
IgnoreContinuation bool
|
||||
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
|
||||
IgnoreInlineComment bool
|
||||
// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
|
||||
SkipUnrecognizableLines bool
|
||||
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
|
||||
// This type of keys are mostly used in my.cnf.
|
||||
AllowBooleanKeys bool
|
||||
|
@ -157,7 +161,7 @@ type LoadOptions struct {
|
|||
// when value is NOT surrounded by any quotes.
|
||||
// Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all.
|
||||
UnescapeValueCommentSymbols bool
|
||||
// Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
|
||||
// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
|
||||
// conform to key/value pairs. Specify the names of those blocks here.
|
||||
UnparseableSections []string
|
||||
}
|
||||
|
|
|
@ -339,8 +339,7 @@ func (f *File) parse(reader io.Reader) (err error) {
|
|||
|
||||
// NOTE: Iterate and increase `currentPeekSize` until
|
||||
// the size of the parser buffer is found.
|
||||
// TODO: When Golang 1.10 is the lowest version supported,
|
||||
// replace with `parserBufferSize := p.buf.Size()`.
|
||||
// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
|
||||
parserBufferSize := 0
|
||||
// NOTE: Peek 1kb at a time.
|
||||
currentPeekSize := 1024
|
||||
|
@ -390,8 +389,7 @@ func (f *File) parse(reader io.Reader) (err error) {
|
|||
// Section
|
||||
if line[0] == '[' {
|
||||
// Read to the next ']' (TODO: support quoted strings)
|
||||
// TODO(unknwon): use LastIndexByte when stop supporting Go1.4
|
||||
closeIdx := bytes.LastIndex(line, []byte("]"))
|
||||
closeIdx := bytes.LastIndexByte(line, ']')
|
||||
if closeIdx == -1 {
|
||||
return fmt.Errorf("unclosed section: %s", line)
|
||||
}
|
||||
|
@ -433,25 +431,31 @@ func (f *File) parse(reader io.Reader) (err error) {
|
|||
kname, offset, err := readKeyName(line)
|
||||
if err != nil {
|
||||
// Treat as boolean key when desired, and whole line is key name.
|
||||
if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
|
||||
kname, err := p.readValue(line,
|
||||
parserBufferSize,
|
||||
f.options.IgnoreContinuation,
|
||||
f.options.IgnoreInlineComment,
|
||||
f.options.UnescapeValueDoubleQuotes,
|
||||
f.options.UnescapeValueCommentSymbols,
|
||||
f.options.AllowPythonMultilineValues,
|
||||
f.options.SpaceBeforeInlineComment)
|
||||
if err != nil {
|
||||
return err
|
||||
if IsErrDelimiterNotFound(err) {
|
||||
switch {
|
||||
case f.options.AllowBooleanKeys:
|
||||
kname, err := p.readValue(line,
|
||||
parserBufferSize,
|
||||
f.options.IgnoreContinuation,
|
||||
f.options.IgnoreInlineComment,
|
||||
f.options.UnescapeValueDoubleQuotes,
|
||||
f.options.UnescapeValueCommentSymbols,
|
||||
f.options.AllowPythonMultilineValues,
|
||||
f.options.SpaceBeforeInlineComment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key, err := section.NewBooleanKey(kname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key.Comment = strings.TrimSpace(p.comment.String())
|
||||
p.comment.Reset()
|
||||
continue
|
||||
|
||||
case f.options.SkipUnrecognizableLines:
|
||||
continue
|
||||
}
|
||||
key, err := section.NewBooleanKey(kname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key.Comment = strings.TrimSpace(p.comment.String())
|
||||
p.comment.Reset()
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
|
|
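The parser rework above is what backs the new SkipUnrecognizableLines load option: a line with no delimiter is now either treated as a boolean key or skipped, instead of always failing. A small sketch of the option in use; the gopkg.in/ini.v1 import path is the package's canonical alias and is an assumption here (inside this repository the vendored github.com/go-ini/ini path applies):

package main

import (
    "fmt"
    "log"

    ini "gopkg.in/ini.v1"
)

func main() {
    // The second line has no key/value delimiter and would normally be an error.
    src := []byte("[core]\nsome stray diagnostic line\nname = value\n")

    f, err := ini.LoadSources(ini.LoadOptions{SkipUnrecognizableLines: true}, src)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(f.Section("core").Key("name").String()) // value
}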
@ -181,11 +181,12 @@ WAIT:
|
|||
// Handle the one-shot mode.
|
||||
if l.opts.LockTryOnce && attempts > 0 {
|
||||
elapsed := time.Since(start)
|
||||
if elapsed > qOpts.WaitTime {
|
||||
if elapsed > l.opts.LockWaitTime {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
qOpts.WaitTime -= elapsed
|
||||
// Query wait time should not exceed the lock wait time
|
||||
qOpts.WaitTime = l.opts.LockWaitTime - elapsed
|
||||
}
|
||||
attempts++
|
||||
|
||||
|
|
|
@ -199,11 +199,12 @@ WAIT:
|
|||
// Handle the one-shot mode.
|
||||
if s.opts.SemaphoreTryOnce && attempts > 0 {
|
||||
elapsed := time.Since(start)
|
||||
if elapsed > qOpts.WaitTime {
|
||||
if elapsed > s.opts.SemaphoreWaitTime {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
qOpts.WaitTime -= elapsed
|
||||
// Query wait time should not exceed the semaphore wait time
|
||||
qOpts.WaitTime = s.opts.SemaphoreWaitTime - elapsed
|
||||
}
|
||||
attempts++
|
||||
|
||||
|
|
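The two Consul hunks above fix the same bug in one-shot mode: the remaining wait was decremented from the query's wait time instead of being derived from the configured lock or semaphore wait time. A hedged sketch of the code path they serve, using the Consul API client; the key name and timeout are illustrative:

package main

import (
    "log"
    "time"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    // With LockTryOnce set, Lock gives up after LockWaitTime instead of
    // blocking forever; on timeout it returns a nil channel and nil error.
    lock, err := client.LockOpts(&api.LockOptions{
        Key:          "service/example/leader",
        LockTryOnce:  true,
        LockWaitTime: 10 * time.Second,
    })
    if err != nil {
        log.Fatal(err)
    }

    ch, err := lock.Lock(nil)
    if err != nil {
        log.Fatal(err)
    }
    if ch == nil {
        log.Println("lock not acquired within LockWaitTime")
        return
    }
    defer lock.Unlock()
    log.Println("lock acquired")
}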
78 vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/authorizer_client_gcp.go generated vendored Normal file
|
@ -0,0 +1,78 @@
|
|||
package gcpauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/vault/helper/strutil"
|
||||
"google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/iam/v1"
|
||||
)
|
||||
|
||||
var _ client = (*gcpClient)(nil)
|
||||
|
||||
// gcpClient implements client and communicates with the GCP API. It is
|
||||
// abstracted as an interface for stubbing during testing. See stubbedClient for
|
||||
// more details.
|
||||
type gcpClient struct {
|
||||
computeSvc *compute.Service
|
||||
iamSvc *iam.Service
|
||||
}
|
||||
|
||||
func (c *gcpClient) InstanceGroups(ctx context.Context, project string, boundInstanceGroups []string) (map[string][]string, error) {
|
||||
// map of zone names to a slice of instance group names in that zone.
|
||||
igz := make(map[string][]string)
|
||||
|
||||
if err := c.computeSvc.InstanceGroups.
|
||||
AggregatedList(project).
|
||||
Fields("items/*/instanceGroups/name").
|
||||
Pages(ctx, func(l *compute.InstanceGroupAggregatedList) error {
|
||||
for k, v := range l.Items {
|
||||
zone, err := zoneFromSelfLink(k)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, g := range v.InstanceGroups {
|
||||
if strutil.StrListContains(boundInstanceGroups, g.Name) {
|
||||
igz[zone] = append(igz[zone], g.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return igz, nil
|
||||
}
|
||||
|
||||
func (c *gcpClient) InstanceGroupContainsInstance(ctx context.Context, project, zone, group, instanceSelfLink string) (bool, error) {
|
||||
var req compute.InstanceGroupsListInstancesRequest
|
||||
resp, err := c.computeSvc.InstanceGroups.
|
||||
ListInstances(project, zone, group, &req).
|
||||
Filter(fmt.Sprintf("instance eq %s", instanceSelfLink)).
|
||||
Context(ctx).
|
||||
Do()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if resp != nil && len(resp.Items) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (c *gcpClient) ServiceAccount(ctx context.Context, name string) (string, string, error) {
|
||||
account, err := c.iamSvc.Projects.ServiceAccounts.
|
||||
Get(name).
|
||||
Fields("uniqueId", "email").
|
||||
Context(ctx).
|
||||
Do()
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
return account.UniqueId, account.Email, nil
|
||||
}
|
25 vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/authorizer_client_stubbed.go generated vendored Normal file
|
@ -0,0 +1,25 @@
|
|||
package gcpauth
|
||||
|
||||
import "context"
|
||||
|
||||
var _ client = (*stubbedClient)(nil)
|
||||
|
||||
// stubbedClient is a simple client to use for testing where the API calls to
|
||||
// GCP are "stubbed" instead of hitting the actual API.
|
||||
type stubbedClient struct {
|
||||
instanceGroupsByZone map[string][]string
|
||||
instanceGroupContainsInstance bool
|
||||
saId, saEmail string
|
||||
}
|
||||
|
||||
func (c *stubbedClient) InstanceGroups(_ context.Context, _ string, _ []string) (map[string][]string, error) {
|
||||
return c.instanceGroupsByZone, nil
|
||||
}
|
||||
|
||||
func (c *stubbedClient) InstanceGroupContainsInstance(_ context.Context, _, _, _, _ string) (bool, error) {
|
||||
return c.instanceGroupContainsInstance, nil
|
||||
}
|
||||
|
||||
func (c *stubbedClient) ServiceAccount(_ context.Context, _ string) (string, string, error) {
|
||||
return c.saId, c.saEmail, nil
|
||||
}
|
159 vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/authorizer_gce.go generated vendored Normal file
|
@ -0,0 +1,159 @@
|
|||
package gcpauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/vault/helper/strutil"
|
||||
)
|
||||
|
||||
type client interface {
|
||||
InstanceGroups(context.Context, string, []string) (map[string][]string, error)
|
||||
InstanceGroupContainsInstance(context.Context, string, string, string, string) (bool, error)
|
||||
ServiceAccount(context.Context, string) (string, string, error)
|
||||
}
|
||||
|
||||
type AuthorizeGCEInput struct {
|
||||
client client
|
||||
|
||||
project string
|
||||
serviceAccount string
|
||||
|
||||
instanceLabels map[string]string
|
||||
instanceSelfLink string
|
||||
instanceZone string
|
||||
|
||||
boundLabels map[string]string
|
||||
boundRegions []string
|
||||
boundZones []string
|
||||
|
||||
boundInstanceGroups []string
|
||||
boundServiceAccounts []string
|
||||
}
|
||||
|
||||
func AuthorizeGCE(ctx context.Context, i *AuthorizeGCEInput) error {
|
||||
// Verify instance has role labels if labels were set on role.
|
||||
for k, v := range i.boundLabels {
|
||||
if act, ok := i.instanceLabels[k]; !ok || act != v {
|
||||
return fmt.Errorf("instance missing bound label \"%s:%s\"", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Parse the zone name from the self-link URI if given.
|
||||
zone, err := zoneFromSelfLink(i.instanceZone)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Convert the zone to a region name.
|
||||
region, err := zoneToRegion(zone)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify the instance is in the zone/region
|
||||
switch {
|
||||
case len(i.boundZones) > 0:
|
||||
if !strutil.StrListContains(i.boundZones, zone) {
|
||||
return fmt.Errorf("instance not in bound zones %q", i.boundZones)
|
||||
}
|
||||
case len(i.boundRegions) > 0:
|
||||
if !strutil.StrListContains(i.boundRegions, region) {
|
||||
return fmt.Errorf("instance not in bound regions %q", i.boundRegions)
|
||||
}
|
||||
}
|
||||
|
||||
// For each bound instance group, verify the group exists and that the
|
||||
// instance is a member of that group.
|
||||
if len(i.boundInstanceGroups) > 0 {
|
||||
igz, err := i.client.InstanceGroups(ctx, i.project, i.boundInstanceGroups)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list instance groups for project %q: %s", i.project, err)
|
||||
}
|
||||
|
||||
// Keep track of whether we've successfully found an instance group of
|
||||
// which this instance is a member, which meets the zonal/regional criteria.
|
||||
found := false
|
||||
|
||||
for _, g := range i.boundInstanceGroups {
|
||||
if found {
|
||||
break
|
||||
}
|
||||
|
||||
var group, zone string
|
||||
|
||||
switch {
|
||||
case len(i.boundZones) > 0:
|
||||
for _, z := range i.boundZones {
|
||||
if groups, ok := igz[z]; ok && len(groups) > 0 {
|
||||
for _, grp := range groups {
|
||||
if grp == g {
|
||||
group = g
|
||||
zone = z
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if group == "" {
|
||||
return fmt.Errorf("instance group %q does not exist in zones %q for project %q",
|
||||
g, i.boundZones, i.project)
|
||||
}
|
||||
case len(i.boundRegions) > 0:
|
||||
for _, r := range i.boundRegions {
|
||||
for z, groups := range igz {
|
||||
if strings.HasPrefix(z, r) { // zone is prefixed with region
|
||||
for _, grp := range groups {
|
||||
if grp == g {
|
||||
group = g
|
||||
zone = z
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if group == "" {
|
||||
return fmt.Errorf("instance group %q does not exist in regions %q for project %q",
|
||||
g, i.boundRegions, i.project)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("instance group %q is not bound to any zones or regions", g)
|
||||
}
|
||||
|
||||
ok, err := i.client.InstanceGroupContainsInstance(ctx, i.project, zone, group, i.instanceSelfLink)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list instances in instance group %q for project %q: %s",
|
||||
group, i.project, err)
|
||||
}
|
||||
|
||||
if ok {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return fmt.Errorf("instance is not part of instance groups %q",
|
||||
i.boundInstanceGroups)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify instance is running under one of the allowed service accounts.
|
||||
if len(i.boundServiceAccounts) > 0 {
|
||||
// ServiceAccount wraps a call to the GCP IAM API to get a service account.
|
||||
name := fmt.Sprintf("projects/%s/serviceAccounts/%s", i.project, i.serviceAccount)
|
||||
|
||||
saId, saEmail, err := i.client.ServiceAccount(ctx, name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not find service account %q in project %q: %s",
|
||||
i.serviceAccount, i.project, err)
|
||||
}
|
||||
|
||||
if !(strutil.StrListContains(i.boundServiceAccounts, saEmail) ||
|
||||
strutil.StrListContains(i.boundServiceAccounts, saId)) {
|
||||
return fmt.Errorf("service account %q (%q) is not in bound service accounts %q",
|
||||
saId, saEmail, i.boundServiceAccounts)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
67 vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/helpers.go generated vendored Normal file
|
@ -0,0 +1,67 @@
|
|||
package gcpauth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/vault/logical"
|
||||
"github.com/hashicorp/vault/logical/framework"
|
||||
)
|
||||
|
||||
// validateFields verifies that no bad arguments were given to the request.
|
||||
func validateFields(req *logical.Request, data *framework.FieldData) error {
|
||||
var unknownFields []string
|
||||
for k := range req.Data {
|
||||
if _, ok := data.Schema[k]; !ok {
|
||||
unknownFields = append(unknownFields, k)
|
||||
}
|
||||
}
|
||||
|
||||
if len(unknownFields) > 0 {
|
||||
// Sort since this is a human error
|
||||
sort.Strings(unknownFields)
|
||||
|
||||
return fmt.Errorf("unknown fields: %q", unknownFields)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// zoneToRegion converts a zone name to its corresponding region. From
|
||||
// https://cloud.google.com/compute/docs/regions-zones/, the FQDN of a zone is
|
||||
// always <region>-<zone>. Instead of doing an API call, this function uses
|
||||
// string parsing as an optimization.
|
||||
//
|
||||
// If the zone is a self-link, it is converted into a human name first. If the
|
||||
// zone cannot be converted to a region, an error is returned.
|
||||
func zoneToRegion(zone string) (string, error) {
|
||||
zone, err := zoneFromSelfLink(zone)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if i := strings.LastIndex(zone, "-"); i > -1 {
|
||||
return zone[0:i], nil
|
||||
}
|
||||
return "", fmt.Errorf("failed to extract region from zone name %q", zone)
|
||||
}
|
||||
|
||||
// zoneFromSelfLink converts a zone self-link into the human zone name.
|
||||
func zoneFromSelfLink(zone string) (string, error) {
|
||||
prefix := "zones/"
|
||||
|
||||
if zone == "" {
|
||||
return "", fmt.Errorf("failed to extract zone from self-link %q", zone)
|
||||
}
|
||||
|
||||
if strings.Contains(zone, "/") {
|
||||
if i := strings.LastIndex(zone, prefix); i > -1 {
|
||||
zone = zone[i+len(prefix) : len(zone)]
|
||||
} else {
|
||||
return "", fmt.Errorf("failed to extract zone from self-link %q", zone)
|
||||
}
|
||||
}
|
||||
|
||||
return zone, nil
|
||||
}
|
|
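The zoneFromSelfLink and zoneToRegion helpers added above are pure string manipulation. A standalone illustration of the same transformations; the self-link below is a made-up example and this snippet is not part of the plugin:

package main

import (
    "fmt"
    "strings"
)

func main() {
    selfLink := "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a"

    // zoneFromSelfLink: keep everything after the last "zones/" segment.
    zone := selfLink
    if i := strings.LastIndex(zone, "zones/"); i > -1 {
        zone = zone[i+len("zones/"):]
    }

    // zoneToRegion: a zone name is always <region>-<zone letter>.
    region := zone[:strings.LastIndex(zone, "-")]

    fmt.Println(zone)   // us-central1-a
    fmt.Println(region) // us-central1
}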
@ -38,6 +38,11 @@ If not specified, will use the OAuth2 library default. Useful for testing.`,
|
|||
}
|
||||
|
||||
func (b *GcpAuthBackend) pathConfigWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
// Validate we didn't get extraneous fields
|
||||
if err := validateFields(req, data); err != nil {
|
||||
return nil, logical.CodedError(422, err.Error())
|
||||
}
|
||||
|
||||
config, err := b.config(ctx, req.Storage)
|
||||
|
||||
if err != nil {
|
||||
|
@ -75,17 +80,27 @@ func (b *GcpAuthBackend) pathConfigRead(ctx context.Context, req *logical.Reques
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
resp := &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"client_email": config.Credentials.ClientEmail,
|
||||
"client_id": config.Credentials.ClientId,
|
||||
"private_key_id": config.Credentials.PrivateKeyId,
|
||||
"project_id": config.Credentials.ProjectId,
|
||||
"google_certs_endpoint": config.GoogleCertsEndpoint,
|
||||
},
|
||||
resp := make(map[string]interface{})
|
||||
|
||||
if v := config.Credentials.ClientEmail; v != "" {
|
||||
resp["client_email"] = v
|
||||
}
|
||||
if v := config.Credentials.ClientId; v != "" {
|
||||
resp["client_id"] = v
|
||||
}
|
||||
if v := config.Credentials.PrivateKeyId; v != "" {
|
||||
resp["private_key_id"] = v
|
||||
}
|
||||
if v := config.Credentials.ProjectId; v != "" {
|
||||
resp["project_id"] = v
|
||||
}
|
||||
if v := config.GoogleCertsEndpoint; v != "" {
|
||||
resp["google_certs_endpoint"] = v
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
return &logical.Response{
|
||||
Data: resp,
|
||||
}, nil
|
||||
}
|
||||
|
||||
const confHelpSyn = `Configure credentials used to query the GCP IAM API to verify authenticating service accounts`
|
||||
|
@ -100,8 +115,8 @@ iam AUTH:
|
|||
|
||||
// gcpConfig contains all config required for the GCP backend.
|
||||
type gcpConfig struct {
|
||||
Credentials *gcputil.GcpCredentials `json:"credentials" structs:"credentials" mapstructure:"credentials"`
|
||||
GoogleCertsEndpoint string `json:"google_certs_endpoint" structs:"google_certs_endpoint" mapstructure:"google_certs_endpoint"`
|
||||
Credentials *gcputil.GcpCredentials `json:"credentials"`
|
||||
GoogleCertsEndpoint string `json:"google_certs_endpoint"`
|
||||
}
|
||||
|
||||
// Update sets gcpConfig values parsed from the FieldData.
|
||||
|
|
|
@ -52,6 +52,11 @@ GCE identity metadata token ('iam', 'gce' roles).`,
|
|||
}
|
||||
|
||||
func (b *GcpAuthBackend) pathLogin(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
// Validate we didn't get extraneous fields
|
||||
if err := validateFields(req, data); err != nil {
|
||||
return nil, logical.CodedError(422, err.Error())
|
||||
}
|
||||
|
||||
loginInfo, err := b.parseAndValidateJwt(ctx, req, data)
|
||||
if err != nil {
|
||||
return logical.ErrorResponse(err.Error()), nil
|
||||
|
@ -427,7 +432,7 @@ func (b *GcpAuthBackend) pathGceLogin(ctx context.Context, req *logical.Request,
|
|||
metadata.ProjectId, metadata.Zone, metadata.InstanceName, err)), nil
|
||||
}
|
||||
|
||||
if err := b.authorizeGCEInstance(ctx, instance, req.Storage, role, metadata.Zone, loginInfo.ServiceAccountId); err != nil {
|
||||
if err := b.authorizeGCEInstance(ctx, instance, req.Storage, role, loginInfo.ServiceAccountId); err != nil {
|
||||
return logical.ErrorResponse(err.Error()), nil
|
||||
}
|
||||
|
||||
|
@ -508,7 +513,7 @@ func (b *GcpAuthBackend) pathGceRenew(ctx context.Context, req *logical.Request,
|
|||
if !ok {
|
||||
return errors.New("invalid auth metadata: service_account_id not found")
|
||||
}
|
||||
if err := b.authorizeGCEInstance(ctx, instance, req.Storage, role, meta.Zone, serviceAccountId); err != nil {
|
||||
if err := b.authorizeGCEInstance(ctx, instance, req.Storage, role, serviceAccountId); err != nil {
|
||||
return fmt.Errorf("could not renew token for role %s: %v", roleName, err)
|
||||
}
|
||||
|
||||
|
@ -562,104 +567,39 @@ func getInstanceMetadataFromAuth(authMetadata map[string]string) (*gcputil.GCEId
|
|||
return meta, nil
|
||||
}
|
||||
|
||||
// validateGCEInstance returns an error if the given GCE instance is not authorized for the role.
|
||||
func (b *GcpAuthBackend) authorizeGCEInstance(ctx context.Context, instance *compute.Instance, s logical.Storage, role *gcpRole, zone, serviceAccountId string) error {
|
||||
gceClient, err := b.GCE(ctx, s)
|
||||
// authorizeGCEInstance returns an error if the given GCE instance is not
|
||||
// authorized for the role.
|
||||
func (b *GcpAuthBackend) authorizeGCEInstance(ctx context.Context, instance *compute.Instance, s logical.Storage, role *gcpRole, serviceAccountId string) error {
|
||||
computeSvc, err := b.GCE(ctx, s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify instance has role labels if labels were set on role.
|
||||
for k, expectedV := range role.BoundLabels {
|
||||
actualV, ok := instance.Labels[k]
|
||||
if !ok || actualV != expectedV {
|
||||
return fmt.Errorf("role label '%s:%s' not found on GCE instance", k, expectedV)
|
||||
}
|
||||
iamSvc, err := b.IAM(ctx, s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify that instance is in zone or region if given.
|
||||
if len(role.BoundZone) > 0 {
|
||||
if !compareResourceNameOrSelfLink(role.BoundZone, instance.Zone, "zones") {
|
||||
return fmt.Errorf("instance zone %s is not role zone '%s'", instance.Zone, role.BoundZone)
|
||||
}
|
||||
} else if len(role.BoundRegion) > 0 {
|
||||
zone, err := gceClient.Zones.Get(role.ProjectId, zone).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not verify instance zone '%s' is available for project '%s': %v", zone.Name, role.ProjectId, err)
|
||||
}
|
||||
if !compareResourceNameOrSelfLink(role.BoundRegion, zone.Region, "regions") {
|
||||
return fmt.Errorf("instance zone %s is not in role region '%s'", zone.Name, role.BoundRegion)
|
||||
}
|
||||
}
|
||||
return AuthorizeGCE(ctx, &AuthorizeGCEInput{
|
||||
client: &gcpClient{
|
||||
computeSvc: computeSvc,
|
||||
iamSvc: iamSvc,
|
||||
},
|
||||
|
||||
// If instance group is given, verify group exists and that instance is in group.
|
||||
if len(role.BoundInstanceGroup) > 0 {
|
||||
var group *compute.InstanceGroup
|
||||
var err error
|
||||
project: role.ProjectId,
|
||||
serviceAccount: serviceAccountId,
|
||||
|
||||
// Check if group should be zonal or regional.
|
||||
if len(role.BoundZone) > 0 {
|
||||
group, err = gceClient.InstanceGroups.Get(role.ProjectId, role.BoundZone, role.BoundInstanceGroup).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not find role instance group %s (project %s, zone %s)", role.BoundInstanceGroup, role.ProjectId, role.BoundZone)
|
||||
}
|
||||
} else if len(role.BoundRegion) > 0 {
|
||||
group, err = gceClient.RegionInstanceGroups.Get(role.ProjectId, role.BoundRegion, role.BoundInstanceGroup).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not find role instance group %s (project %s, region %s)", role.BoundInstanceGroup, role.ProjectId, role.BoundRegion)
|
||||
}
|
||||
} else {
|
||||
return errors.New("expected zone or region to be set for GCE role '%s' with instance group")
|
||||
}
|
||||
instanceLabels: instance.Labels,
|
||||
instanceSelfLink: instance.SelfLink,
|
||||
instanceZone: instance.Zone,
|
||||
|
||||
// Verify instance group contains authenticating instance.
|
||||
instanceIdFilter := fmt.Sprintf("instance eq %s", instance.SelfLink)
|
||||
listInstanceReq := &compute.InstanceGroupsListInstancesRequest{}
|
||||
listResp, err := gceClient.InstanceGroups.ListInstances(role.ProjectId, role.BoundZone, group.Name, listInstanceReq).Filter(instanceIdFilter).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not confirm instance %s is part of instance group %s: %s", instance.Name, role.BoundInstanceGroup, err)
|
||||
}
|
||||
boundLabels: role.BoundLabels,
|
||||
boundRegions: role.BoundRegions,
|
||||
boundZones: role.BoundZones,
|
||||
|
||||
if len(listResp.Items) == 0 {
|
||||
return fmt.Errorf("instance %s is not part of instance group %s", instance.Name, role.BoundInstanceGroup)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Verify instance is running under one of the allowed service accounts.
|
||||
if len(role.BoundServiceAccounts) > 0 {
|
||||
iamClient, err := b.IAM(ctx, s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serviceAccount, err := gcputil.ServiceAccount(iamClient, &gcputil.ServiceAccountId{
|
||||
Project: role.ProjectId,
|
||||
EmailOrId: serviceAccountId,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not find service account with id '%s': %v", serviceAccountId, err)
|
||||
}
|
||||
|
||||
if !(strutil.StrListContains(role.BoundServiceAccounts, serviceAccount.Email) ||
|
||||
strutil.StrListContains(role.BoundServiceAccounts, serviceAccount.UniqueId)) {
|
||||
return fmt.Errorf("GCE instance's service account email (%s) or id (%s) not found in role service accounts: %v",
|
||||
serviceAccount.Email, serviceAccount.UniqueId, role.BoundServiceAccounts)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func compareResourceNameOrSelfLink(expected, actual, collectionId string) bool {
|
||||
sep := fmt.Sprintf("%s/", collectionId)
|
||||
if strings.Contains(expected, sep) {
|
||||
return expected == actual
|
||||
}
|
||||
|
||||
actTkns := strings.SplitAfter(actual, sep)
|
||||
actualName := actTkns[len(actTkns)-1]
|
||||
return expected == actualName
|
||||
boundInstanceGroups: role.BoundInstanceGroups,
|
||||
boundServiceAccounts: role.BoundServiceAccounts,
|
||||
})
|
||||
}
|
||||
|
||||
const pathLoginHelpSyn = `Authenticates Google Cloud Platform entities with Vault.`
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/go-gcp-common/gcputil"
|
||||
vaultconsts "github.com/hashicorp/vault/helper/consts"
|
||||
"github.com/hashicorp/vault/helper/policyutil"
|
||||
"github.com/hashicorp/vault/helper/strutil"
|
||||
"github.com/hashicorp/vault/logical"
|
||||
|
@ -38,7 +39,7 @@ const (
|
|||
maxJwtExpMaxMinutes int = 60
|
||||
)
|
||||
|
||||
var baseRoleFieldSchema map[string]*framework.FieldSchema = map[string]*framework.FieldSchema{
|
||||
var baseRoleFieldSchema = map[string]*framework.FieldSchema{
|
||||
"name": {
|
||||
Type: framework.TypeString,
|
||||
Description: "Name of the role.",
|
||||
|
@ -89,7 +90,7 @@ var baseRoleFieldSchema map[string]*framework.FieldSchema = map[string]*framewor
|
|||
},
|
||||
}
|
||||
|
||||
var iamOnlyFieldSchema map[string]*framework.FieldSchema = map[string]*framework.FieldSchema{
|
||||
var iamOnlyFieldSchema = map[string]*framework.FieldSchema{
|
||||
"max_jwt_exp": {
|
||||
Type: framework.TypeDurationSecond,
|
||||
Default: defaultIamMaxJwtExpMinutes * 60,
|
||||
|
@ -102,29 +103,48 @@ var iamOnlyFieldSchema map[string]*framework.FieldSchema = map[string]*framework
|
|||
},
|
||||
}
|
||||
|
||||
var gceOnlyFieldSchema map[string]*framework.FieldSchema = map[string]*framework.FieldSchema{
|
||||
var gceOnlyFieldSchema = map[string]*framework.FieldSchema{
|
||||
"bound_zones": {
|
||||
Type: framework.TypeCommaStringSlice,
|
||||
Description: "Comma-separated list of permitted zones to which the GCE " +
|
||||
"instance must belong. If a group is provided, it is assumed to be a " +
|
||||
"zonal group. This can be a self-link or zone name. This option only " +
|
||||
"applies to \"gce\" roles.",
|
||||
},
|
||||
|
||||
"bound_regions": {
|
||||
Type: framework.TypeCommaStringSlice,
|
||||
Description: "Comma-separated list of permitted regions to which the GCE " +
|
||||
"instance must belong. If a group is provided, it is assumed to be a " +
|
||||
"regional group. If \"zone\" is provided, this option is ignored. This " +
|
||||
"can be a self-link or region name. This option only applies to \"gce\" roles.",
|
||||
},
|
||||
|
||||
"bound_instance_groups": {
|
||||
Type: framework.TypeCommaStringSlice,
|
||||
Description: "Comma-separated list of permitted instance groups to which " +
|
||||
"the GCE instance must belong. This option only applies to \"gce\" roles.",
|
||||
},
|
||||
|
||||
"bound_labels": {
|
||||
Type: framework.TypeCommaStringSlice,
|
||||
Description: "Comma-separated list of GCP labels formatted as" +
|
||||
"\"key:value\" strings that must be present on the GCE instance " +
|
||||
"in order to authenticate. This option only applies to \"gce\" roles.",
|
||||
},
|
||||
|
||||
// Deprecated roles
|
||||
"bound_zone": {
|
||||
Type: framework.TypeString,
|
||||
Description: `
|
||||
"gce" roles only. If set, determines the zone that a GCE instance must belong to. If a group is provided, it is assumed
|
||||
to be a zonal group and the group must belong to this zone. Accepts self-link or zone name.`,
|
||||
Type: framework.TypeString,
|
||||
Description: "Deprecated: use \"bound_zones\" instead.",
|
||||
},
|
||||
"bound_region": {
|
||||
Type: framework.TypeString,
|
||||
Description: `
|
||||
"gce" roles only. If set, determines the region that a GCE instance must belong to. If a group is provided, it is
|
||||
assumed to be a regional group and the group must belong to this region. If zone is provided, region will be ignored.
|
||||
Either self-link or region name are accepted.`,
|
||||
Type: framework.TypeString,
|
||||
Description: "Deprecated: use \"bound_regions\" instead.",
|
||||
},
|
||||
"bound_instance_group": {
|
||||
Type: framework.TypeString,
|
||||
Description: `"gce" roles only. If set, determines the instance group that an authorized instance must belong to.`,
|
||||
},
|
||||
"bound_labels": {
|
||||
Type: framework.TypeCommaStringSlice,
|
||||
Description: `
|
||||
"gce" roles only. A comma-separated list of Google Cloud Platform labels formatted as "$key:$value" strings that are
|
||||
required for authorized GCE instances.`,
|
||||
Description: "Deprecated: use \"bound_instance_groups\" instead.",
|
||||
},
|
||||
}
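As a hedged illustration of the "key:value" format described in the bound_labels schema above, here is a sketch of how those strings are expected to parse via gcputil.ParseGcpLabels, the helper used later in this change; the exact contents of the invalid slice are an assumption:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-gcp-common/gcputil"
)

func main() {
	// "not-a-label" is deliberately malformed and is expected to be reported
	// back in the invalid slice rather than parsed into the map.
	labels, invalid := gcputil.ParseGcpLabels([]string{"env:prod", "team:infra", "not-a-label"})
	fmt.Println(labels)  // map[env:prod team:infra]
	fmt.Println(invalid) // [not-a-label]
}
```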
|
||||
|
||||
|
@ -261,38 +281,67 @@ func (b *GcpAuthBackend) pathRoleRead(ctx context.Context, req *logical.Request,
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
roleMap := map[string]interface{}{
|
||||
"role_type": role.RoleType,
|
||||
"project_id": role.ProjectId,
|
||||
"policies": role.Policies,
|
||||
"ttl": int64(role.TTL / time.Second),
|
||||
"max_ttl": int64(role.MaxTTL / time.Second),
|
||||
"period": int64(role.Period / time.Second),
|
||||
"bound_service_accounts": role.BoundServiceAccounts,
|
||||
role.Period /= time.Second
|
||||
role.TTL /= time.Second
|
||||
role.MaxTTL /= time.Second
|
||||
role.MaxJwtExp /= time.Second
|
||||
|
||||
resp := make(map[string]interface{})
|
||||
|
||||
if role.RoleType != "" {
|
||||
resp["role_type"] = role.RoleType
|
||||
}
|
||||
if role.ProjectId != "" {
|
||||
resp["project_id"] = role.ProjectId
|
||||
}
|
||||
if len(role.Policies) > 0 {
|
||||
resp["policies"] = role.Policies
|
||||
}
|
||||
if role.TTL != 0 {
|
||||
resp["ttl"] = role.TTL
|
||||
}
|
||||
if role.MaxTTL != 0 {
|
||||
resp["max_ttl"] = role.MaxTTL
|
||||
}
|
||||
if role.Period != 0 {
|
||||
resp["period"] = role.Period
|
||||
}
|
||||
if len(role.BoundServiceAccounts) > 0 {
|
||||
resp["bound_service_accounts"] = role.BoundServiceAccounts
|
||||
}
|
||||
|
||||
switch role.RoleType {
|
||||
case iamRoleType:
|
||||
roleMap["max_jwt_exp"] = int64(role.MaxJwtExp / time.Second)
|
||||
roleMap["allow_gce_inference"] = role.AllowGCEInference
|
||||
if role.MaxJwtExp != 0 {
|
||||
resp["max_jwt_exp"] = role.MaxJwtExp
|
||||
}
|
||||
resp["allow_gce_inference"] = role.AllowGCEInference
|
||||
case gceRoleType:
|
||||
roleMap["bound_zone"] = role.BoundZone
|
||||
roleMap["bound_region"] = role.BoundRegion
|
||||
roleMap["bound_instance_group"] = role.BoundInstanceGroup
|
||||
// Ensure values are not nil to avoid errors during plugin RPC conversions.
|
||||
if role.BoundLabels != nil && len(role.BoundLabels) > 0 {
|
||||
roleMap["bound_labels"] = role.BoundLabels
|
||||
} else {
|
||||
roleMap["bound_labels"] = ""
|
||||
if len(role.BoundRegions) > 0 {
|
||||
resp["bound_regions"] = role.BoundRegions
|
||||
}
|
||||
if len(role.BoundZones) > 0 {
|
||||
resp["bound_zones"] = role.BoundZones
|
||||
}
|
||||
if len(role.BoundInstanceGroups) > 0 {
|
||||
resp["bound_instance_groups"] = role.BoundInstanceGroups
|
||||
}
|
||||
if len(role.BoundLabels) > 0 {
|
||||
resp["bound_labels"] = role.BoundLabels
|
||||
}
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: roleMap,
|
||||
Data: resp,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *GcpAuthBackend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
// Validate we didn't get extraneous fields
|
||||
if err := validateFields(req, data); err != nil {
|
||||
return nil, logical.CodedError(422, err.Error())
|
||||
}
|
||||
|
||||
name := strings.ToLower(data.Get("name").(string))
|
||||
if name == "" {
|
||||
return logical.ErrorResponse(errEmptyRoleName), nil
|
||||
|
@ -306,10 +355,15 @@ func (b *GcpAuthBackend) pathRoleCreateUpdate(ctx context.Context, req *logical.
|
|||
role = &gcpRole{}
|
||||
}
|
||||
|
||||
if err := role.updateRole(b.System(), req.Operation, data); err != nil {
|
||||
return logical.ErrorResponse(err.Error()), nil
|
||||
warnings, err := role.updateRole(b.System(), req.Operation, data)
|
||||
if err != nil {
|
||||
resp := logical.ErrorResponse(err.Error())
|
||||
for _, w := range warnings {
|
||||
resp.AddWarning(w)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
return b.storeRole(ctx, req.Storage, name, role)
|
||||
return b.storeRole(ctx, req.Storage, name, role, warnings)
|
||||
}
|
||||
|
||||
func (b *GcpAuthBackend) pathRoleList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
|
@ -332,6 +386,13 @@ const pathListRolesHelpSyn = `Lists all the roles that are registered with Vault
|
|||
const pathListRolesHelpDesc = `Lists all roles under the GCP backends by name.`
|
||||
|
||||
func (b *GcpAuthBackend) pathRoleEditIamServiceAccounts(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
// Validate we didn't get extraneous fields
|
||||
if err := validateFields(req, data); err != nil {
|
||||
return nil, logical.CodedError(422, err.Error())
|
||||
}
|
||||
|
||||
var warnings []string
|
||||
|
||||
roleName := data.Get("name").(string)
|
||||
if roleName == "" {
|
||||
return logical.ErrorResponse(errEmptyRoleName), nil
|
||||
|
@ -353,7 +414,7 @@ func (b *GcpAuthBackend) pathRoleEditIamServiceAccounts(ctx context.Context, req
|
|||
}
|
||||
role.BoundServiceAccounts = editStringValues(role.BoundServiceAccounts, toAdd, toRemove)
|
||||
|
||||
return b.storeRole(ctx, req.Storage, roleName, role)
|
||||
return b.storeRole(ctx, req.Storage, roleName, role, warnings)
|
||||
}
|
||||
|
||||
func editStringValues(initial []string, toAdd []string, toRemove []string) []string {
|
||||
|
@ -382,6 +443,13 @@ func editStringValues(initial []string, toAdd []string, toRemove []string) []str
|
|||
}
|
||||
|
||||
func (b *GcpAuthBackend) pathRoleEditGceLabels(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
// Validate we didn't get extraneous fields
|
||||
if err := validateFields(req, data); err != nil {
|
||||
return nil, logical.CodedError(422, err.Error())
|
||||
}
|
||||
|
||||
var warnings []string
|
||||
|
||||
roleName := data.Get("name").(string)
|
||||
if roleName == "" {
|
||||
return logical.ErrorResponse(errEmptyRoleName), nil
|
||||
|
@ -407,6 +475,9 @@ func (b *GcpAuthBackend) pathRoleEditGceLabels(ctx context.Context, req *logical
|
|||
return logical.ErrorResponse(fmt.Sprintf("given invalid labels to add: %q", invalidLabels)), nil
|
||||
}
|
||||
for k, v := range labelsToAdd {
|
||||
if role.BoundLabels == nil {
|
||||
role.BoundLabels = make(map[string]string, len(labelsToAdd))
|
||||
}
|
||||
role.BoundLabels[k] = v
|
||||
}
|
||||
|
||||
|
@ -414,13 +485,14 @@ func (b *GcpAuthBackend) pathRoleEditGceLabels(ctx context.Context, req *logical
|
|||
delete(role.BoundLabels, k)
|
||||
}
|
||||
|
||||
return b.storeRole(ctx, req.Storage, roleName, role)
|
||||
return b.storeRole(ctx, req.Storage, roleName, role, warnings)
|
||||
}
|
||||
|
||||
// role reads a gcpRole from storage. This assumes the caller has already obtained the role lock.
|
||||
func (b *GcpAuthBackend) role(ctx context.Context, s logical.Storage, name string) (*gcpRole, error) {
|
||||
entry, err := s.Get(ctx, fmt.Sprintf("role/%s", strings.ToLower(name)))
|
||||
name = strings.ToLower(name)
|
||||
|
||||
entry, err := s.Get(ctx, "role/"+name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -428,174 +500,242 @@ func (b *GcpAuthBackend) role(ctx context.Context, s logical.Storage, name strin
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
role := &gcpRole{}
|
||||
if err := entry.DecodeJSON(role); err != nil {
|
||||
var role gcpRole
|
||||
if err := entry.DecodeJSON(&role); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return role, nil
|
||||
// Keep track of whether we do an in-place upgrade of old fields
|
||||
modified := false
|
||||
|
||||
// Move old bindings to new fields.
|
||||
if role.BoundRegion != "" && len(role.BoundRegions) == 0 {
|
||||
role.BoundRegions = []string{role.BoundRegion}
|
||||
role.BoundRegion = ""
|
||||
modified = true
|
||||
}
|
||||
if role.BoundZone != "" && len(role.BoundZones) == 0 {
|
||||
role.BoundZones = []string{role.BoundZone}
|
||||
role.BoundZone = ""
|
||||
modified = true
|
||||
}
|
||||
if role.BoundInstanceGroup != "" && len(role.BoundInstanceGroups) == 0 {
|
||||
role.BoundInstanceGroups = []string{role.BoundInstanceGroup}
|
||||
role.BoundInstanceGroup = ""
|
||||
modified = true
|
||||
}
|
||||
|
||||
if modified && (b.System().LocalMount() || !b.System().ReplicationState().HasState(vaultconsts.ReplicationPerformanceSecondary)) {
|
||||
b.Logger().Info("upgrading role to new schema",
|
||||
"role", name)
|
||||
|
||||
updatedRole, err := logical.StorageEntryJSON("role/"+name, &role)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.Put(ctx, updatedRole); err != nil {
|
||||
// Only perform upgrades on replication primary
|
||||
if !strings.Contains(err.Error(), logical.ErrReadOnly.Error()) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &role, nil
|
||||
}
|
||||
|
||||
// storeRole saves the gcpRole to storage.
|
||||
// The returned response may contain either warnings or an error response,
|
||||
// but will be nil if error is not nil
|
||||
func (b *GcpAuthBackend) storeRole(ctx context.Context, s logical.Storage, roleName string, role *gcpRole) (*logical.Response, error) {
|
||||
var resp *logical.Response
|
||||
warnings, err := role.validate(b.System())
|
||||
func (b *GcpAuthBackend) storeRole(ctx context.Context, s logical.Storage, roleName string, role *gcpRole, warnings []string) (*logical.Response, error) {
|
||||
var resp logical.Response
|
||||
for _, w := range warnings {
|
||||
resp.AddWarning(w)
|
||||
}
|
||||
|
||||
validateWarnings, err := role.validate(b.System())
|
||||
for _, w := range validateWarnings {
|
||||
resp.AddWarning(w)
|
||||
}
|
||||
if err != nil {
|
||||
return logical.ErrorResponse(err.Error()), nil
|
||||
} else if len(warnings) > 0 {
|
||||
resp = &logical.Response{
|
||||
Warnings: warnings,
|
||||
}
|
||||
}
|
||||
|
||||
entry, err := logical.StorageEntryJSON(fmt.Sprintf("role/%s", roleName), role)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := s.Put(ctx, entry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
type gcpRole struct {
|
||||
// Type of this role. See path_role constants for currently supported types.
|
||||
RoleType string `json:"role_type" structs:"role_type" mapstructure:"role_type"`
|
||||
RoleType string `json:"role_type,omitempty"`
|
||||
|
||||
// Project ID in GCP for authorized entities.
|
||||
ProjectId string `json:"project_id" structs:"project_id" mapstructure:"project_id"`
|
||||
ProjectId string `json:"project_id,omitempty"`
|
||||
|
||||
// Policies for Vault to assign to authorized entities.
|
||||
Policies []string `json:"policies" structs:"policies" mapstructure:"policies"`
|
||||
Policies []string `json:"policies,omitempty"`
|
||||
|
||||
// TTL of Vault auth leases under this role.
|
||||
TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"`
|
||||
TTL time.Duration `json:"ttl,omitempty"`
|
||||
|
||||
// Max total TTL including renewals, of Vault auth leases under this role.
|
||||
MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"`
|
||||
MaxTTL time.Duration `json:"max_ttl,omitempty"`
|
||||
|
||||
// Period, If set, indicates that this token should not expire and
|
||||
// should be automatically renewed within this time period
|
||||
// with TTL equal to this value.
|
||||
Period time.Duration `json:"period" structs:"period" mapstructure:"period"`
|
||||
Period time.Duration `json:"period,omitempty"`
|
||||
|
||||
// Service accounts allowed to login under this role.
|
||||
BoundServiceAccounts []string `json:"bound_service_accounts" structs:"bound_service_accounts" mapstructure:"bound_service_accounts"`
|
||||
BoundServiceAccounts []string `json:"bound_service_accounts,omitempty"`
|
||||
|
||||
// --| IAM-only attributes |--
|
||||
// MaxJwtExp is the duration from time of authentication that a JWT used to authenticate to role must expire within.
|
||||
// TODO(emilymye): Allow this to be updated for GCE roles once 'exp' parameter has been allowed for GCE metadata.
|
||||
MaxJwtExp time.Duration `json:"max_jwt_exp" structs:"max_jwt_exp" mapstructure:"max_jwt_exp"`
|
||||
MaxJwtExp time.Duration `json:"max_jwt_exp,omitempty"`
|
||||
|
||||
// AllowGCEInference, if false, does not allow a GCE instance to login under this 'iam' role. If true (default),
|
||||
// a service account is inferred from the instance metadata and used as the authenticating instance.
|
||||
AllowGCEInference bool `json:"allow_gce_inference" structs:"allow_gce_inference" mapstructure:"allow_gce_inference"`
|
||||
AllowGCEInference bool `json:"allow_gce_inference,omitempty"`
|
||||
|
||||
// --| GCE-only attributes |--
|
||||
// BoundRegion that instances must belong to in order to login under this role.
|
||||
BoundRegion string `json:"bound_region" structs:"bound_region" mapstructure:"bound_region"`
|
||||
// BoundRegions that instances must belong to in order to login under this role.
|
||||
BoundRegions []string `json:"bound_regions,omitempty"`
|
||||
|
||||
// BoundZone that instances must belong to in order to login under this role.
|
||||
BoundZone string `json:"bound_zone" structs:"bound_zone" mapstructure:"bound_zone"`
|
||||
// BoundZones that instances must belong to in order to login under this role.
|
||||
BoundZones []string `json:"bound_zones,omitempty"`
|
||||
|
||||
// Instance group that instances must belong to in order to login under this role.
|
||||
BoundInstanceGroup string `json:"bound_instance_group" structs:"bound_instance_group" mapstructure:"bound_instance_group"`
|
||||
// BoundInstanceGroups are the instance groups that instances must belong to in order to login under this role.
|
||||
BoundInstanceGroups []string `json:"bound_instance_groups,omitempty"`
|
||||
|
||||
// BoundLabels that instances must currently have set in order to login under this role.
|
||||
BoundLabels map[string]string `json:"bound_labels" structs:"bound_labels" mapstructure:"bound_labels"`
|
||||
BoundLabels map[string]string `json:"bound_labels,omitempty"`
|
||||
|
||||
// Deprecated fields
|
||||
// TODO: Remove in 0.5.0+
|
||||
BoundRegion string `json:"bound_region,omitempty"`
|
||||
BoundZone string `json:"bound_zone,omitempty"`
|
||||
BoundInstanceGroup string `json:"bound_instance_group,omitempty"`
|
||||
}
|
||||
|
||||
// Update updates the given role with values parsed/validated from given FieldData.
|
||||
// The returned warnings are informational and should be surfaced to the caller; they do not indicate failure.
|
||||
// This method does not validate the role. Validation is done before storage.
|
||||
func (role *gcpRole) updateRole(sys logical.SystemView, op logical.Operation, data *framework.FieldData) error {
|
||||
func (role *gcpRole) updateRole(sys logical.SystemView, op logical.Operation, data *framework.FieldData) (warnings []string, err error) {
|
||||
// Set role type
|
||||
roleTypeRaw, ok := data.GetOk("type")
|
||||
if ok {
|
||||
roleType := roleTypeRaw.(string)
|
||||
if rt, ok := data.GetOk("type"); ok {
|
||||
roleType := rt.(string)
|
||||
if role.RoleType != roleType && op == logical.UpdateOperation {
|
||||
return errors.New("role type cannot be changed for an existing role")
|
||||
err = errors.New("role type cannot be changed for an existing role")
|
||||
return
|
||||
}
|
||||
role.RoleType = roleType
|
||||
} else if op == logical.CreateOperation {
|
||||
return errors.New(errEmptyRoleType)
|
||||
err = errors.New(errEmptyRoleType)
|
||||
return
|
||||
}
|
||||
|
||||
// Update policies.
|
||||
policies, ok := data.GetOk("policies")
|
||||
if ok {
|
||||
// Update policies
|
||||
if policies, ok := data.GetOk("policies"); ok {
|
||||
role.Policies = policyutil.ParsePolicies(policies)
|
||||
} else if op == logical.CreateOperation {
|
||||
role.Policies = policyutil.ParsePolicies(data.Get("policies"))
|
||||
// Force default policy
|
||||
role.Policies = policyutil.ParsePolicies(nil)
|
||||
}
|
||||
|
||||
// Update GCP project id.
|
||||
projectIdRaw, ok := data.GetOk("project_id")
|
||||
if ok {
|
||||
role.ProjectId = projectIdRaw.(string)
|
||||
if projectId, ok := data.GetOk("project_id"); ok {
|
||||
role.ProjectId = projectId.(string)
|
||||
} else if op == logical.CreateOperation {
|
||||
role.ProjectId = data.Get("project_id").(string)
|
||||
}
|
||||
|
||||
// Update token TTL.
|
||||
ttlRaw, ok := data.GetOk("ttl")
|
||||
if ok {
|
||||
role.TTL = time.Duration(ttlRaw.(int)) * time.Second
|
||||
if ttl, ok := data.GetOk("ttl"); ok {
|
||||
role.TTL = time.Duration(ttl.(int)) * time.Second
|
||||
|
||||
def := sys.DefaultLeaseTTL()
|
||||
if role.TTL > def {
|
||||
warnings = append(warnings, fmt.Sprintf(`Given "ttl" of %q is greater `+
|
||||
`than the maximum system/mount TTL of %q. The TTL will be capped at `+
|
||||
`%q during login.`, role.TTL, def, def))
|
||||
}
|
||||
} else if op == logical.CreateOperation {
|
||||
role.TTL = time.Duration(data.Get("ttl").(int)) * time.Second
|
||||
}
|
||||
|
||||
// Update token Max TTL.
|
||||
maxTTLRaw, ok := data.GetOk("max_ttl")
|
||||
if ok {
|
||||
role.MaxTTL = time.Duration(maxTTLRaw.(int)) * time.Second
|
||||
if maxTTL, ok := data.GetOk("max_ttl"); ok {
|
||||
role.MaxTTL = time.Duration(maxTTL.(int)) * time.Second
|
||||
|
||||
def := sys.MaxLeaseTTL()
|
||||
if role.MaxTTL > def {
|
||||
warnings = append(warnings, fmt.Sprintf(`Given "max_ttl" of %q is greater `+
|
||||
`than the maximum system/mount MaxTTL of %q. The MaxTTL will be `+
|
||||
`capped at %q during login.`, role.MaxTTL, def, def))
|
||||
}
|
||||
} else if op == logical.CreateOperation {
|
||||
role.MaxTTL = time.Duration(data.Get("max_ttl").(int)) * time.Second
|
||||
}
|
||||
|
||||
// Update token period.
|
||||
periodRaw, ok := data.GetOk("period")
|
||||
if ok {
|
||||
role.Period = time.Second * time.Duration(periodRaw.(int))
|
||||
if period, ok := data.GetOk("period"); ok {
|
||||
role.Period = time.Duration(period.(int)) * time.Second
|
||||
|
||||
def := sys.MaxLeaseTTL()
|
||||
if role.Period > def {
|
||||
warnings = append(warnings, fmt.Sprintf(`Given "period" of %q is greater `+
|
||||
`than the maximum system/mount period of %q. The period will be `+
|
||||
`capped at %q during login.`, role.Period, def, def))
|
||||
}
|
||||
} else if op == logical.CreateOperation {
|
||||
role.Period = time.Second * time.Duration(data.Get("period").(int))
|
||||
role.Period = time.Duration(data.Get("period").(int)) * time.Second
|
||||
}
|
||||
|
||||
// Update bound GCP service accounts.
|
||||
serviceAccountsRaw, ok := data.GetOk("bound_service_accounts")
|
||||
if ok {
|
||||
role.BoundServiceAccounts = serviceAccountsRaw.([]string)
|
||||
if sa, ok := data.GetOk("bound_service_accounts"); ok {
|
||||
role.BoundServiceAccounts = sa.([]string)
|
||||
} else {
|
||||
// Check for older version of param name
|
||||
serviceAccountsRaw, ok := data.GetOk("service_accounts")
|
||||
if ok {
|
||||
role.BoundServiceAccounts = serviceAccountsRaw.([]string)
|
||||
if sa, ok := data.GetOk("service_accounts"); ok {
|
||||
warnings = append(warnings, `The "service_accounts" field is deprecated. `+
|
||||
`Please use "bound_service_accounts" instead. The "service_accounts" `+
|
||||
`field will be removed in a later release, so please update accordingly.`)
|
||||
role.BoundServiceAccounts = sa.([]string)
|
||||
}
|
||||
}
|
||||
|
||||
if len(role.BoundServiceAccounts) > 0 {
|
||||
role.BoundServiceAccounts = strutil.TrimStrings(role.BoundServiceAccounts)
|
||||
role.BoundServiceAccounts = strutil.RemoveDuplicates(role.BoundServiceAccounts, false)
|
||||
}
|
||||
|
||||
// Update fields specific to this type
|
||||
switch role.RoleType {
|
||||
case iamRoleType:
|
||||
if err := checkInvalidRoleTypeArgs(data, gceOnlyFieldSchema); err != nil {
|
||||
return err
|
||||
if err = checkInvalidRoleTypeArgs(data, gceOnlyFieldSchema); err != nil {
|
||||
return
|
||||
}
|
||||
if err := role.updateIamFields(data, op); err != nil {
|
||||
return err
|
||||
if warnings, err = role.updateIamFields(data, op); err != nil {
|
||||
return
|
||||
}
|
||||
case gceRoleType:
|
||||
if err := checkInvalidRoleTypeArgs(data, iamOnlyFieldSchema); err != nil {
|
||||
return err
|
||||
if err = checkInvalidRoleTypeArgs(data, iamOnlyFieldSchema); err != nil {
|
||||
return
|
||||
}
|
||||
if err := role.updateGceFields(data, op); err != nil {
|
||||
return err
|
||||
if warnings, err = role.updateGceFields(data, op); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func (role *gcpRole) validate(sys logical.SystemView) (warnings []string, err error) {
|
||||
|
@ -648,51 +788,114 @@ func (role *gcpRole) validate(sys logical.SystemView) (warnings []string, err er
|
|||
}
|
||||
|
||||
// updateIamFields updates IAM-only fields for a role.
|
||||
func (role *gcpRole) updateIamFields(data *framework.FieldData, op logical.Operation) error {
|
||||
allowGCEInference, ok := data.GetOk("allow_gce_inference")
|
||||
if ok {
|
||||
func (role *gcpRole) updateIamFields(data *framework.FieldData, op logical.Operation) (warnings []string, err error) {
|
||||
if allowGCEInference, ok := data.GetOk("allow_gce_inference"); ok {
|
||||
role.AllowGCEInference = allowGCEInference.(bool)
|
||||
} else if op == logical.CreateOperation {
|
||||
role.AllowGCEInference = data.Get("allow_gce_inference").(bool)
|
||||
}
|
||||
|
||||
maxJwtExp, ok := data.GetOk("max_jwt_exp")
|
||||
if ok {
|
||||
if maxJwtExp, ok := data.GetOk("max_jwt_exp"); ok {
|
||||
role.MaxJwtExp = time.Duration(maxJwtExp.(int)) * time.Second
|
||||
} else if op == logical.CreateOperation {
|
||||
role.MaxJwtExp = time.Duration(defaultIamMaxJwtExpMinutes) * time.Minute
|
||||
}
|
||||
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
// updateGceFields updates GCE-only fields for a role.
|
||||
func (role *gcpRole) updateGceFields(data *framework.FieldData, op logical.Operation) error {
|
||||
region, hasRegion := data.GetOk("bound_region")
|
||||
if hasRegion {
|
||||
role.BoundRegion = strings.TrimSpace(region.(string))
|
||||
func (role *gcpRole) updateGceFields(data *framework.FieldData, op logical.Operation) (warnings []string, err error) {
|
||||
if regions, ok := data.GetOk("bound_regions"); ok {
|
||||
role.BoundRegions = regions.([]string)
|
||||
} else if op == logical.CreateOperation {
|
||||
role.BoundRegions = data.Get("bound_regions").([]string)
|
||||
}
|
||||
|
||||
zone, hasZone := data.GetOk("bound_zone")
|
||||
if hasZone {
|
||||
role.BoundZone = strings.TrimSpace(zone.(string))
|
||||
if zones, ok := data.GetOk("bound_zones"); ok {
|
||||
role.BoundZones = zones.([]string)
|
||||
} else if op == logical.CreateOperation {
|
||||
role.BoundZones = data.Get("bound_zones").([]string)
|
||||
}
|
||||
|
||||
instanceGroup, ok := data.GetOk("bound_instance_group")
|
||||
if ok {
|
||||
role.BoundInstanceGroup = strings.TrimSpace(instanceGroup.(string))
|
||||
if instanceGroups, ok := data.GetOk("bound_instance_groups"); ok {
|
||||
role.BoundInstanceGroups = instanceGroups.([]string)
|
||||
} else if op == logical.CreateOperation {
|
||||
role.BoundInstanceGroups = data.Get("bound_instance_groups").([]string)
|
||||
}
|
||||
|
||||
labels, ok := data.GetOk("bound_labels")
|
||||
if ok {
|
||||
var invalidLabels []string
|
||||
role.BoundLabels, invalidLabels = gcputil.ParseGcpLabels(labels.([]string))
|
||||
if len(invalidLabels) > 0 {
|
||||
return fmt.Errorf("invalid labels given: %q", invalidLabels)
|
||||
if boundRegion, ok := data.GetOk("bound_region"); ok {
|
||||
if _, ok := data.GetOk("bound_regions"); ok {
|
||||
err = fmt.Errorf(`cannot specify both "bound_region" and "bound_regions"`)
|
||||
return
|
||||
}
|
||||
|
||||
warnings = append(warnings, `The "bound_region" field is deprecated. `+
|
||||
`Please use "bound_regions" (plural) instead. You can still specify a `+
|
||||
`single region, but multiple regions are also now supported. The `+
|
||||
`"bound_region" field will be removed in a later release, so please `+
|
||||
`update accordingly.`)
|
||||
role.BoundRegions = append(role.BoundRegions, boundRegion.(string))
|
||||
}
|
||||
|
||||
return nil
|
||||
if boundZone, ok := data.GetOk("bound_zone"); ok {
|
||||
if _, ok := data.GetOk("bound_zones"); ok {
|
||||
err = fmt.Errorf(`cannot specify both "bound_zone" and "bound_zones"`)
|
||||
return
|
||||
}
|
||||
|
||||
warnings = append(warnings, `The "bound_zone" field is deprecated. `+
|
||||
`Please use "bound_zones" (plural) instead. You can still specify a `+
|
||||
`single zone, but multiple zones are also now supported. The `+
|
||||
`"bound_zone" field will be removed in a later release, so please `+
|
||||
`update accordingly.`)
|
||||
role.BoundZones = append(role.BoundZones, boundZone.(string))
|
||||
}
|
||||
|
||||
if boundInstanceGroup, ok := data.GetOk("bound_instance_group"); ok {
|
||||
if _, ok := data.GetOk("bound_instance_groups"); ok {
|
||||
err = fmt.Errorf(`cannot specify both "bound_instance_group" and "bound_instance_groups"`)
|
||||
return
|
||||
}
|
||||
|
||||
warnings = append(warnings, `The "bound_instance_group" field is deprecated. `+
|
||||
`Please use "bound_instance_groups" (plural) instead. You can still specify a `+
|
||||
`single instance group, but multiple instance groups are also now supported. The `+
|
||||
`"bound_instance_group" field will be removed in a later release, so please `+
|
||||
`update accordingly.`)
|
||||
role.BoundInstanceGroups = append(role.BoundInstanceGroups, boundInstanceGroup.(string))
|
||||
}
|
||||
|
||||
if labelsRaw, ok := data.GetOk("bound_labels"); ok {
|
||||
labels, invalidLabels := gcputil.ParseGcpLabels(labelsRaw.([]string))
|
||||
if len(invalidLabels) > 0 {
|
||||
err = fmt.Errorf("invalid labels given: %q", invalidLabels)
|
||||
return
|
||||
}
|
||||
role.BoundLabels = labels
|
||||
}
|
||||
|
||||
if len(role.Policies) > 0 {
|
||||
role.Policies = strutil.TrimStrings(role.Policies)
|
||||
role.Policies = strutil.RemoveDuplicates(role.Policies, false)
|
||||
}
|
||||
|
||||
if len(role.BoundRegions) > 0 {
|
||||
role.BoundRegions = strutil.TrimStrings(role.BoundRegions)
|
||||
role.BoundRegions = strutil.RemoveDuplicates(role.BoundRegions, false)
|
||||
}
|
||||
|
||||
if len(role.BoundZones) > 0 {
|
||||
role.BoundZones = strutil.TrimStrings(role.BoundZones)
|
||||
role.BoundZones = strutil.RemoveDuplicates(role.BoundZones, false)
|
||||
}
|
||||
|
||||
if len(role.BoundInstanceGroups) > 0 {
|
||||
role.BoundInstanceGroups = strutil.TrimStrings(role.BoundInstanceGroups)
|
||||
role.BoundInstanceGroups = strutil.RemoveDuplicates(role.BoundInstanceGroups, false)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// validateIamFields validates the IAM-only fields for a role.
|
||||
|
@ -717,18 +920,21 @@ func (role *gcpRole) validateForIAM() (warnings []string, err error) {
|
|||
func (role *gcpRole) validateForGCE() (warnings []string, err error) {
|
||||
warnings = []string{}
|
||||
|
||||
hasRegion := len(role.BoundRegion) > 0
|
||||
hasZone := len(role.BoundZone) > 0
|
||||
hasRegion := len(role.BoundRegions) > 0
|
||||
hasZone := len(role.BoundZones) > 0
|
||||
hasRegionOrZone := hasRegion || hasZone
|
||||
|
||||
hasInstanceGroup := len(role.BoundInstanceGroup) > 0
|
||||
hasInstanceGroup := len(role.BoundInstanceGroups) > 0
|
||||
|
||||
if hasInstanceGroup && !hasRegionOrZone {
|
||||
return warnings, errors.New(`region or zone information must be specified if a group is given`)
|
||||
return warnings, errors.New(`region or zone information must be specified if an instance group is given`)
|
||||
}
|
||||
|
||||
if hasRegion && hasZone {
|
||||
warnings = append(warnings, "Given both region and zone for role of type 'gce' - region will be ignored.")
|
||||
warnings = append(warnings, `Given both "bound_regions" and "bound_zones" `+
|
||||
`fields for role type "gce", "bound_regions" will be ignored in favor `+
|
||||
`of the more specific "bound_zones" field. To fix this warning, update `+
|
||||
`the role to remove either the "bound_regions" or "bound_zones" field.`)
|
||||
}
|
||||
|
||||
return warnings, nil
|
||||
|
|
|
@@ -29,10 +29,11 @@ Currently, OpenCensus supports:

* [Prometheus][exporter-prom] for stats
* [OpenZipkin][exporter-zipkin] for traces
* Stackdriver [Monitoring][exporter-stackdriver] and [Trace][exporter-stackdriver]
* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces
* [Jaeger][exporter-jaeger] for traces
* [AWS X-Ray][exporter-xray] for traces
* [Datadog][exporter-datadog] for stats and traces

## Overview

![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg)

@@ -129,12 +130,65 @@ exported via the registered exporters.

## Traces

A distributed trace tracks the progression of a single user request as
it is handled by the services and processes that make up an application.
Each step is called a span in the trace. Spans include metadata about the step,
including especially the time spent in the step, called the span’s latency.

Below you see a trace and several spans underneath it.

![Traces and spans](https://i.imgur.com/7hZwRVj.png)

### Spans

A span is the unit step in a trace. Each span has a name, latency, status and
additional metadata.

Below we are starting a span for a cache read and ending it
when we are done:

[embedmd]:# (internal/readme/trace.go startend)
```go
ctx, span := trace.StartSpan(ctx, "your choice of name")
ctx, span := trace.StartSpan(ctx, "cache.Get")
defer span.End()

// Do work to get from cache.
```

### Propagation

Spans can have parents or can be root spans if they don't have any parents.
The current span is propagated in-process and across the network to allow associating
new child spans with the parent.

In the same process, context.Context is used to propagate spans.
trace.StartSpan creates a new span as a root if the current context
doesn't contain a span. Otherwise, it creates a child of the span that is
already in the current context. The returned context can be used to keep
propagating the newly created span in the current context.

[embedmd]:# (internal/readme/trace.go startend)
```go
ctx, span := trace.StartSpan(ctx, "cache.Get")
defer span.End()

// Do work to get from cache.
```
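A small, self-contained sketch of the in-process propagation described above; the handleRequest and fetchUser names are illustrative, not part of OpenCensus:

```go
package main

import (
	"context"

	"go.opencensus.io/trace"
)

// fetchUser becomes a child span because ctx already carries the parent span.
func fetchUser(ctx context.Context, id string) {
	_, span := trace.StartSpan(ctx, "fetchUser")
	defer span.End()
	// ... look up the user ...
	_ = id
}

func handleRequest(ctx context.Context) {
	// ctx has no span yet, so StartSpan creates a new root span (and a new trace).
	ctx, span := trace.StartSpan(ctx, "handleRequest")
	defer span.End()

	fetchUser(ctx, "42")
}

func main() {
	handleRequest(context.Background())
}
```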
Across the network, OpenCensus provides different propagation
methods for different protocols.

* The gRPC integration uses OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
* The HTTP integration uses Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
  by default, but can be configured to use a custom propagation method by setting another
  [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat), as sketched below.
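A hedged sketch of wiring an explicit propagation format into an outgoing HTTP client; the ochttp.Transport and b3 package paths are assumptions based on the OpenCensus plugin layout and are not part of this diff:

```go
package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

func main() {
	// Any propagation.HTTPFormat implementation could be plugged in here;
	// B3 is what the HTTP integration already defaults to.
	client := &http.Client{
		Transport: &ochttp.Transport{
			Propagation: &b3.HTTPFormat{},
		},
	}
	_ = client
}
```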
## Execution Tracer

With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
for an example of their mutual use.

## Profiles

OpenCensus tags can be applied as profiler labels
|
||||
|
|
|
@ -91,15 +91,6 @@ var (
|
|||
TagKeys: []tag.Key{KeyClientMethod},
|
||||
Aggregation: DefaultMillisecondsDistribution,
|
||||
}
|
||||
|
||||
// Deprecated: This view is going to be removed, if you need it please define it
|
||||
// yourself.
|
||||
ClientRequestCountView = &view.View{
|
||||
Name: "Count of request messages per client RPC",
|
||||
TagKeys: []tag.Key{KeyClientMethod},
|
||||
Measure: ClientRoundtripLatency,
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
)
|
||||
|
||||
// DefaultClientViews are the default client views provided by this package.
|
||||
|
|
|
@ -53,11 +53,11 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
|||
name := t.formatSpanName(req)
|
||||
// TODO(jbd): Discuss whether we want to prefix
|
||||
// outgoing requests with Sent.
|
||||
_, span := trace.StartSpan(req.Context(), name,
|
||||
ctx, span := trace.StartSpan(req.Context(), name,
|
||||
trace.WithSampler(t.startOptions.Sampler),
|
||||
trace.WithSpanKind(trace.SpanKindClient))
|
||||
|
||||
req = req.WithContext(trace.WithSpan(req.Context(), span))
|
||||
req = req.WithContext(ctx)
|
||||
if t.format != nil {
|
||||
t.format.SpanContextToRequest(span.SpanContext(), req)
|
||||
}
|
||||
|
|
|
@ -98,13 +98,6 @@ func FromContext(ctx context.Context) *Span {
|
|||
return s
|
||||
}
|
||||
|
||||
// WithSpan returns a new context with the given Span attached.
|
||||
//
|
||||
// Deprecated: Use NewContext.
|
||||
func WithSpan(parent context.Context, s *Span) context.Context {
|
||||
return NewContext(parent, s)
|
||||
}
|
||||
|
||||
// NewContext returns a new context with the given Span attached.
|
||||
func NewContext(parent context.Context, s *Span) context.Context {
|
||||
return context.WithValue(parent, contextKey{}, s)
|
||||
|
@ -154,6 +147,9 @@ func WithSampler(sampler Sampler) StartOption {
|
|||
|
||||
// StartSpan starts a new child span of the current span in the context. If
|
||||
// there is no span in the context, creates a new trace and span.
|
||||
//
|
||||
// Returned context contains the newly created span. You can use it to
|
||||
// propagate the returned span in process.
|
||||
func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
|
||||
var opts StartOptions
|
||||
var parent SpanContext
|
||||
|
@ -174,6 +170,9 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont
|
|||
//
|
||||
// If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is
|
||||
// preferred for cases where the parent is propagated via an incoming request.
|
||||
//
|
||||
// Returned context contains the newly created span. You can use it to
|
||||
// propagate the returned span in process.
|
||||
func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
|
||||
var opts StartOptions
|
||||
for _, op := range o {
|
||||
|
@ -185,26 +184,6 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont
|
|||
return NewContext(ctx, span), span
|
||||
}
|
||||
|
||||
// NewSpan returns a new span.
|
||||
//
|
||||
// If parent is not nil, created span will be a child of the parent.
|
||||
//
|
||||
// Deprecated: Use StartSpan.
|
||||
func NewSpan(name string, parent *Span, o StartOptions) *Span {
|
||||
var parentSpanContext SpanContext
|
||||
if parent != nil {
|
||||
parentSpanContext = parent.SpanContext()
|
||||
}
|
||||
return startSpanInternal(name, parent != nil, parentSpanContext, false, o)
|
||||
}
|
||||
|
||||
// NewSpanWithRemoteParent returns a new span with the given parent SpanContext.
|
||||
//
|
||||
// Deprecated: Use StartSpanWithRemoteParent.
|
||||
func NewSpanWithRemoteParent(name string, parent SpanContext, o StartOptions) *Span {
|
||||
return startSpanInternal(name, true, parent, true, o)
|
||||
}
|
||||
|
||||
func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span {
|
||||
span := &Span{}
|
||||
span.spanContext = parent
|
||||
|
|
|
@ -52,9 +52,31 @@ const (
|
|||
noDialOnMiss = false
|
||||
)
|
||||
|
||||
// shouldTraceGetConn reports whether getClientConn should call any
// ClientTrace.GetConn hook associated with the http.Request.
//
// This complexity is needed to avoid double calls of the GetConn hook
// during the back-and-forth between net/http and x/net/http2 (when the
// net/http.Transport is upgraded to also speak http2), as well as support
// the case where x/net/http2 is being used directly.
func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
	// If our Transport wasn't made via ConfigureTransport, always
	// trace the GetConn hook if provided, because that means the
	// http2 package is being used directly and it's the one
	// dialing, as opposed to net/http.
	if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
		return true
	}
	// Otherwise, only use the GetConn hook if this connection has
	// been used previously for other requests. For fresh
	// connections, the net/http package does the dialing.
	return !st.freshConn
}
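To see the hook that shouldTraceGetConn guards from the caller's side, here is a minimal sketch using net/http/httptrace; the URL is illustrative:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
)

func main() {
	trace := &httptrace.ClientTrace{
		GetConn: func(hostPort string) { fmt.Println("GetConn:", hostPort) },
		GotConn: func(info httptrace.GotConnInfo) { fmt.Println("GotConn, reused:", info.Reused) },
	}

	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

	resp, err := http.DefaultClient.Do(req)
	if err == nil {
		resp.Body.Close()
	}
	// With the change above, GetConn fires once per request even when the
	// connection ends up being handled by the bundled http2 transport.
}
```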
|
||||
|
||||
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
|
||||
if isConnectionCloseRequest(req) && dialOnMiss {
|
||||
// It gets its own connection.
|
||||
traceGetConn(req, addr)
|
||||
const singleUse = true
|
||||
cc, err := p.t.dialClientConn(addr, singleUse)
|
||||
if err != nil {
|
||||
|
@ -64,7 +86,10 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
|
|||
}
|
||||
p.mu.Lock()
|
||||
for _, cc := range p.conns[addr] {
|
||||
if cc.CanTakeNewRequest() {
|
||||
if st := cc.idleState(); st.canTakeNewRequest {
|
||||
if p.shouldTraceGetConn(st) {
|
||||
traceGetConn(req, addr)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
return cc, nil
|
||||
}
|
||||
|
@ -73,6 +98,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
|
|||
p.mu.Unlock()
|
||||
return nil, ErrNoCachedConn
|
||||
}
|
||||
traceGetConn(req, addr)
|
||||
call := p.getStartDialLocked(addr)
|
||||
p.mu.Unlock()
|
||||
<-call.done
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.11
|
||||
|
||||
package http2
|
||||
|
||||
func traceHasWroteHeaderField(trace *clientTrace) bool {
|
||||
return trace != nil && trace.WroteHeaderField != nil
|
||||
}
|
||||
|
||||
func traceWroteHeaderField(trace *clientTrace, k, v string) {
|
||||
if trace != nil && trace.WroteHeaderField != nil {
|
||||
trace.WroteHeaderField(k, []string{v})
|
||||
}
|
||||
}
|
|
@ -18,6 +18,8 @@ type contextContext interface {
|
|||
context.Context
|
||||
}
|
||||
|
||||
var errCanceled = context.Canceled
|
||||
|
||||
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
|
||||
|
@ -48,6 +50,14 @@ func (t *Transport) idleConnTimeout() time.Duration {
|
|||
|
||||
func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
|
||||
|
||||
func traceGetConn(req *http.Request, hostPort string) {
|
||||
trace := httptrace.ContextClientTrace(req.Context())
|
||||
if trace == nil || trace.GetConn == nil {
|
||||
return
|
||||
}
|
||||
trace.GetConn(hostPort)
|
||||
}
|
||||
|
||||
func traceGotConn(req *http.Request, cc *ClientConn) {
|
||||
trace := httptrace.ContextClientTrace(req.Context())
|
||||
if trace == nil || trace.GotConn == nil {
|
||||
|
@ -104,3 +114,8 @@ func requestTrace(req *http.Request) *clientTrace {
|
|||
func (cc *ClientConn) Ping(ctx context.Context) error {
|
||||
return cc.ping(ctx)
|
||||
}
|
||||
|
||||
// Shutdown gracefully closes the client connection, waiting for running streams to complete.
func (cc *ClientConn) Shutdown(ctx context.Context) error {
	return cc.shutdown(ctx)
}
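A hedged sketch of how the newly exported Shutdown might be used with a deadline; obtaining the *ClientConn via Transport.NewClientConn over an already-established connection is an assumption made for illustration:

```go
package client

import (
	"context"
	"net"
	"time"

	"golang.org/x/net/http2"
)

func gracefulClose(conn net.Conn) error {
	t := &http2.Transport{}
	cc, err := t.NewClientConn(conn)
	if err != nil {
		return err
	}

	// ... issue requests with cc.RoundTrip ...

	// Allow in-flight streams up to five seconds to finish before closing.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return cc.Shutdown(ctx)
}
```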
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.11
|
||||
|
||||
package http2
|
||||
|
||||
func traceHasWroteHeaderField(trace *clientTrace) bool { return false }
|
||||
|
||||
func traceWroteHeaderField(trace *clientTrace, k, v string) {}
|
|
@ -8,6 +8,7 @@ package http2
|
|||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
@ -18,6 +19,8 @@ type contextContext interface {
|
|||
Err() error
|
||||
}
|
||||
|
||||
var errCanceled = errors.New("canceled")
|
||||
|
||||
type fakeContext struct{}
|
||||
|
||||
func (fakeContext) Done() <-chan struct{} { return nil }
|
||||
|
@ -34,6 +37,7 @@ func setResponseUncompressed(res *http.Response) {
|
|||
type clientTrace struct{}
|
||||
|
||||
func requestTrace(*http.Request) *clientTrace { return nil }
|
||||
func traceGetConn(*http.Request, string) {}
|
||||
func traceGotConn(*http.Request, *ClientConn) {}
|
||||
func traceFirstResponseByte(*clientTrace) {}
|
||||
func traceWroteHeaders(*clientTrace) {}
|
||||
|
@ -84,4 +88,8 @@ func (cc *ClientConn) Ping(ctx contextContext) error {
|
|||
return cc.ping(ctx)
|
||||
}
|
||||
|
||||
func (cc *ClientConn) Shutdown(ctx contextContext) error {
|
||||
return cc.shutdown(ctx)
|
||||
}
|
||||
|
||||
func (t *Transport) idleConnTimeout() time.Duration { return 0 }
|
||||
|
|
|
@ -1575,6 +1575,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
|
|||
// type PROTOCOL_ERROR."
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
// RFC 7540, sec 6.1: If a DATA frame is received whose stream is not in
|
||||
// "open" or "half-closed (local)" state, the recipient MUST respond with a
|
||||
// stream error (Section 5.4.2) of type STREAM_CLOSED.
|
||||
if state == stateClosed {
|
||||
return streamError(id, ErrCodeStreamClosed)
|
||||
}
|
||||
if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
|
||||
// This includes sending a RST_STREAM if the stream is
|
||||
// in stateHalfClosedLocal (which currently means that
|
||||
|
@ -1721,6 +1727,13 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
|||
// processing this frame.
|
||||
return nil
|
||||
}
|
||||
// RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
|
||||
// WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
|
||||
// this state, it MUST respond with a stream error (Section 5.4.2) of
|
||||
// type STREAM_CLOSED.
|
||||
if st.state == stateHalfClosedRemote {
|
||||
return streamError(id, ErrCodeStreamClosed)
|
||||
}
|
||||
return st.processTrailerHeaders(f)
|
||||
}
|
||||
|
||||
|
|
|
@ -159,6 +159,7 @@ type ClientConn struct {
|
|||
cond *sync.Cond // hold mu; broadcast on flow/closed changes
|
||||
flow flow // our conn-level flow control quota (cs.flow is per stream)
|
||||
inflow flow // peer's conn-level flow control
|
||||
closing bool
|
||||
closed bool
|
||||
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
|
||||
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
|
||||
|
@ -630,12 +631,32 @@ func (cc *ClientConn) CanTakeNewRequest() bool {
|
|||
return cc.canTakeNewRequestLocked()
|
||||
}
|
||||
|
||||
func (cc *ClientConn) canTakeNewRequestLocked() bool {
|
||||
// clientConnIdleState describes the suitability of a client
|
||||
// connection to initiate a new RoundTrip request.
|
||||
type clientConnIdleState struct {
|
||||
canTakeNewRequest bool
|
||||
freshConn bool // whether it's unused by any previous request
|
||||
}
|
||||
|
||||
func (cc *ClientConn) idleState() clientConnIdleState {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
return cc.idleStateLocked()
|
||||
}
|
||||
|
||||
func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
|
||||
if cc.singleUse && cc.nextStreamID > 1 {
|
||||
return false
|
||||
return
|
||||
}
|
||||
return cc.goAway == nil && !cc.closed &&
|
||||
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing &&
|
||||
int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
|
||||
st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
|
||||
return
|
||||
}
|
||||
|
||||
func (cc *ClientConn) canTakeNewRequestLocked() bool {
|
||||
st := cc.idleStateLocked()
|
||||
return st.canTakeNewRequest
|
||||
}
|
||||
|
||||
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
|
||||
|
@ -665,6 +686,88 @@ func (cc *ClientConn) closeIfIdle() {
|
|||
cc.tconn.Close()
|
||||
}
|
||||
|
||||
var shutdownEnterWaitStateHook = func() {}
|
||||
|
||||
// Shutdown gracefully closes the client connection, waiting for running streams to complete.
|
||||
// Public implementation is in go17.go and not_go17.go
|
||||
func (cc *ClientConn) shutdown(ctx contextContext) error {
|
||||
if err := cc.sendGoAway(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Wait for all in-flight streams to complete or connection to close
|
||||
done := make(chan error, 1)
|
||||
cancelled := false // guarded by cc.mu
|
||||
go func() {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
for {
|
||||
if len(cc.streams) == 0 || cc.closed {
|
||||
cc.closed = true
|
||||
done <- cc.tconn.Close()
|
||||
break
|
||||
}
|
||||
if cancelled {
|
||||
break
|
||||
}
|
||||
cc.cond.Wait()
|
||||
}
|
||||
}()
|
||||
shutdownEnterWaitStateHook()
|
||||
select {
|
||||
case err := <-done:
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
cc.mu.Lock()
|
||||
// Free the goroutine above
|
||||
cancelled = true
|
||||
cc.cond.Broadcast()
|
||||
cc.mu.Unlock()
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (cc *ClientConn) sendGoAway() error {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
cc.wmu.Lock()
|
||||
defer cc.wmu.Unlock()
|
||||
if cc.closing {
|
||||
// GOAWAY sent already
|
||||
return nil
|
||||
}
|
||||
// Send a graceful shutdown frame to server
|
||||
maxStreamID := cc.nextStreamID
|
||||
if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cc.bw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Prevent new requests
|
||||
cc.closing = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the client connection immediately.
|
||||
//
|
||||
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
|
||||
func (cc *ClientConn) Close() error {
|
||||
cc.mu.Lock()
|
||||
defer cc.cond.Broadcast()
|
||||
defer cc.mu.Unlock()
|
||||
err := errors.New("http2: client connection force closed via ClientConn.Close")
|
||||
for id, cs := range cc.streams {
|
||||
select {
|
||||
case cs.resc <- resAndError{err: err}:
|
||||
default:
|
||||
}
|
||||
cs.bufPipe.CloseWithError(err)
|
||||
delete(cc.streams, id)
|
||||
}
|
||||
cc.closed = true
|
||||
return cc.tconn.Close()
|
||||
}
|
||||
|
||||
const maxAllocFrameSize = 512 << 10
|
||||
|
||||
// frameBuffer returns a scratch buffer suitable for writing DATA frames.
|
||||
|
@ -1291,9 +1394,16 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
|
|||
return nil, errRequestHeaderListSize
|
||||
}
|
||||
|
||||
trace := requestTrace(req)
|
||||
traceHeaders := traceHasWroteHeaderField(trace)
|
||||
|
||||
// Header list size is ok. Write the headers.
|
||||
enumerateHeaders(func(name, value string) {
|
||||
cc.writeHeader(strings.ToLower(name), value)
|
||||
name = strings.ToLower(name)
|
||||
cc.writeHeader(name, value)
|
||||
if traceHeaders {
|
||||
traceWroteHeaderField(trace, name, value)
|
||||
}
|
||||
})
|
||||
|
||||
return cc.hbuf.Bytes(), nil
|
||||
|
|
|
@ -143,7 +143,11 @@ type Builder interface {
|
|||
}
|
||||
|
||||
// PickOptions contains addition information for the Pick operation.
|
||||
type PickOptions struct{}
|
||||
type PickOptions struct {
|
||||
// FullMethodName is the method name that NewClientStream() is called
|
||||
// with. The canonical format is /service/Method.
|
||||
FullMethodName string
|
||||
}
|
||||
|
||||
// DoneInfo contains additional information for done.
|
||||
type DoneInfo struct {
|
||||
|
|
|
@ -41,13 +41,13 @@ import (
|
|||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/resolver"
|
||||
_ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
|
||||
_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -473,6 +473,14 @@ func defaultDialOptions() dialOptions {
|
|||
}
|
||||
}
|
||||
|
||||
// WithMaxHeaderListSize returns a DialOption that specifies the maximum (uncompressed) size of
// header list that the client is prepared to accept.
func WithMaxHeaderListSize(s uint32) DialOption {
	return func(o *dialOptions) {
		o.copts.MaxHeaderListSize = &s
	}
}
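A minimal sketch of wiring this option into a dial; the target address is illustrative, and WithInsecure is used only to keep the example short:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Refuse response header metadata larger than 1 MiB (uncompressed).
	conn, err := grpc.Dial(
		"localhost:50051",
		grpc.WithInsecure(),
		grpc.WithMaxHeaderListSize(1<<20),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```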
|
||||
|
||||
// Dial creates a client connection to the given target.
|
||||
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
|
||||
return DialContext(context.Background(), target, opts...)
|
||||
|
@ -1051,8 +1059,10 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
|
|||
return m
|
||||
}
|
||||
|
||||
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{})
|
||||
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
|
||||
FullMethodName: method,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, toRPCErr(err)
|
||||
}
|
||||
|
@ -1352,7 +1362,7 @@ func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline,
|
|||
// Didn't receive server preface, must kill this new transport now.
|
||||
grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.")
|
||||
newTr.Close()
|
||||
break
|
||||
continue
|
||||
case <-ac.ctx.Done():
|
||||
}
|
||||
}
|
||||
|
|
|
@ -28,8 +28,8 @@ import (
|
|||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// dialContext connects to the address on the named network.
|
||||
|
|
|
@ -29,8 +29,8 @@ import (
|
|||
|
||||
netctx "golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// dialContext connects to the address on the named network.
|
||||
|
|
|
@ -285,6 +285,21 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{
|
|||
return true, nil
|
||||
}
|
||||
|
||||
// Note argument f should never be nil.
|
||||
func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return false, c.err
|
||||
}
|
||||
if !f(it) { // f wasn't successful
|
||||
c.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
c.mu.Unlock()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||||
for {
|
||||
c.mu.Lock()
|
|
@@ -0,0 +1,49 @@
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package transport

import (
	"math"
	"time"
)

const (
	// The default value of flow control window size in HTTP2 spec.
	defaultWindowSize = 65535
	// The initial window size for flow control.
	initialWindowSize = defaultWindowSize // for an RPC
	infinity = time.Duration(math.MaxInt64)
	defaultClientKeepaliveTime = infinity
	defaultClientKeepaliveTimeout = 20 * time.Second
	defaultMaxStreamsClient = 100
	defaultMaxConnectionIdle = infinity
	defaultMaxConnectionAge = infinity
	defaultMaxConnectionAgeGrace = infinity
	defaultServerKeepaliveTime = 2 * time.Hour
	defaultServerKeepaliveTimeout = 20 * time.Second
	defaultKeepalivePolicyMinTime = 5 * time.Minute
	// max window limit set by HTTP2 Specs.
	maxWindowSize = math.MaxInt32
	// defaultWriteQuota is the default value for number of data
	// bytes that each stream can schedule before some of it being
	// flushed out.
	defaultWriteQuota = 64 * 1024
	defaultClientMaxHeaderListSize = uint32(16 << 20)
	defaultServerMaxHeaderListSize = uint32(16 << 20)
)
@@ -23,30 +23,6 @@ import (
	"math"
	"sync"
	"sync/atomic"
	"time"
)

const (
	// The default value of flow control window size in HTTP2 spec.
	defaultWindowSize = 65535
	// The initial window size for flow control.
	initialWindowSize = defaultWindowSize // for an RPC
	infinity = time.Duration(math.MaxInt64)
	defaultClientKeepaliveTime = infinity
	defaultClientKeepaliveTimeout = 20 * time.Second
	defaultMaxStreamsClient = 100
	defaultMaxConnectionIdle = infinity
	defaultMaxConnectionAge = infinity
	defaultMaxConnectionAgeGrace = infinity
	defaultServerKeepaliveTime = 2 * time.Hour
	defaultServerKeepaliveTimeout = 20 * time.Second
	defaultKeepalivePolicyMinTime = 5 * time.Minute
	// max window limit set by HTTP2 Specs.
	maxWindowSize = math.MaxInt32
	// defaultWriteQuota is the default value for number of data
	// bytes that each stream can schedule before some of it being
	// flushed out.
	defaultWriteQuota = 64 * 1024
)

// writeQuota is a soft limit on the amount of data a stream can
@@ -85,6 +85,9 @@ type http2Client struct {

	initialWindowSize int32

	// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
	maxSendHeaderListSize *uint32

	bdpEst *bdpEstimator
	// onSuccess is a callback that client transport calls upon
	// receiving server preface to signal that a succefull HTTP2

@@ -199,6 +202,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
	}
	writeBufSize := opts.WriteBufferSize
	readBufSize := opts.ReadBufferSize
	maxHeaderListSize := defaultClientMaxHeaderListSize
	if opts.MaxHeaderListSize != nil {
		maxHeaderListSize = *opts.MaxHeaderListSize
	}
	t := &http2Client{
		ctx: ctx,
		ctxDone: ctx.Done(), // Cache Done chan.

@@ -213,7 +220,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
		writerDone: make(chan struct{}),
		goAway: make(chan struct{}),
		awakenKeepalive: make(chan struct{}, 1),
		framer: newFramer(conn, writeBufSize, readBufSize),
		framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
		fc: &trInFlow{limit: uint32(icwz)},
		scheme: scheme,
		activeStreams: make(map[uint32]*Stream),

@@ -273,14 +280,21 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
		t.Close()
		return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
	}
	var ss []http2.Setting

	if t.initialWindowSize != defaultWindowSize {
		err = t.framer.fr.WriteSettings(http2.Setting{
		ss = append(ss, http2.Setting{
			ID: http2.SettingInitialWindowSize,
			Val: uint32(t.initialWindowSize),
		})
	} else {
		err = t.framer.fr.WriteSettings()
	}
	if opts.MaxHeaderListSize != nil {
		ss = append(ss, http2.Setting{
			ID: http2.SettingMaxHeaderListSize,
			Val: *opts.MaxHeaderListSize,
		})
	}
	err = t.framer.fr.WriteSettings(ss...)
	if err != nil {
		t.Close()
		return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
@@ -588,14 +602,40 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
		}
		return true
	}
	var hdrListSizeErr error
	checkForHeaderListSize := func(it interface{}) bool {
		if t.maxSendHeaderListSize == nil {
			return true
		}
		hdrFrame := it.(*headerFrame)
		var sz int64
		for _, f := range hdrFrame.hf {
			if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
				hdrListSizeErr = streamErrorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
				return false
			}
		}
		return true
	}
	for {
		success, err := t.controlBuf.executeAndPut(checkForStreamQuota, hdr)
		success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
			if !checkForStreamQuota(it) {
				return false
			}
			if !checkForHeaderListSize(it) {
				return false
			}
			return true
		}, hdr)
		if err != nil {
			return nil, err
		}
		if success {
			break
		}
		if hdrListSizeErr != nil {
			return nil, hdrListSizeErr
		}
		firstTry = false
		select {
		case <-ch:
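The closure above is the sender-side enforcement of SETTINGS_MAX_HEADER_LIST_SIZE: each header field is charged len(name) + len(value) + 32 octets, which is exactly what hpack.HeaderField.Size reports. A standalone sketch of that accounting; the field values and limits below are made up for the example:

```go
package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// exceedsHeaderListSize reports whether the uncompressed size of the header
// fields, computed the way SETTINGS_MAX_HEADER_LIST_SIZE is enforced
// (len(name) + len(value) + 32 per field), is larger than limit.
func exceedsHeaderListSize(fields []hpack.HeaderField, limit uint32) bool {
	var sz int64
	for _, f := range fields {
		sz += int64(f.Size())
		if sz > int64(limit) {
			return true
		}
	}
	return false
}

func main() {
	fields := []hpack.HeaderField{
		{Name: ":method", Value: "POST"},
		{Name: ":path", Value: "/example.Echo/Say"},
		{Name: "content-type", Value: "application/grpc"},
	}
	fmt.Println(exceedsHeaderListSize(fields, 64))   // true: the cumulative size passes 64 octets
	fmt.Println(exceedsHeaderListSize(fields, 4096)) // false
}
```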
@@ -908,6 +948,13 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
		warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
		statusCode = codes.Unknown
	}
	if statusCode == codes.Canceled {
		// Our deadline was already exceeded, and that was likely the cause of
		// this cancelation. Alter the status code accordingly.
		if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) {
			statusCode = codes.DeadlineExceeded
		}
	}
	t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
}

@ -917,13 +964,20 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
|
|||
}
|
||||
var maxStreams *uint32
|
||||
var ss []http2.Setting
|
||||
var updateFuncs []func()
|
||||
f.ForeachSetting(func(s http2.Setting) error {
|
||||
if s.ID == http2.SettingMaxConcurrentStreams {
|
||||
switch s.ID {
|
||||
case http2.SettingMaxConcurrentStreams:
|
||||
maxStreams = new(uint32)
|
||||
*maxStreams = s.Val
|
||||
return nil
|
||||
case http2.SettingMaxHeaderListSize:
|
||||
updateFuncs = append(updateFuncs, func() {
|
||||
t.maxSendHeaderListSize = new(uint32)
|
||||
*t.maxSendHeaderListSize = s.Val
|
||||
})
|
||||
default:
|
||||
ss = append(ss, s)
|
||||
}
|
||||
ss = append(ss, s)
|
||||
return nil
|
||||
})
|
||||
if isFirst && maxStreams == nil {
|
||||
|
@ -933,21 +987,24 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
|
|||
sf := &incomingSettings{
|
||||
ss: ss,
|
||||
}
|
||||
if maxStreams == nil {
|
||||
t.controlBuf.put(sf)
|
||||
return
|
||||
if maxStreams != nil {
|
||||
updateStreamQuota := func() {
|
||||
delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
|
||||
t.maxConcurrentStreams = *maxStreams
|
||||
t.streamQuota += delta
|
||||
if delta > 0 && t.waitingStreams > 0 {
|
||||
close(t.streamsQuotaAvailable) // wake all of them up.
|
||||
t.streamsQuotaAvailable = make(chan struct{}, 1)
|
||||
}
|
||||
}
|
||||
updateFuncs = append(updateFuncs, updateStreamQuota)
|
||||
}
|
||||
updateStreamQuota := func(interface{}) bool {
|
||||
delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
|
||||
t.maxConcurrentStreams = *maxStreams
|
||||
t.streamQuota += delta
|
||||
if delta > 0 && t.waitingStreams > 0 {
|
||||
close(t.streamsQuotaAvailable) // wake all of them up.
|
||||
t.streamsQuotaAvailable = make(chan struct{}, 1)
|
||||
t.controlBuf.executeAndPut(func(interface{}) bool {
|
||||
for _, f := range updateFuncs {
|
||||
f()
|
||||
}
|
||||
return true
|
||||
}
|
||||
t.controlBuf.executeAndPut(updateStreamQuota, sf)
|
||||
}, sf)
|
||||
}
|
||||
|
||||
func (t *http2Client) handlePing(f *http2.PingFrame) {
|
||||
|
@@ -1058,7 +1115,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
	}
	atomic.StoreUint32(&s.bytesReceived, 1)
	var state decodeState
	if err := state.decodeResponseHeader(frame); err != nil {
	if err := state.decodeHeader(frame); err != nil {
		t.closeStream(s, err, true, http2.ErrCodeProtocol, status.New(codes.Internal, err.Error()), nil, false)
		// Something wrong. Stops reading even when there is remaining.
		return
@@ -48,9 +48,14 @@ import (
	"google.golang.org/grpc/tap"
)

// ErrIllegalHeaderWrite indicates that setting header is illegal because of
// the stream's state.
var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
var (
	// ErrIllegalHeaderWrite indicates that setting header is illegal because of
	// the stream's state.
	ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
	// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
	// than the limit set by peer.
	ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
)

// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
@ -89,9 +94,10 @@ type http2Server struct {
|
|||
// Flag to signify that number of ping strikes should be reset to 0.
|
||||
// This is set whenever data or header frames are sent.
|
||||
// 1 means yes.
|
||||
resetPingStrikes uint32 // Accessed atomically.
|
||||
initialWindowSize int32
|
||||
bdpEst *bdpEstimator
|
||||
resetPingStrikes uint32 // Accessed atomically.
|
||||
initialWindowSize int32
|
||||
bdpEst *bdpEstimator
|
||||
maxSendHeaderListSize *uint32
|
||||
|
||||
mu sync.Mutex // guard the following
|
||||
|
||||
|
@ -132,7 +138,11 @@ type http2Server struct {
|
|||
func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
|
||||
writeBufSize := config.WriteBufferSize
|
||||
readBufSize := config.ReadBufferSize
|
||||
framer := newFramer(conn, writeBufSize, readBufSize)
|
||||
maxHeaderListSize := defaultServerMaxHeaderListSize
|
||||
if config.MaxHeaderListSize != nil {
|
||||
maxHeaderListSize = *config.MaxHeaderListSize
|
||||
}
|
||||
framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
|
||||
// Send initial settings as connection preface to client.
|
||||
var isettings []http2.Setting
|
||||
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
|
||||
|
@ -162,6 +172,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
|||
ID: http2.SettingInitialWindowSize,
|
||||
Val: uint32(iwz)})
|
||||
}
|
||||
if config.MaxHeaderListSize != nil {
|
||||
isettings = append(isettings, http2.Setting{
|
||||
ID: http2.SettingMaxHeaderListSize,
|
||||
Val: *config.MaxHeaderListSize,
|
||||
})
|
||||
}
|
||||
if err := framer.fr.WriteSettings(isettings...); err != nil {
|
||||
return nil, connectionErrorf(false, err, "transport: %v", err)
|
||||
}
|
||||
|
@ -281,19 +297,17 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
|||
// operateHeader takes action on the decoded headers.
|
||||
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) {
|
||||
streamID := frame.Header().StreamID
|
||||
var state decodeState
|
||||
for _, hf := range frame.Fields {
|
||||
if err := state.processHeaderField(hf); err != nil {
|
||||
if se, ok := err.(StreamError); ok {
|
||||
t.controlBuf.put(&cleanupStream{
|
||||
streamID: streamID,
|
||||
rst: true,
|
||||
rstCode: statusCodeConvTab[se.Code],
|
||||
onWrite: func() {},
|
||||
})
|
||||
}
|
||||
return
|
||||
state := decodeState{serverSide: true}
|
||||
if err := state.decodeHeader(frame); err != nil {
|
||||
if se, ok := err.(StreamError); ok {
|
||||
t.controlBuf.put(&cleanupStream{
|
||||
streamID: streamID,
|
||||
rst: true,
|
||||
rstCode: statusCodeConvTab[se.Code],
|
||||
onWrite: func() {},
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
buf := newRecvBuffer()
|
||||
|
@ -613,11 +627,25 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
|
|||
return
|
||||
}
|
||||
var ss []http2.Setting
|
||||
var updateFuncs []func()
|
||||
f.ForeachSetting(func(s http2.Setting) error {
|
||||
ss = append(ss, s)
|
||||
switch s.ID {
|
||||
case http2.SettingMaxHeaderListSize:
|
||||
updateFuncs = append(updateFuncs, func() {
|
||||
t.maxSendHeaderListSize = new(uint32)
|
||||
*t.maxSendHeaderListSize = s.Val
|
||||
})
|
||||
default:
|
||||
ss = append(ss, s)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
t.controlBuf.put(&incomingSettings{
|
||||
t.controlBuf.executeAndPut(func(interface{}) bool {
|
||||
for _, f := range updateFuncs {
|
||||
f()
|
||||
}
|
||||
return true
|
||||
}, &incomingSettings{
|
||||
ss: ss,
|
||||
})
|
||||
}
|
||||
|
@ -697,6 +725,21 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD)
|
|||
return headerFields
|
||||
}
|
||||
|
||||
func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
||||
if t.maxSendHeaderListSize == nil {
|
||||
return true
|
||||
}
|
||||
hdrFrame := it.(*headerFrame)
|
||||
var sz int64
|
||||
for _, f := range hdrFrame.hf {
|
||||
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
|
||||
errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// WriteHeader sends the header metedata md back to the client.
|
||||
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
if s.updateHeaderSent() || s.getState() == streamDone {
|
||||
|
@ -710,12 +753,15 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
|||
s.header = md
|
||||
}
|
||||
}
|
||||
t.writeHeaderLocked(s)
|
||||
if err := t.writeHeaderLocked(s); err != nil {
|
||||
s.hdrMu.Unlock()
|
||||
return err
|
||||
}
|
||||
s.hdrMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *http2Server) writeHeaderLocked(s *Stream) {
|
||||
func (t *http2Server) writeHeaderLocked(s *Stream) error {
|
||||
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
|
||||
// first and create a slice of that exact size.
|
||||
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
|
||||
|
@ -725,7 +771,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) {
|
|||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
|
||||
}
|
||||
headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
|
||||
t.controlBuf.put(&headerFrame{
|
||||
success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
|
||||
streamID: s.id,
|
||||
hf: headerFields,
|
||||
endStream: false,
|
||||
|
@ -733,12 +779,20 @@ func (t *http2Server) writeHeaderLocked(s *Stream) {
|
|||
atomic.StoreUint32(&t.resetPingStrikes, 1)
|
||||
},
|
||||
})
|
||||
if !success {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
|
||||
return ErrHeaderListSizeLimitViolation
|
||||
}
|
||||
if t.stats != nil {
|
||||
// Note: WireLength is not set in outHeader.
|
||||
// TODO(mmukhi): Revisit this later, if needed.
|
||||
outHeader := &stats.OutHeader{}
|
||||
t.stats.HandleRPC(s.Context(), outHeader)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteStatus sends stream status to the client and terminates the stream.
|
||||
|
@ -755,7 +809,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
|||
headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
|
||||
if !s.updateHeaderSent() { // No headers have been sent.
|
||||
if len(s.header) > 0 { // Send a separate header frame.
|
||||
t.writeHeaderLocked(s)
|
||||
if err := t.writeHeaderLocked(s); err != nil {
|
||||
s.hdrMu.Unlock()
|
||||
return err
|
||||
}
|
||||
} else { // Send a trailer only response.
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
|
||||
|
@ -785,6 +842,14 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
|||
},
|
||||
}
|
||||
s.hdrMu.Unlock()
|
||||
success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
|
||||
if !success {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
|
||||
return ErrHeaderListSizeLimitViolation
|
||||
}
|
||||
t.closeStream(s, false, 0, trailingHeader, true)
|
||||
if t.stats != nil {
|
||||
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
|
|
@ -119,6 +119,8 @@ type decodeState struct {
|
|||
statsTags []byte
|
||||
statsTrace []byte
|
||||
contentSubtype string
|
||||
// whether decoding on server side or not
|
||||
serverSide bool
|
||||
}
|
||||
|
||||
// isReservedHeader checks whether hdr belongs to HTTP2 headers
|
||||
|
@ -235,13 +237,22 @@ func decodeMetadataHeader(k, v string) (string, error) {
|
|||
return v, nil
|
||||
}
|
||||
|
||||
func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error {
|
||||
func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
|
||||
// frame.Truncated is set to true when framer detects that the current header
|
||||
// list size hits MaxHeaderListSize limit.
|
||||
if frame.Truncated {
|
||||
return streamErrorf(codes.Internal, "peer header list size exceeded limit")
|
||||
}
|
||||
for _, hf := range frame.Fields {
|
||||
if err := d.processHeaderField(hf); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if d.serverSide {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If grpc status exists, no need to check further.
|
||||
if d.rawStatusCode != nil || d.statusGen != nil {
|
||||
return nil
|
||||
|
@ -270,7 +281,6 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error
|
|||
code := int(codes.Unknown)
|
||||
d.rawStatusCode = &code
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (d *decodeState) addMetadata(k, v string) {
|
||||
|
@@ -581,7 +591,7 @@ type framer struct {
	fr *http2.Framer
}

func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
	if writeBufferSize < 0 {
		writeBufferSize = 0
	}

@@ -597,6 +607,7 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
	// Opt-in to Frame reuse API on framer to reduce garbage.
	// Frames aren't safe to read from after a subsequent call to ReadFrame.
	f.fr.SetReuseFrames()
	f.fr.MaxHeaderListSize = maxHeaderListSize
	f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
	return f
}
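The framer change wires the negotiated limit into golang.org/x/net/http2: MaxHeaderListSize only takes effect when ReadMetaHeaders is set, since that is what makes ReadFrame return decoded MetaHeadersFrame values. A small standalone sketch of the same wiring; the in-memory buffer and the decoder table size are placeholders:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer // stands in for the real net.Conn
	fr := http2.NewFramer(&buf, &buf)

	// Reuse frame objects to cut garbage, then bound decoded header lists.
	fr.SetReuseFrames()
	fr.MaxHeaderListSize = 16 << 20 // matches the 16 MiB defaults added above
	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)

	fmt.Printf("header list size limit: %d bytes\n", fr.MaxHeaderListSize)
}
```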
@@ -19,7 +19,7 @@
// Package transport defines and implements message oriented communication
// channel to complete various transactions (e.g., an RPC). It is meant for
// grpc-internal usage and is not intended to be imported directly by users.
package transport // externally used as import "google.golang.org/grpc/transport"
package transport

import (
	"errors"

@@ -454,6 +454,7 @@ type ServerConfig struct {
	WriteBufferSize int
	ReadBufferSize int
	ChannelzParentID int64
	MaxHeaderListSize *uint32
}

// NewServerTransport creates a ServerTransport with conn or non-nil error

@@ -491,6 +492,8 @@ type ConnectOptions struct {
	ReadBufferSize int
	// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
	ChannelzParentID int64
	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
	MaxHeaderListSize *uint32
}

// TargetInfo contains the information of the target such as network address and metadata.
@ -27,8 +27,8 @@ import (
|
|||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
|
||||
|
|
|
@ -36,11 +36,11 @@ import (
|
|||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/encoding"
|
||||
"google.golang.org/grpc/encoding/proto"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// Compressor defines the interface gRPC uses to compress a message.
|
||||
|
|
|
@ -45,12 +45,12 @@ import (
|
|||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/tap"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@@ -135,6 +135,7 @@ type options struct {
	writeBufferSize int
	readBufferSize int
	connectionTimeout time.Duration
	maxHeaderListSize *uint32
}

var defaultServerOptions = options{

@@ -343,6 +344,14 @@ func ConnectionTimeout(d time.Duration) ServerOption {
	}
}

// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
// of header list that the server is prepared to accept.
func MaxHeaderListSize(s uint32) ServerOption {
	return func(o *options) {
		o.maxHeaderListSize = &s
	}
}

// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {

@@ -665,6 +674,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
		WriteBufferSize: s.opts.writeBufferSize,
		ReadBufferSize: s.opts.readBufferSize,
		ChannelzParentID: s.channelzID,
		MaxHeaderListSize: s.opts.maxHeaderListSize,
	}
	st, err := transport.NewServerTransport("http2", c, config)
	if err != nil {
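The new ServerOption is the user-facing knob for the server-side header list limit threaded through the transport above. A minimal usage sketch; the listener address and the absence of registered services are purely illustrative:

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	// Cap the uncompressed header list the server will accept at 1 MiB.
	// grpc.MaxHeaderListSize is the option added in this diff.
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer(grpc.MaxHeaderListSize(1 << 20))
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
```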
|
|
@ -34,10 +34,10 @@ import (
|
|||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// StreamHandler defines the handler called by gRPC server to complete the
|
||||
|
@ -59,43 +59,20 @@ type StreamDesc struct {
|
|||
|
||||
// Stream defines the common interface a client or server stream has to satisfy.
|
||||
//
|
||||
// All errors returned from Stream are compatible with the status package.
|
||||
// Deprecated: See ClientStream and ServerStream documentation instead.
|
||||
type Stream interface {
|
||||
// Context returns the context for this stream. If called from the client,
|
||||
// Should be done after Header or RecvMsg. Otherwise, retries may not be
|
||||
// possible to perform.
|
||||
// Deprecated: See ClientStream and ServerStream documentation instead.
|
||||
Context() context.Context
|
||||
// SendMsg is generally called by generated code. On error, SendMsg aborts
|
||||
// the stream and returns an RPC status on the client side. On the server
|
||||
// side, it simply returns the error to the caller.
|
||||
//
|
||||
// SendMsg blocks until:
|
||||
// - It schedules m with the transport, or
|
||||
// - The stream is done, or
|
||||
// - The stream breaks.
|
||||
//
|
||||
// SendMsg does not wait for an ack. An untimely stream closure
|
||||
// can result in any buffered messages along the way (including
|
||||
// those in the client-side buffer that comes with gRPC by default)
|
||||
// being lost. To ensure delivery, users must call RecvMsg until
|
||||
// receiving an EOF before closing the stream.
|
||||
//
|
||||
// It's safe to have a goroutine calling SendMsg and another
|
||||
// goroutine calling RecvMsg on the same stream at the same
|
||||
// time. It is not safe to call SendMsg on the same stream
|
||||
// in different goroutines.
|
||||
// Deprecated: See ClientStream and ServerStream documentation instead.
|
||||
SendMsg(m interface{}) error
|
||||
// RecvMsg blocks until it receives a message or the stream is
|
||||
// done. On client side, it returns io.EOF when the stream is done. On
|
||||
// any other error, it aborts the stream and returns an RPC status. On
|
||||
// server side, it simply returns the error to the caller.
|
||||
// It's safe to have a goroutine calling SendMsg and another goroutine calling
|
||||
// recvMsg on the same stream at the same time.
|
||||
// But it is not safe to call RecvMsg on the same stream in different goroutines.
|
||||
// Deprecated: See ClientStream and ServerStream documentation instead.
|
||||
RecvMsg(m interface{}) error
|
||||
}
|
||||
|
||||
// ClientStream defines the interface a client stream has to satisfy.
|
||||
// ClientStream defines the client-side behavior of a streaming RPC.
|
||||
//
|
||||
// All errors returned from ClientStream methods are compatible with the
|
||||
// status package.
|
||||
type ClientStream interface {
|
||||
// Header returns the header metadata received from the server if there
|
||||
// is any. It blocks if the metadata is not ready to read.
|
||||
|
@ -107,13 +84,38 @@ type ClientStream interface {
|
|||
// CloseSend closes the send direction of the stream. It closes the stream
|
||||
// when non-nil error is met.
|
||||
CloseSend() error
|
||||
// Stream.SendMsg() may return a non-nil error when something wrong happens sending
|
||||
// the request. The returned error indicates the status of this sending, not the final
|
||||
// status of the RPC.
|
||||
// Context returns the context for this stream.
|
||||
//
|
||||
// Always call Stream.RecvMsg() to drain the stream and get the final
|
||||
// status, otherwise there could be leaked resources.
|
||||
Stream
|
||||
// It should not be called until after Header or RecvMsg has returned. Once
|
||||
// called, subsequent client-side retries are disabled.
|
||||
Context() context.Context
|
||||
// SendMsg is generally called by generated code. On error, SendMsg aborts
|
||||
// the stream. If the error was generated by the client, the status is
|
||||
// returned directly; otherwise, io.EOF is returned and the status of
|
||||
// the stream may be discovered using RecvMsg.
|
||||
//
|
||||
// SendMsg blocks until:
|
||||
// - There is sufficient flow control to schedule m with the transport, or
|
||||
// - The stream is done, or
|
||||
// - The stream breaks.
|
||||
//
|
||||
// SendMsg does not wait until the message is received by the server. An
|
||||
// untimely stream closure may result in lost messages. To ensure delivery,
|
||||
// users should ensure the RPC completed successfully using RecvMsg.
|
||||
//
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not safe
|
||||
// to call SendMsg on the same stream in different goroutines.
|
||||
SendMsg(m interface{}) error
|
||||
// RecvMsg blocks until it receives a message into m or the stream is
|
||||
// done. It returns io.EOF when the stream completes successfully. On
|
||||
// any other error, the stream is aborted and the error contains the RPC
|
||||
// status.
|
||||
//
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not
|
||||
// safe to call RecvMsg on the same stream in different goroutines.
|
||||
RecvMsg(m interface{}) error
|
||||
}
|
||||
|
||||
// NewStream creates a new Stream for the client side. This is typically
|
||||
|
@ -144,8 +146,6 @@ func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method st
|
|||
}
|
||||
|
||||
// NewClientStream is a wrapper for ClientConn.NewStream.
|
||||
//
|
||||
// DEPRECATED: Use ClientConn.NewStream instead.
|
||||
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
|
||||
return cc.NewStream(ctx, desc, method, opts...)
|
||||
}
|
||||
|
@ -311,7 +311,7 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) err
|
|||
if err := cs.ctx.Err(); err != nil {
|
||||
return toRPCErr(err)
|
||||
}
|
||||
t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast)
|
||||
t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -845,7 +845,10 @@ func (a *csAttempt) finish(err error) {
|
|||
a.mu.Unlock()
|
||||
}
|
||||
|
||||
// ServerStream defines the interface a server stream has to satisfy.
|
||||
// ServerStream defines the server-side behavior of a streaming RPC.
|
||||
//
|
||||
// All errors returned from ServerStream methods are compatible with the
|
||||
// status package.
|
||||
type ServerStream interface {
|
||||
// SetHeader sets the header metadata. It may be called multiple times.
|
||||
// When call multiple times, all the provided metadata will be merged.
|
||||
|
@ -861,7 +864,32 @@ type ServerStream interface {
|
|||
// SetTrailer sets the trailer metadata which will be sent with the RPC status.
|
||||
// When called more than once, all the provided metadata will be merged.
|
||||
SetTrailer(metadata.MD)
|
||||
Stream
|
||||
// Context returns the context for this stream.
|
||||
Context() context.Context
|
||||
// SendMsg sends a message. On error, SendMsg aborts the stream and the
|
||||
// error is returned directly.
|
||||
//
|
||||
// SendMsg blocks until:
|
||||
// - There is sufficient flow control to schedule m with the transport, or
|
||||
// - The stream is done, or
|
||||
// - The stream breaks.
|
||||
//
|
||||
// SendMsg does not wait until the message is received by the client. An
|
||||
// untimely stream closure may result in lost messages.
|
||||
//
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not safe
|
||||
// to call SendMsg on the same stream in different goroutines.
|
||||
SendMsg(m interface{}) error
|
||||
// RecvMsg blocks until it receives a message into m or the stream is
|
||||
// done. It returns io.EOF when the client has performed a CloseSend. On
|
||||
// any non-EOF error, the stream is aborted and the error contains the
|
||||
// RPC status.
|
||||
//
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not
|
||||
// safe to call RecvMsg on the same stream in different goroutines.
|
||||
RecvMsg(m interface{}) error
|
||||
}
|
||||
|
||||
// serverStream implements a server side Stream.
|
||||
|
|
|
@@ -78,7 +78,7 @@ fi

# TODO(menghanl): fix errors in transport_test.
staticcheck -ignore '
google.golang.org/grpc/transport/transport_test.go:SA2002
google.golang.org/grpc/internal/transport/transport_test.go:SA2002
google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
google.golang.org/grpc/stats/stats_test.go:SA1019
google.golang.org/grpc/test/end2end_test.go:SA1019
@ -185,7 +185,7 @@ type ObjectConvertor interface {
|
|||
// This method is similar to Convert() but handles specific details of choosing the correct
|
||||
// output version.
|
||||
ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error)
|
||||
ConvertFieldLabel(version, kind, label, value string) (string, string, error)
|
||||
ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error)
|
||||
}
|
||||
|
||||
// ObjectTyper contains methods for extracting the APIVersion and Kind
|
||||
|
|
|
@ -63,7 +63,7 @@ type Scheme struct {
|
|||
|
||||
// Map from version and resource to the corresponding func to convert
|
||||
// resource field labels in that version to internal version.
|
||||
fieldLabelConversionFuncs map[string]map[string]FieldLabelConversionFunc
|
||||
fieldLabelConversionFuncs map[schema.GroupVersionKind]FieldLabelConversionFunc
|
||||
|
||||
// defaulterFuncs is an array of interfaces to be called with an object to provide defaulting
|
||||
// the provided object must be a pointer.
|
||||
|
@ -95,7 +95,7 @@ func NewScheme() *Scheme {
|
|||
typeToGVK: map[reflect.Type][]schema.GroupVersionKind{},
|
||||
unversionedTypes: map[reflect.Type]schema.GroupVersionKind{},
|
||||
unversionedKinds: map[string]reflect.Type{},
|
||||
fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{},
|
||||
fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{},
|
||||
defaulterFuncs: map[reflect.Type]func(interface{}){},
|
||||
versionPriority: map[string][]string{},
|
||||
schemeName: naming.GetNameFromCallsite(internalPackages...),
|
||||
|
@@ -368,12 +368,8 @@ func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) err

// AddFieldLabelConversionFunc adds a conversion function to convert field selectors
// of the given kind from the given version to internal version representation.
func (s *Scheme) AddFieldLabelConversionFunc(version, kind string, conversionFunc FieldLabelConversionFunc) error {
	if s.fieldLabelConversionFuncs[version] == nil {
		s.fieldLabelConversionFuncs[version] = map[string]FieldLabelConversionFunc{}
	}

	s.fieldLabelConversionFuncs[version][kind] = conversionFunc
func (s *Scheme) AddFieldLabelConversionFunc(gvk schema.GroupVersionKind, conversionFunc FieldLabelConversionFunc) error {
	s.fieldLabelConversionFuncs[gvk] = conversionFunc
	return nil
}

@@ -486,11 +482,8 @@ func (s *Scheme) Convert(in, out interface{}, context interface{}) error {

// ConvertFieldLabel alters the given field label and value for an kind field selector from
// versioned representation to an unversioned one or returns an error.
func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) {
	if s.fieldLabelConversionFuncs[version] == nil {
		return DefaultMetaV1FieldSelectorConversion(label, value)
	}
	conversionFunc, ok := s.fieldLabelConversionFuncs[version][kind]
func (s *Scheme) ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error) {
	conversionFunc, ok := s.fieldLabelConversionFuncs[gvk]
	if !ok {
		return DefaultMetaV1FieldSelectorConversion(label, value)
	}
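Callers of the apimachinery Scheme now key field label conversions by a full GroupVersionKind instead of separate version/kind strings. A hedged migration sketch against the signatures shown in this diff; the group, kind, and supported labels are invented for the example:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	scheme := runtime.NewScheme()
	gvk := schema.GroupVersionKind{Group: "example.dev", Version: "v1", Kind: "Widget"}

	// Register a conversion keyed by the GVK, per the new signature.
	if err := scheme.AddFieldLabelConversionFunc(gvk, func(label, value string) (string, string, error) {
		switch label {
		case "metadata.name", "spec.color":
			return label, value, nil
		default:
			return "", "", fmt.Errorf("field label %q not supported for %s", label, gvk)
		}
	}); err != nil {
		panic(err)
	}

	// ConvertFieldLabel is likewise looked up by GVK now.
	l, v, err := scheme.ConvertFieldLabel(gvk, "spec.color", "blue")
	fmt.Println(l, v, err) // spec.color blue <nil>
}
```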
|
@ -60,7 +60,7 @@ func UserName_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(UserName_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func UserName_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -70,7 +70,7 @@ func UserName_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(UserName_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func UserName_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -153,7 +153,7 @@ func UserPassword_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(UserPassword_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func UserPassword_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -163,7 +163,7 @@ func UserPassword_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(UserPassword_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func UserPassword_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -254,7 +254,7 @@ func CHAPPassword_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(CHAPPassword_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func CHAPPassword_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -264,7 +264,7 @@ func CHAPPassword_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(CHAPPassword_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func CHAPPassword_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -347,7 +347,7 @@ func NASIPAddress_Add(p *radius.Packet, value net.IP) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(NASIPAddress_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func NASIPAddress_Get(p *radius.Packet) (value net.IP) {
|
||||
|
@ -384,7 +384,7 @@ func NASIPAddress_Set(p *radius.Packet, value net.IP) (err error) {
|
|||
return
|
||||
}
|
||||
p.Set(NASIPAddress_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
type NASPort uint32
|
||||
|
@ -401,7 +401,7 @@ func (a NASPort) String() string {
|
|||
func NASPort_Add(p *radius.Packet, value NASPort) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Add(NASPort_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func NASPort_Get(p *radius.Packet) (value NASPort) {
|
||||
|
@ -439,7 +439,7 @@ func NASPort_Lookup(p *radius.Packet) (value NASPort, err error) {
|
|||
func NASPort_Set(p *radius.Packet, value NASPort) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Set(NASPort_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
type ServiceType uint32
|
||||
|
@ -482,7 +482,7 @@ func (a ServiceType) String() string {
|
|||
func ServiceType_Add(p *radius.Packet, value ServiceType) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Add(ServiceType_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func ServiceType_Get(p *radius.Packet) (value ServiceType) {
|
||||
|
@ -520,7 +520,7 @@ func ServiceType_Lookup(p *radius.Packet) (value ServiceType, err error) {
|
|||
func ServiceType_Set(p *radius.Packet, value ServiceType) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Set(ServiceType_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
type FramedProtocol uint32
|
||||
|
@ -553,7 +553,7 @@ func (a FramedProtocol) String() string {
|
|||
func FramedProtocol_Add(p *radius.Packet, value FramedProtocol) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Add(FramedProtocol_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FramedProtocol_Get(p *radius.Packet) (value FramedProtocol) {
|
||||
|
@ -591,7 +591,7 @@ func FramedProtocol_Lookup(p *radius.Packet) (value FramedProtocol, err error) {
|
|||
func FramedProtocol_Set(p *radius.Packet, value FramedProtocol) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Set(FramedProtocol_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FramedIPAddress_Add(p *radius.Packet, value net.IP) (err error) {
|
||||
|
@ -601,7 +601,7 @@ func FramedIPAddress_Add(p *radius.Packet, value net.IP) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(FramedIPAddress_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FramedIPAddress_Get(p *radius.Packet) (value net.IP) {
|
||||
|
@ -638,7 +638,7 @@ func FramedIPAddress_Set(p *radius.Packet, value net.IP) (err error) {
|
|||
return
|
||||
}
|
||||
p.Set(FramedIPAddress_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FramedIPNetmask_Add(p *radius.Packet, value net.IP) (err error) {
|
||||
|
@ -648,7 +648,7 @@ func FramedIPNetmask_Add(p *radius.Packet, value net.IP) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(FramedIPNetmask_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FramedIPNetmask_Get(p *radius.Packet) (value net.IP) {
|
||||
|
@ -685,7 +685,7 @@ func FramedIPNetmask_Set(p *radius.Packet, value net.IP) (err error) {
|
|||
return
|
||||
}
|
||||
p.Set(FramedIPNetmask_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
type FramedRouting uint32
|
||||
|
@ -714,7 +714,7 @@ func (a FramedRouting) String() string {
|
|||
func FramedRouting_Add(p *radius.Packet, value FramedRouting) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Add(FramedRouting_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FramedRouting_Get(p *radius.Packet) (value FramedRouting) {
|
||||
|
@ -752,7 +752,7 @@ func FramedRouting_Lookup(p *radius.Packet) (value FramedRouting, err error) {
|
|||
func FramedRouting_Set(p *radius.Packet, value FramedRouting) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Set(FramedRouting_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FilterID_Add(p *radius.Packet, value []byte) (err error) {
|
||||
|
@ -762,7 +762,7 @@ func FilterID_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(FilterID_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FilterID_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -772,7 +772,7 @@ func FilterID_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(FilterID_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FilterID_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -862,7 +862,7 @@ func (a FramedMTU) String() string {
|
|||
func FramedMTU_Add(p *radius.Packet, value FramedMTU) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Add(FramedMTU_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FramedMTU_Get(p *radius.Packet) (value FramedMTU) {
|
||||
|
@ -900,7 +900,7 @@ func FramedMTU_Lookup(p *radius.Packet) (value FramedMTU, err error) {
|
|||
func FramedMTU_Set(p *radius.Packet, value FramedMTU) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Set(FramedMTU_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
type FramedCompression uint32
|
||||
|
@ -929,7 +929,7 @@ func (a FramedCompression) String() string {
|
|||
func FramedCompression_Add(p *radius.Packet, value FramedCompression) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Add(FramedCompression_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FramedCompression_Get(p *radius.Packet) (value FramedCompression) {
|
||||
|
@ -967,7 +967,7 @@ func FramedCompression_Lookup(p *radius.Packet) (value FramedCompression, err er
|
|||
func FramedCompression_Set(p *radius.Packet, value FramedCompression) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Set(FramedCompression_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func LoginIPHost_Add(p *radius.Packet, value net.IP) (err error) {
|
||||
|
@ -977,7 +977,7 @@ func LoginIPHost_Add(p *radius.Packet, value net.IP) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(LoginIPHost_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func LoginIPHost_Get(p *radius.Packet) (value net.IP) {
|
||||
|
@ -1014,7 +1014,7 @@ func LoginIPHost_Set(p *radius.Packet, value net.IP) (err error) {
|
|||
return
|
||||
}
|
||||
p.Set(LoginIPHost_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
type LoginService uint32
|
||||
|
@ -1051,7 +1051,7 @@ func (a LoginService) String() string {
|
|||
func LoginService_Add(p *radius.Packet, value LoginService) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Add(LoginService_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func LoginService_Get(p *radius.Packet) (value LoginService) {
|
||||
|
@ -1089,7 +1089,7 @@ func LoginService_Lookup(p *radius.Packet) (value LoginService, err error) {
|
|||
func LoginService_Set(p *radius.Packet, value LoginService) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Set(LoginService_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
type LoginTCPPort uint32
|
||||
|
@ -1116,7 +1116,7 @@ func (a LoginTCPPort) String() string {
|
|||
func LoginTCPPort_Add(p *radius.Packet, value LoginTCPPort) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Add(LoginTCPPort_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func LoginTCPPort_Get(p *radius.Packet) (value LoginTCPPort) {
|
||||
|
@ -1154,7 +1154,7 @@ func LoginTCPPort_Lookup(p *radius.Packet) (value LoginTCPPort, err error) {
|
|||
func LoginTCPPort_Set(p *radius.Packet, value LoginTCPPort) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Set(LoginTCPPort_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func ReplyMessage_Add(p *radius.Packet, value []byte) (err error) {
|
||||
|
@ -1164,7 +1164,7 @@ func ReplyMessage_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(ReplyMessage_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func ReplyMessage_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -1174,7 +1174,7 @@ func ReplyMessage_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(ReplyMessage_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func ReplyMessage_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -1257,7 +1257,7 @@ func CallbackNumber_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(CallbackNumber_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func CallbackNumber_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -1267,7 +1267,7 @@ func CallbackNumber_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(CallbackNumber_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func CallbackNumber_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -1350,7 +1350,7 @@ func CallbackID_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(CallbackID_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func CallbackID_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -1360,7 +1360,7 @@ func CallbackID_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(CallbackID_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func CallbackID_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -1443,7 +1443,7 @@ func FramedRoute_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(FramedRoute_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FramedRoute_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -1453,7 +1453,7 @@ func FramedRoute_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(FramedRoute_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FramedRoute_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -1536,7 +1536,7 @@ func FramedIPXNetwork_Add(p *radius.Packet, value net.IP) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(FramedIPXNetwork_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func FramedIPXNetwork_Get(p *radius.Packet) (value net.IP) {
|
||||
|
@ -1573,7 +1573,7 @@ func FramedIPXNetwork_Set(p *radius.Packet, value net.IP) (err error) {
|
|||
return
|
||||
}
|
||||
p.Set(FramedIPXNetwork_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func State_Add(p *radius.Packet, value []byte) (err error) {
|
||||
|
@ -1583,7 +1583,7 @@ func State_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(State_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func State_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -1593,7 +1593,7 @@ func State_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(State_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func State_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -1676,7 +1676,7 @@ func Class_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(Class_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func Class_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -1686,7 +1686,7 @@ func Class_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(Class_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func Class_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -1776,7 +1776,7 @@ func (a SessionTimeout) String() string {
|
|||
func SessionTimeout_Add(p *radius.Packet, value SessionTimeout) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Add(SessionTimeout_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func SessionTimeout_Get(p *radius.Packet) (value SessionTimeout) {
|
||||
|
@ -1814,7 +1814,7 @@ func SessionTimeout_Lookup(p *radius.Packet) (value SessionTimeout, err error) {
|
|||
func SessionTimeout_Set(p *radius.Packet, value SessionTimeout) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Set(SessionTimeout_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
type IdleTimeout uint32
|
||||
|
@ -1831,7 +1831,7 @@ func (a IdleTimeout) String() string {
|
|||
func IdleTimeout_Add(p *radius.Packet, value IdleTimeout) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Add(IdleTimeout_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func IdleTimeout_Get(p *radius.Packet) (value IdleTimeout) {
|
||||
|
@ -1869,7 +1869,7 @@ func IdleTimeout_Lookup(p *radius.Packet) (value IdleTimeout, err error) {
|
|||
func IdleTimeout_Set(p *radius.Packet, value IdleTimeout) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Set(IdleTimeout_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
type TerminationAction uint32
|
||||
|
@ -1894,7 +1894,7 @@ func (a TerminationAction) String() string {
|
|||
func TerminationAction_Add(p *radius.Packet, value TerminationAction) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Add(TerminationAction_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func TerminationAction_Get(p *radius.Packet) (value TerminationAction) {
|
||||
|
@ -1932,7 +1932,7 @@ func TerminationAction_Lookup(p *radius.Packet) (value TerminationAction, err er
|
|||
func TerminationAction_Set(p *radius.Packet, value TerminationAction) (err error) {
|
||||
a := radius.NewInteger(uint32(value))
|
||||
p.Set(TerminationAction_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func CalledStationID_Add(p *radius.Packet, value []byte) (err error) {
|
||||
|
@ -1942,7 +1942,7 @@ func CalledStationID_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(CalledStationID_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func CalledStationID_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -1952,7 +1952,7 @@ func CalledStationID_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(CalledStationID_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func CalledStationID_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -2035,7 +2035,7 @@ func CallingStationID_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(CallingStationID_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func CallingStationID_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -2045,7 +2045,7 @@ func CallingStationID_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(CallingStationID_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func CallingStationID_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -2128,7 +2128,7 @@ func NASIdentifier_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(NASIdentifier_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func NASIdentifier_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -2138,7 +2138,7 @@ func NASIdentifier_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(NASIdentifier_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func NASIdentifier_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -2221,7 +2221,7 @@ func ProxyState_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(ProxyState_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func ProxyState_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -2231,7 +2231,7 @@ func ProxyState_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(ProxyState_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func ProxyState_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -2314,7 +2314,7 @@ func LoginLATService_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(LoginLATService_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func LoginLATService_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -2324,7 +2324,7 @@ func LoginLATService_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(LoginLATService_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func LoginLATService_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -2407,7 +2407,7 @@ func LoginLATNode_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(LoginLATNode_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func LoginLATNode_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -2417,7 +2417,7 @@ func LoginLATNode_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(LoginLATNode_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func LoginLATNode_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -2500,7 +2500,7 @@ func LoginLATGroup_Add(p *radius.Packet, value []byte) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(LoginLATGroup_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func LoginLATGroup_AddString(p *radius.Packet, value string) (err error) {
|
||||
|
@ -2510,7 +2510,7 @@ func LoginLATGroup_AddString(p *radius.Packet, value string) (err error) {
|
|||
return
|
||||
}
|
||||
p.Add(LoginLATGroup_Type, a)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
func LoginLATGroup_Get(p *radius.Packet) (value []byte) {
|
||||
|
@ -2600,7 +2600,7 @@ func (a FramedAppleTalkLink) String() string {
func FramedAppleTalkLink_Add(p *radius.Packet, value FramedAppleTalkLink) (err error) {
	a := radius.NewInteger(uint32(value))
	p.Add(FramedAppleTalkLink_Type, a)
	return nil
	return
}

func FramedAppleTalkLink_Get(p *radius.Packet) (value FramedAppleTalkLink) {
@ -2638,7 +2638,7 @@ func FramedAppleTalkLink_Lookup(p *radius.Packet) (value FramedAppleTalkLink, er
func FramedAppleTalkLink_Set(p *radius.Packet, value FramedAppleTalkLink) (err error) {
	a := radius.NewInteger(uint32(value))
	p.Set(FramedAppleTalkLink_Type, a)
	return nil
	return
}

type FramedAppleTalkNetwork uint32
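For the integer-valued attributes the generator emits both an *_Add and a *_Set variant, and both now use the same bare-return style. A hedged usage sketch, assuming the vendored layeh.com/radius and layeh.com/radius/rfc2865 import paths and my reading that Packet.Add appends another attribute of the same type while Packet.Set replaces any existing one:

package main

import (
	"layeh.com/radius"
	"layeh.com/radius/rfc2865"
)

func main() {
	p := radius.New(radius.CodeAccessRequest, []byte("shared-secret"))

	// Add appends; a second Add of the same type would leave two copies in the packet.
	rfc2865.FramedAppleTalkLink_Add(p, rfc2865.FramedAppleTalkLink(1))

	// Set overwrites, so the packet ends up carrying a single FramedAppleTalkLink value of 2.
	rfc2865.FramedAppleTalkLink_Set(p, rfc2865.FramedAppleTalkLink(2))
}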
@ -2655,7 +2655,7 @@ func (a FramedAppleTalkNetwork) String() string {
func FramedAppleTalkNetwork_Add(p *radius.Packet, value FramedAppleTalkNetwork) (err error) {
	a := radius.NewInteger(uint32(value))
	p.Add(FramedAppleTalkNetwork_Type, a)
	return nil
	return
}

func FramedAppleTalkNetwork_Get(p *radius.Packet) (value FramedAppleTalkNetwork) {
@ -2693,7 +2693,7 @@ func FramedAppleTalkNetwork_Lookup(p *radius.Packet) (value FramedAppleTalkNetwo
func FramedAppleTalkNetwork_Set(p *radius.Packet, value FramedAppleTalkNetwork) (err error) {
	a := radius.NewInteger(uint32(value))
	p.Set(FramedAppleTalkNetwork_Type, a)
	return nil
	return
}

func FramedAppleTalkZone_Add(p *radius.Packet, value []byte) (err error) {
@ -2703,7 +2703,7 @@ func FramedAppleTalkZone_Add(p *radius.Packet, value []byte) (err error) {
		return
	}
	p.Add(FramedAppleTalkZone_Type, a)
	return nil
	return
}

func FramedAppleTalkZone_AddString(p *radius.Packet, value string) (err error) {
@ -2713,7 +2713,7 @@ func FramedAppleTalkZone_AddString(p *radius.Packet, value string) (err error) {
		return
	}
	p.Add(FramedAppleTalkZone_Type, a)
	return nil
	return
}

func FramedAppleTalkZone_Get(p *radius.Packet) (value []byte) {
@ -2796,7 +2796,7 @@ func CHAPChallenge_Add(p *radius.Packet, value []byte) (err error) {
		return
	}
	p.Add(CHAPChallenge_Type, a)
	return nil
	return
}

func CHAPChallenge_AddString(p *radius.Packet, value string) (err error) {
@ -2806,7 +2806,7 @@ func CHAPChallenge_AddString(p *radius.Packet, value string) (err error) {
		return
	}
	p.Add(CHAPChallenge_Type, a)
	return nil
	return
}

func CHAPChallenge_Get(p *radius.Packet) (value []byte) {
@ -2940,7 +2940,7 @@ func (a NASPortType) String() string {
func NASPortType_Add(p *radius.Packet, value NASPortType) (err error) {
	a := radius.NewInteger(uint32(value))
	p.Add(NASPortType_Type, a)
	return nil
	return
}

func NASPortType_Get(p *radius.Packet) (value NASPortType) {
@ -2978,7 +2978,7 @@ func NASPortType_Lookup(p *radius.Packet) (value NASPortType, err error) {
func NASPortType_Set(p *radius.Packet, value NASPortType) (err error) {
	a := radius.NewInteger(uint32(value))
	p.Set(NASPortType_Type, a)
	return nil
	return
}

type PortLimit uint32
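Taken together, these regenerated helpers are the surface an application actually calls when building or inspecting a packet. A hedged sketch of typical use, assuming the same vendored import paths; the shared secret and attribute values are placeholders:

package main

import (
	"fmt"

	"layeh.com/radius"
	"layeh.com/radius/rfc2865"
)

func main() {
	p := radius.New(radius.CodeAccessRequest, []byte("shared-secret"))

	// String-valued helper from the hunks above.
	if err := rfc2865.NASIdentifier_AddString(p, "nas-01"); err != nil {
		panic(err)
	}

	// Integer-valued helper; 15 is Ethernet in RFC 2865's NAS-Port-Type table.
	if err := rfc2865.NASPortType_Set(p, rfc2865.NASPortType(15)); err != nil {
		panic(err)
	}

	// The matching *_Get helpers read the values back out of the packet.
	fmt.Println(string(rfc2865.NASIdentifier_Get(p)), rfc2865.NASPortType_Get(p))
}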
@ -2995,7 +2995,7 @@ func (a PortLimit) String() string {
func PortLimit_Add(p *radius.Packet, value PortLimit) (err error) {
	a := radius.NewInteger(uint32(value))
	p.Add(PortLimit_Type, a)
	return nil
	return
}

func PortLimit_Get(p *radius.Packet) (value PortLimit) {
@ -3033,7 +3033,7 @@ func PortLimit_Lookup(p *radius.Packet) (value PortLimit, err error) {
func PortLimit_Set(p *radius.Packet, value PortLimit) (err error) {
	a := radius.NewInteger(uint32(value))
	p.Set(PortLimit_Type, a)
	return nil
	return
}

func LoginLATPort_Add(p *radius.Packet, value []byte) (err error) {
@ -3043,7 +3043,7 @@ func LoginLATPort_Add(p *radius.Packet, value []byte) (err error) {
		return
	}
	p.Add(LoginLATPort_Type, a)
	return nil
	return
}

func LoginLATPort_AddString(p *radius.Packet, value string) (err error) {
@ -3053,7 +3053,7 @@ func LoginLATPort_AddString(p *radius.Packet, value string) (err error) {
		return
	}
	p.Add(LoginLATPort_Type, a)
	return nil
	return
}

func LoginLATPort_Get(p *radius.Packet) (value []byte) {
@ -218,7 +218,7 @@ func (s *PacketServer) ListenAndServe() error {
// Shutdown returns nil after all handlers have completed. ctx.Err() is
// returned if ctx is canceled.
//
// Any Serve methods return ErrShutdown if used after Shutdown is called.
// Any Serve methods return ErrShutdown after Shutdown is called.
func (s *PacketServer) Shutdown(ctx context.Context) error {
	s.mu.Lock()
	s.initLocked()
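The hunk above pins down the Shutdown contract: a nil return means every in-flight handler finished, ctx.Err() is returned if the context ends first, and later Serve calls report ErrShutdown. A hedged sketch of driving that contract; names such as StaticSecretSource and HandlerFunc come from my reading of the layeh.com/radius API rather than from this diff:

package main

import (
	"context"
	"log"
	"time"

	"layeh.com/radius"
)

func main() {
	server := &radius.PacketServer{
		Addr:         ":1812",
		SecretSource: radius.StaticSecretSource([]byte("shared-secret")),
		Handler: radius.HandlerFunc(func(w radius.ResponseWriter, r *radius.Request) {
			w.Write(r.Response(radius.CodeAccessReject))
		}),
	}

	go func() {
		if err := server.ListenAndServe(); err != nil {
			log.Println("serve:", err)
		}
	}()

	time.Sleep(time.Second) // let the server run briefly

	// Give in-flight handlers up to five seconds to finish; ctx.Err() is
	// returned if they do not, per the documented contract.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := server.Shutdown(ctx); err != nil {
		log.Println("shutdown:", err)
	}
}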
@ -69,6 +69,8 @@ type ResponseWriter interface {
// authorizing and decrypting packets.
//
// ctx is canceled if the server's Shutdown method is called.
//
// Returning an empty secret will discard the incoming packet.
type SecretSource interface {
	RADIUSSecret(ctx context.Context, remoteAddr net.Addr) ([]byte, error)
}
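SecretSource is the hook for per-client shared secrets; the added doc lines state that ctx is canceled once Shutdown is called and that an empty secret discards the packet. A hedged sketch of a per-address implementation (the address-to-secret map is illustrative, not part of the library):

package main

import (
	"context"
	"net"

	"layeh.com/radius"
)

// mapSecretSource looks up the RADIUS shared secret by client IP.
type mapSecretSource struct {
	secrets map[string][]byte // keyed by client IP, e.g. "192.0.2.10"
}

func (s mapSecretSource) RADIUSSecret(ctx context.Context, remoteAddr net.Addr) ([]byte, error) {
	host, _, err := net.SplitHostPort(remoteAddr.String())
	if err != nil {
		return nil, err
	}
	// An unknown client yields an empty secret, which per the doc comment
	// above causes the server to discard the incoming packet.
	return s.secrets[host], nil
}

var _ radius.SecretSource = mapSecretSource{} // compile-time interface check

func main() {
	_ = &radius.PacketServer{
		Addr: ":1812",
		SecretSource: mapSecretSource{secrets: map[string][]byte{
			"192.0.2.10": []byte("secret-for-nas-01"),
		}},
	}
}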
File diff suppressed because it is too large