Minimal changes to solve Dependency CVEs [VAULT-871] (#11015)
* minimal changes to solve most of the CVEs
* cleanup
* finished go mod vendor upgrades
parent 08d8f65e01
commit 2da7de2fec
@@ -0,0 +1,3 @@
```release-note:improvement
changelog: Add dependencies listed in dependencies/2-25-21
```
@@ -0,0 +1,17 @@
hridoyroy@Hridoys-MBP vault % python3 deps_upgrade.py dep.txt
github.com/satori/go.uuid
golang.org/x/text
github.com/hashicorp/go-gcp-common
github.com/hashicorp/vault-plugin-secrets-azure
go.mongodb.org/mongo-driver
github.com/Microsoft/hcsshim
package github.com/Microsoft/hcsshim
imports github.com/Microsoft/go-winio/pkg/guid
imports golang.org/x/sys/windows: build constraints exclude all Go files in /Users/hridoyroy/go/pkg/mod/golang.org/x/sys@v0.0.0-20210124154548-22da62e12c0c/windows
golang.org/x/crypto
github.com/containerd/containerd
github.com/aws/aws-sdk-go
github.com/hashicorp/serf
github.com/miekg/dns
github.com/hashicorp/go-discover
github.com/hashicorp/serf
@@ -0,0 +1,12 @@
golang.org/x/text
github.com/hashicorp/go-gcp-common
github.com/hashicorp/vault-plugin-secrets-azure
go.mongodb.org/mongo-driver
github.com/Microsoft/hcsshim
golang.org/x/crypto
github.com/containerd/containerd
github.com/aws/aws-sdk-go
github.com/hashicorp/serf
github.com/miekg/dns
github.com/hashicorp/go-discover
github.com/hashicorp/serf
go.mod (17 lines changed)
@@ -13,6 +13,7 @@ require (
	github.com/Azure/azure-storage-blob-go v0.11.0
	github.com/Azure/go-autorest/autorest v0.11.10
	github.com/Azure/go-autorest/autorest/adal v0.9.5
	github.com/Microsoft/hcsshim v0.8.14 // indirect
	github.com/NYTimes/gziphandler v1.1.1
	github.com/SAP/go-hdb v0.14.1
	github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a

@@ -25,12 +26,13 @@ require (
	github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e
	github.com/armon/go-radix v1.0.0
	github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf
	github.com/aws/aws-sdk-go v1.34.28
	github.com/aws/aws-sdk-go v1.37.19
	github.com/bitly/go-hostpool v0.1.0 // indirect
	github.com/cenkalti/backoff/v3 v3.0.0
	github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0
	github.com/client9/misspell v0.3.4
	github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
	github.com/containerd/containerd v1.4.3 // indirect
	github.com/coreos/go-semver v0.2.0
	github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc
	github.com/docker/docker v17.12.0-ce-rc1.0.20200309214505-aa6a9891b09c+incompatible

@@ -56,7 +58,7 @@ require (
	github.com/hashicorp/errwrap v1.1.0
	github.com/hashicorp/go-bindata v3.0.8-0.20180209072458-bf7910af8997+incompatible
	github.com/hashicorp/go-cleanhttp v0.5.1
	github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f
	github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0
	github.com/hashicorp/go-gcp-common v0.6.0
	github.com/hashicorp/go-hclog v0.14.1
	github.com/hashicorp/go-kms-wrapping v0.5.16

@@ -74,6 +76,7 @@ require (
	github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d
	github.com/hashicorp/raft v1.1.3-0.20201002073007-f367681f9c48
	github.com/hashicorp/raft-snapshot v1.0.3
	github.com/hashicorp/serf v0.9.5 // indirect
	github.com/hashicorp/vault-plugin-auth-alicloud v0.7.0
	github.com/hashicorp/vault-plugin-auth-azure v0.6.0
	github.com/hashicorp/vault-plugin-auth-centrify v0.7.0

@@ -90,7 +93,7 @@ require (
	github.com/hashicorp/vault-plugin-mock v0.16.1
	github.com/hashicorp/vault-plugin-secrets-ad v0.8.0
	github.com/hashicorp/vault-plugin-secrets-alicloud v0.7.0
	github.com/hashicorp/vault-plugin-secrets-azure v0.8.0
	github.com/hashicorp/vault-plugin-secrets-azure v0.8.1
	github.com/hashicorp/vault-plugin-secrets-gcp v0.6.6-0.20210121193032-bb12fd5092bd
	github.com/hashicorp/vault-plugin-secrets-gcpkms v0.7.0
	github.com/hashicorp/vault-plugin-secrets-kv v0.7.0

@@ -111,6 +114,7 @@ require (
	github.com/mattn/go-colorable v0.1.7
	github.com/mholt/archiver v3.1.1+incompatible
	github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5
	github.com/miekg/dns v1.1.40 // indirect
	github.com/mitchellh/cli v1.1.1
	github.com/mitchellh/copystructure v1.0.0
	github.com/mitchellh/go-homedir v1.1.0

@@ -149,12 +153,13 @@ require (
	github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da // indirect
	go.etcd.io/bbolt v1.3.5
	go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547
	go.mongodb.org/mongo-driver v1.4.2
	go.mongodb.org/mongo-driver v1.4.6
	go.uber.org/atomic v1.6.0
	golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
	golang.org/x/net v0.0.0-20201002202402-0a1ea396d57c
	golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
	golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
	golang.org/x/text v0.3.5 // indirect
	golang.org/x/tools v0.0.0-20200521155704-91d71f6c2f04
	google.golang.org/api v0.29.0
	google.golang.org/grpc v1.29.1
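Editorial aside, not part of this diff: once go.mod pins the bumped versions above, the versions that actually end up in a Vault binary can be double-checked at runtime with `runtime/debug.ReadBuildInfo`. The module paths in the switch are just a few of the ones touched by this PR; the program itself is a generic sketch, not something added by the commit.

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("binary was not built with module support")
		return
	}
	for _, dep := range info.Deps {
		// Report the resolved versions of a few of the modules bumped above.
		switch dep.Path {
		case "github.com/aws/aws-sdk-go",
			"golang.org/x/crypto",
			"go.mongodb.org/mongo-driver",
			"github.com/miekg/dns":
			fmt.Printf("%s %s\n", dep.Path, dep.Version)
		}
	}
}
```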
go.sum (45 lines changed)
@ -110,8 +110,12 @@ github.com/Microsoft/go-winio v0.4.13/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jB
|
|||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331 h1:3YnB7Hpmh1lPecPE8doMOtYCrMdrpedZOvxfuNES/Vk=
|
||||
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
|
||||
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
|
||||
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
|
||||
github.com/Microsoft/hcsshim v0.8.14 h1:lbPVK25c1cu5xTLITwpUcxoA9vKrKErASPYygvouJns=
|
||||
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
|
@ -172,6 +176,8 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
|
|||
github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||
github.com/aws/aws-sdk-go v1.34.28 h1:sscPpn/Ns3i0F4HPEWAVcwdIRaZZCuL7llJ2/60yPIk=
|
||||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.37.19 h1:/xKHoSsYfH9qe16pJAHIjqTVpMM2DRSsEt8Ok1bzYiw=
|
||||
github.com/aws/aws-sdk-go v1.37.19/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
|
||||
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
|
||||
|
@ -209,6 +215,7 @@ github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d
|
|||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
|
||||
|
@ -228,19 +235,27 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z
|
|||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
|
||||
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
|
||||
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
|
||||
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59 h1:qWj4qVYZ95vLWwqyNJCQg7rDsG5wPdze0UaPolH7DUk=
|
||||
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
|
||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI=
|
||||
github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.4.3 h1:ijQT13JedHSHrQGWFcGEwzcNKrAGIiZ+jSD5QQG07SY=
|
||||
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20200709052629-daa8e1ccc0bc/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
|
||||
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe h1:PEmIrUvwG9Yyv+0WKZqjXfSFDeZjs/q15g0m08BYS9k=
|
||||
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
|
||||
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4=
|
||||
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de h1:dlfGmNcE3jDAecLqwKPMNX6nk2qh1c1Vg1/YTzpOOF4=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
|
@ -263,6 +278,7 @@ github.com/couchbase/gocbcore/v9 v9.0.4 h1:VM7IiKoK25mq9CdFLLchJMzmHa5Grkn+94pQN
|
|||
github.com/couchbase/gocbcore/v9 v9.0.4/go.mod h1:jOSQeBSECyNvD7aS4lfuaw+pD5t6ciTOf8hrDP/4Nus=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
|
@ -553,8 +569,8 @@ github.com/hashicorp/go-bindata v3.0.8-0.20180209072458-bf7910af8997+incompatibl
|
|||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f h1:7WFMVeuJQp6BkzuTv9O52pzwtEFVUJubKYN+zez8eTI=
|
||||
github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f/go.mod h1:D4eo8/CN92vm9/9UDG+ldX1/fMFa4kpl8qzyTolus8o=
|
||||
github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0 h1:UgODETBAoROFMSSVgg0v8vVpD9Tol8FtYcAeomcWJtY=
|
||||
github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0/go.mod h1:D4eo8/CN92vm9/9UDG+ldX1/fMFa4kpl8qzyTolus8o=
|
||||
github.com/hashicorp/go-gatedio v0.5.0 h1:Jm1X5yP4yCqqWj5L1TgW7iZwCVPGtVc+mro5r/XX7Tg=
|
||||
github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4=
|
||||
github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw=
|
||||
|
@ -642,6 +658,8 @@ github.com/hashicorp/raft-snapshot v1.0.3/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHk
|
|||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hashicorp/serf v0.9.4 h1:xrZ4ZR0wT5Dz8oQHHdfOzr0ei1jMToWlFFz3hh/DI7I=
|
||||
github.com/hashicorp/serf v0.9.4/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
|
||||
github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM=
|
||||
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
|
||||
github.com/hashicorp/vault-plugin-auth-alicloud v0.7.0 h1:qCTaVIb+FUWatLXszACH47Q/yTMrbedHt0CMGX9X41w=
|
||||
github.com/hashicorp/vault-plugin-auth-alicloud v0.7.0/go.mod h1:lyfBMcULXKJfVu8dNo1w9bUikV5oem5HKmJoWwXupnY=
|
||||
github.com/hashicorp/vault-plugin-auth-azure v0.6.0 h1:LWUAYV+Td30+AnwjlBiYDk3j4XMDKOyk+OZyBSXU8tk=
|
||||
|
@ -675,8 +693,8 @@ github.com/hashicorp/vault-plugin-secrets-ad v0.8.0 h1:SBOxEjYtxkipmp8AC7Yy3vjBV
|
|||
github.com/hashicorp/vault-plugin-secrets-ad v0.8.0/go.mod h1:L5L6NoJFxRvgxhuA2sWhloc3sbgmE7KxhNcoRxcaH9U=
|
||||
github.com/hashicorp/vault-plugin-secrets-alicloud v0.7.0 h1:VoB3Q11LX+wF5w5TC8j3LYh6PNfeL1XM0zQAMaT04sY=
|
||||
github.com/hashicorp/vault-plugin-secrets-alicloud v0.7.0/go.mod h1:SSkKpSTOMnX84PfgYiWHgwVg+YMhxHNjo+YCJGNBoZk=
|
||||
github.com/hashicorp/vault-plugin-secrets-azure v0.8.0 h1:3BAhoqqDN198vynAfS3rcxUW2STBjREluGPsYCOy2mA=
|
||||
github.com/hashicorp/vault-plugin-secrets-azure v0.8.0/go.mod h1:4jCVjTG809NCQ8mrSnbBtX17gX1Iush+558BVO6MJeo=
|
||||
github.com/hashicorp/vault-plugin-secrets-azure v0.8.1 h1:OVPAfhr4NVNu/CP8lR+QEk3l4uocP+gHRKQtKiB1AUo=
|
||||
github.com/hashicorp/vault-plugin-secrets-azure v0.8.1/go.mod h1:4jCVjTG809NCQ8mrSnbBtX17gX1Iush+558BVO6MJeo=
|
||||
github.com/hashicorp/vault-plugin-secrets-gcp v0.6.6-0.20210121193032-bb12fd5092bd h1:l+yD2mcdsZrtUhakzylE/7cMN6bJMZJU06l2w4MeXNA=
|
||||
github.com/hashicorp/vault-plugin-secrets-gcp v0.6.6-0.20210121193032-bb12fd5092bd/go.mod h1:psRQ/dm5XatoUKLDUeWrpP9icMJNtu/jmscUr37YGK4=
|
||||
github.com/hashicorp/vault-plugin-secrets-gcpkms v0.7.0 h1:dKPQIr6tLcMmhNKdc2A9pbwaIFLooC80UfNZL+jWMlA=
|
||||
|
@ -832,6 +850,8 @@ github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5/go.mod
|
|||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU=
|
||||
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/miekg/dns v1.1.40 h1:pyyPFfGMnciYUk/mXpKkVmeMQjfXqt3FAJ2hy7tPiLA=
|
||||
github.com/miekg/dns v1.1.40/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
|
||||
github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w=
|
||||
|
@ -933,7 +953,10 @@ github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5X
|
|||
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
|
||||
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/openlyinc/pointy v1.1.2 h1:LywVV2BWC5Sp5v7FoP4bUD+2Yn5k0VNeRbU5vq9jUMY=
|
||||
github.com/openlyinc/pointy v1.1.2/go.mod h1:w2Sytx+0FVuMKn37xpXIAyBNhFNBIJGR/v2m7ik1WtM=
|
||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
|
||||
|
@ -1143,6 +1166,7 @@ github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oW
|
|||
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo=
|
||||
github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
|
||||
|
@ -1174,6 +1198,8 @@ go.mongodb.org/atlas v0.5.0 h1:WoeXdXSLCyquhSqSqTa0PwpjxWZbm70E1Gkbx+w3Sco=
|
|||
go.mongodb.org/atlas v0.5.0/go.mod h1:CIaBeO8GLHhtYLw7xSSXsw7N90Z4MFY87Oy9qcPyuEs=
|
||||
go.mongodb.org/mongo-driver v1.4.2 h1:WlnEglfTg/PfPq4WXs2Vkl/5ICC6hoG8+r+LraPmGk4=
|
||||
go.mongodb.org/mongo-driver v1.4.2/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
|
||||
go.mongodb.org/mongo-driver v1.4.6 h1:rh7GdYmDrb8AQSkF8yteAus8qYOgOASWDOv1BWqBXkU=
|
||||
go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
|
||||
go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
|
||||
go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
|
@ -1225,6 +1251,8 @@ golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -1299,6 +1327,8 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
|
|||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201002202402-0a1ea396d57c h1:dk0ukUIHmGHqASjP0iue2261isepFCC6XRCSd1nHgDw=
|
||||
golang.org/x/net v0.0.0-20201002202402-0a1ea396d57c/go.mod h1:iQL9McJNjoIa5mjH6nYTCTZXUN6RP+XW3eib7Ya3XcI=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1364,6 +1394,7 @@ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1380,8 +1411,11 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1390,6 +1424,8 @@ golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5f
|
|||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1432,6 +1468,7 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn
|
|||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
|
|
|
@@ -0,0 +1,10 @@
import os
import sys

filename = sys.argv[1]
with open(filename) as f:
    content = f.readlines()
for l in content:
    name = l.split()[0]
    print(name)
    os.system("go get " + name + "@latest")
@@ -3,7 +3,7 @@ module github.com/Microsoft/go-winio
go 1.12

require (
	github.com/pkg/errors v0.8.1
	github.com/pkg/errors v0.9.1
	github.com/sirupsen/logrus v1.4.1
	golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
)
@@ -2,8 +2,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=

@@ -12,7 +12,5 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -182,13 +182,14 @@ func (s pipeAddress) String() string {
}

// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
	for {
		select {
		case <-ctx.Done():
			return syscall.Handle(0), ctx.Err()
		default:
			h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
			h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
			if err == nil {
				return h, nil
			}

@@ -197,7 +198,7 @@ func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
			}
			// Wait 10 msec and try again. This is a rather simplistic
			// view, as we always try each 10 milliseconds.
			time.Sleep(time.Millisecond * 10)
			time.Sleep(10 * time.Millisecond)
		}
	}
}

@@ -210,7 +211,7 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
	if timeout != nil {
		absTimeout = time.Now().Add(*timeout)
	} else {
		absTimeout = time.Now().Add(time.Second * 2)
		absTimeout = time.Now().Add(2 * time.Second)
	}
	ctx, _ := context.WithDeadline(context.Background(), absTimeout)
	conn, err := DialPipeContext(ctx, path)

@@ -223,9 +224,15 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
	return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE)
}

// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
// cancellation or timeout.
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
	var err error
	var h syscall.Handle
	h, err = tryDialPipe(ctx, &path)
	h, err = tryDialPipe(ctx, &path, access)
	if err != nil {
		return nil, err
	}

@@ -422,10 +429,10 @@ type PipeConfig struct {
	// when the pipe is in message mode.
	MessageMode bool

	// InputBufferSize specifies the size the input buffer, in bytes.
	// InputBufferSize specifies the size of the input buffer, in bytes.
	InputBufferSize int32

	// OutputBufferSize specifies the size the input buffer, in bytes.
	// OutputBufferSize specifies the size of the output buffer, in bytes.
	OutputBufferSize int32
}
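Editorial aside, not part of this diff: the go-winio bump above introduces DialPipeAccess, which lets callers pick the pipe access mask instead of the GENERIC_READ|GENERIC_WRITE default that DialPipeContext hard-codes. A minimal, Windows-only sketch of a caller opening a pipe read-only (the pipe name is a placeholder):

```go
// +build windows

package main

import (
	"context"
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Open the pipe read-only instead of the read/write default.
	conn, err := winio.DialPipeAccess(ctx, `\\.\pipe\example`, windows.GENERIC_READ)
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
}
```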
@@ -24,4 +24,15 @@ const (
	// V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual
	// channel).
	V19H1 = 18362

	// V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual
	// channel).
	V19H2 = 18363

	// V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual
	// channel).
	V20H1 = 19041

	// V20H2 corresponds to Windows Server 20H2 (semi-annual channel).
	V20H2 = 19042
)
@@ -88,10 +88,6 @@ func (c *Client) NewRequest(operation *request.Operation, params interface{}, da
// AddDebugHandlers injects debug logging handlers into the service to log request
// debug information.
func (c *Client) AddDebugHandlers() {
	if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
		return
	}

	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
}
@@ -53,6 +53,10 @@ var LogHTTPRequestHandler = request.NamedHandler{
}

func logRequest(r *request.Request) {
	if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
		return
	}

	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
	bodySeekable := aws.IsReaderSeekable(r.Body)

@@ -120,6 +124,10 @@ var LogHTTPResponseHandler = request.NamedHandler{
}

func logResponse(r *request.Request) {
	if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
		return
	}

	lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}

	if r.HTTPResponse == nil {
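Editorial aside, not part of this diff: logRequest and logResponse above only fire when the configured log level is at least LogDebug, so a quick way to exercise the updated handlers is a session built with an HTTP-body debug level:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Any service client built from this session logs full request/response debug output.
	_ = session.Must(session.NewSession(
		aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody),
	))
}
```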
@@ -438,13 +438,6 @@ func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
	return c
}

// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
	for _, other := range cfgs {
		mergeInConfig(c, other)
	}
}

// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
// when resolving the endpoint for a service
func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {

@@ -459,6 +452,27 @@ func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEn
	return c
}

// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value
// returning a Config pointer for chaining.
func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config {
	c.LowerCaseHeaderMaps = &t
	return c
}

// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value
// returning a Config pointer for chaining.
func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config {
	c.DisableRestProtocolURICleaning = &t
	return c
}

// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
	for _, other := range cfgs {
		mergeInConfig(c, other)
	}
}

func mergeInConfig(dst *Config, other *Config) {
	if other == nil {
		return

@@ -571,6 +585,10 @@ func mergeInConfig(dst *Config, other *Config) {
	if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
		dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
	}

	if other.LowerCaseHeaderMaps != nil {
		dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps
	}
}

// Copy will return a shallow copy of the Config object. If any additional
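Editorial aside, not part of this diff: the two setters added above (WithLowerCaseHeaderMaps, WithDisableRestProtocolURICleaning) follow the SDK's usual chained-Config style. A small sketch of how a caller would use one of them:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	cfg := aws.NewConfig().
		WithRegion("us-east-1").
		WithLowerCaseHeaderMaps(true) // normalize response header-map keys to lower case

	fmt.Println(*cfg.Region, *cfg.LowerCaseHeaderMaps)
}
```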
@@ -50,7 +50,7 @@ package credentials

import (
	"fmt"
	"sync/atomic"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"

@@ -173,7 +173,9 @@ type Expiry struct {
// the expiration time given to ensure no requests are made with expired
// tokens.
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
	e.expiration = expiration
	// Passed in expirations should have the monotonic clock values stripped.
	// This ensures time comparisons will be based on wall-time.
	e.expiration = expiration.Round(0)
	if window > 0 {
		e.expiration = e.expiration.Add(-window)
	}

@@ -205,9 +207,10 @@ func (e *Expiry) ExpiresAt() time.Time {
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
	creds atomic.Value
	sf singleflight.Group

	m        sync.RWMutex
	creds    Value
	provider Provider
}

@@ -216,7 +219,6 @@ func NewCredentials(provider Provider) *Credentials {
	c := &Credentials{
		provider: provider,
	}
	c.creds.Store(Value{})
	return c
}

@@ -233,8 +235,17 @@ func NewCredentials(provider Provider) *Credentials {
//
// Passed in Context is equivalent to aws.Context, and context.Context.
func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
	if curCreds := c.creds.Load(); !c.isExpired(curCreds) {
		return curCreds.(Value), nil
	// Check if credentials are cached, and not expired.
	select {
	case curCreds, ok := <-c.asyncIsExpired():
		// ok will only be true, of the credentials were not expired. ok will
		// be false and have no value if the credentials are expired.
		if ok {
			return curCreds, nil
		}
	case <-ctx.Done():
		return Value{}, awserr.New("RequestCanceled",
			"request context canceled", ctx.Err())
	}

	// Cannot pass context down to the actual retrieve, because the first

@@ -252,18 +263,23 @@ func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
	}
}

func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) {
	if curCreds := c.creds.Load(); !c.isExpired(curCreds) {
		return curCreds.(Value), nil
func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) {
	c.m.Lock()
	defer c.m.Unlock()

	if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
		return curCreds, nil
	}

	var creds Value
	var err error
	if p, ok := c.provider.(ProviderWithContext); ok {
		creds, err = p.RetrieveWithContext(ctx)
	} else {
		creds, err = c.provider.Retrieve()
	}
	if err == nil {
		c.creds.Store(creds)
		c.creds = creds
	}

	return creds, err

@@ -288,7 +304,10 @@ func (c *Credentials) Get() (Value, error) {
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
	c.creds.Store(Value{})
	c.m.Lock()
	defer c.m.Unlock()

	c.creds = Value{}
}

// IsExpired returns if the credentials are no longer valid, and need

@@ -297,11 +316,32 @@ func (c *Credentials) Expire() {
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
	return c.isExpired(c.creds.Load())
	c.m.RLock()
	defer c.m.RUnlock()

	return c.isExpiredLocked(c.creds)
}

// isExpired helper method wrapping the definition of expired credentials.
func (c *Credentials) isExpired(creds interface{}) bool {
// asyncIsExpired returns a channel of credentials Value. If the channel is
// closed the credentials are expired and credentials value are not empty.
func (c *Credentials) asyncIsExpired() <-chan Value {
	ch := make(chan Value, 1)
	go func() {
		c.m.RLock()
		defer c.m.RUnlock()

		if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
			ch <- curCreds
		}

		close(ch)
	}()

	return ch
}

// isExpiredLocked helper method wrapping the definition of expired credentials.
func (c *Credentials) isExpiredLocked(creds interface{}) bool {
	return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired()
}

@@ -309,13 +349,17 @@ func (c *Credentials) isExpired(creds interface{}) bool {
// the underlying Provider, if it supports that interface. Otherwise, it returns
// an error.
func (c *Credentials) ExpiresAt() (time.Time, error) {
	c.m.RLock()
	defer c.m.RUnlock()

	expirer, ok := c.provider.(Expirer)
	if !ok {
		return time.Time{}, awserr.New("ProviderNotExpirer",
			fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName),
			fmt.Sprintf("provider %s does not support ExpiresAt()",
				c.creds.ProviderName),
			nil)
	}
	if c.creds.Load().(Value) == (Value{}) {
	if c.creds == (Value{}) {
		// set expiration time to the distant past
		return time.Time{}, nil
	}
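Editorial aside, not part of this diff: the refactor above swaps atomic.Value for an RWMutex-guarded Value alongside the existing singleflight group, but the caller-facing contract stays the same — concurrent Get/GetWithContext calls on cold credentials are coalesced into a single Retrieve. A toy provider illustrating that contract (the provider type and its field names are invented for the example):

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// toyProvider counts how many times the SDK actually calls Retrieve.
type toyProvider struct{ calls int }

func (p *toyProvider) Retrieve() (credentials.Value, error) {
	p.calls++
	return credentials.Value{AccessKeyID: "AKID", SecretAccessKey: "SECRET", ProviderName: "toy"}, nil
}

func (p *toyProvider) IsExpired() bool { return false }

func main() {
	p := &toyProvider{}
	creds := credentials.NewCredentials(p)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if _, err := creds.GetWithContext(context.Background()); err != nil {
				fmt.Println("get failed:", err)
			}
		}()
	}
	wg.Wait()
	// Typically 1: concurrent fetches share one Retrieve and then hit the cache.
	fmt.Println("Retrieve calls:", p.calls)
}
```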
@ -0,0 +1,60 @@
|
|||
// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
|
||||
//
|
||||
// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
|
||||
// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by
|
||||
// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
|
||||
// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned.
|
||||
//
|
||||
// Loading AWS SSO credentials with the AWS shared configuration file
|
||||
//
|
||||
// You can use configure AWS SSO credentials from the AWS shared configuration file by
|
||||
// providing the specifying the required keys in the profile:
|
||||
//
|
||||
// sso_account_id
|
||||
// sso_region
|
||||
// sso_role_name
|
||||
// sso_start_url
|
||||
//
|
||||
// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that defines the target
|
||||
// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
|
||||
// provided, or an error will be returned.
|
||||
//
|
||||
// [profile devsso]
|
||||
// sso_start_url = https://my-sso-portal.awsapps.com/start
|
||||
// sso_role_name = SSOReadOnlyRole
|
||||
// sso_region = us-east-1
|
||||
// sso_account_id = 123456789012
|
||||
//
|
||||
// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to
|
||||
// retrieve credentials. For example:
|
||||
//
|
||||
// sess, err := session.NewSessionWithOptions(session.Options{
|
||||
// SharedConfigState: session.SharedConfigEnable,
|
||||
// Profile: "devsso",
|
||||
// })
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// Programmatically loading AWS SSO credentials directly
|
||||
//
|
||||
// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
|
||||
// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
|
||||
//
|
||||
// svc := sso.New(sess, &aws.Config{
|
||||
// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region
|
||||
// })
|
||||
//
|
||||
// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start")
|
||||
//
|
||||
// credentials, err := provider.Get()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// Additional Resources
|
||||
//
|
||||
// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
|
||||
//
|
||||
// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
|
||||
package ssocreds
|
|
@@ -0,0 +1,9 @@
// +build !windows

package ssocreds

import "os"

func getHomeDirectory() string {
	return os.Getenv("HOME")
}
vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go (generated, vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
package ssocreds

import "os"

func getHomeDirectory() string {
	return os.Getenv("USERPROFILE")
}
vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go (generated, vendored, new file, 180 lines)
@ -0,0 +1,180 @@
|
|||
package ssocreds
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/service/sso"
|
||||
"github.com/aws/aws-sdk-go/service/sso/ssoiface"
|
||||
)
|
||||
|
||||
// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid.
|
||||
// To refresh the SSO session run aws sso login with the corresponding profile.
|
||||
const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken"
|
||||
|
||||
const invalidTokenMessage = "the SSO session has expired or is invalid"
|
||||
|
||||
func init() {
|
||||
nowTime = time.Now
|
||||
defaultCacheLocation = defaultCacheLocationImpl
|
||||
}
|
||||
|
||||
var nowTime func() time.Time
|
||||
|
||||
// ProviderName is the name of the provider used to specify the source of credentials.
|
||||
const ProviderName = "SSOProvider"
|
||||
|
||||
var defaultCacheLocation func() string
|
||||
|
||||
func defaultCacheLocationImpl() string {
|
||||
return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache")
|
||||
}
|
||||
|
||||
// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token.
|
||||
type Provider struct {
|
||||
credentials.Expiry
|
||||
|
||||
// The Client which is configured for the AWS Region where the AWS SSO user portal is located.
|
||||
Client ssoiface.SSOAPI
|
||||
|
||||
// The AWS account that is assigned to the user.
|
||||
AccountID string
|
||||
|
||||
// The role name that is assigned to the user.
|
||||
RoleName string
|
||||
|
||||
// The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal.
|
||||
StartURL string
|
||||
}
|
||||
|
||||
// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured
|
||||
// for the AWS Region where the AWS SSO user portal is located.
|
||||
func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
|
||||
return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...)
|
||||
}
|
||||
|
||||
// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured
|
||||
// for the AWS Region where the AWS SSO user portal is located.
|
||||
func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
|
||||
p := &Provider{
|
||||
Client: client,
|
||||
AccountID: accountID,
|
||||
RoleName: roleName,
|
||||
StartURL: startURL,
|
||||
}
|
||||
|
||||
for _, fn := range optFns {
|
||||
fn(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
|
||||
// by exchanging the accessToken present in ~/.aws/sso/cache.
|
||||
func (p *Provider) Retrieve() (credentials.Value, error) {
|
||||
return p.RetrieveWithContext(aws.BackgroundContext())
|
||||
}
|
||||
|
||||
// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
|
||||
// by exchanging the accessToken present in ~/.aws/sso/cache.
|
||||
func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
|
||||
tokenFile, err := loadTokenFile(p.StartURL)
|
||||
if err != nil {
|
||||
return credentials.Value{}, err
|
||||
}
|
||||
|
||||
output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{
|
||||
AccessToken: &tokenFile.AccessToken,
|
||||
AccountId: &p.AccountID,
|
||||
RoleName: &p.RoleName,
|
||||
})
|
||||
if err != nil {
|
||||
return credentials.Value{}, err
|
||||
}
|
||||
|
||||
expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC()
|
||||
p.SetExpiration(expireTime, 0)
|
||||
|
||||
return credentials.Value{
|
||||
AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId),
|
||||
SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey),
|
||||
SessionToken: aws.StringValue(output.RoleCredentials.SessionToken),
|
||||
ProviderName: ProviderName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getCacheFileName(url string) (string, error) {
|
||||
hash := sha1.New()
|
||||
_, err := hash.Write([]byte(url))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil
|
||||
}
|
||||
|
||||
type rfc3339 time.Time
|
||||
|
||||
func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
|
||||
var value string
|
||||
|
||||
if err := json.Unmarshal(bytes, &value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parse, err := time.Parse(time.RFC3339, value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("expected RFC3339 timestamp: %v", err)
|
||||
}
|
||||
|
||||
*r = rfc3339(parse)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type token struct {
|
||||
AccessToken string `json:"accessToken"`
|
||||
ExpiresAt rfc3339 `json:"expiresAt"`
|
||||
Region string `json:"region,omitempty"`
|
||||
StartURL string `json:"startUrl,omitempty"`
|
||||
}
|
||||
|
||||
func (t token) Expired() bool {
|
||||
return nowTime().Round(0).After(time.Time(t.ExpiresAt))
|
||||
}
|
||||
|
||||
func loadTokenFile(startURL string) (t token, err error) {
|
||||
key, err := getCacheFileName(startURL)
|
||||
if err != nil {
|
||||
return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
|
||||
}
|
||||
|
||||
fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key))
|
||||
if err != nil {
|
||||
return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(fileBytes, &t); err != nil {
|
||||
return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
|
||||
}
|
||||
|
||||
if len(t.AccessToken) == 0 {
|
||||
return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
|
||||
}
|
||||
|
||||
if t.Expired() {
|
||||
return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
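Editorial aside, not part of this diff: the new ssocreds provider vendored above resolves its cached login token from ~/.aws/sso/cache, keyed by the SHA-1 of the SSO start URL. A standalone sketch of that lookup (the start URL is a placeholder):

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	startURL := "https://my-sso-portal.awsapps.com/start"

	// Cache file name: lower-case hex SHA-1 of the start URL, plus ".json".
	sum := sha1.Sum([]byte(startURL))
	name := strings.ToLower(hex.EncodeToString(sum[:])) + ".json"

	home, err := os.UserHomeDir()
	if err != nil {
		fmt.Println("cannot resolve home directory:", err)
		return
	}
	fmt.Println(filepath.Join(home, ".aws", "sso", "cache", name))
}
```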
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go (generated, vendored, 14 lines changed)
@@ -95,7 +95,7 @@ import (
// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
// An error is returned if reading from stdin fails.
//
// Use this function go read MFA tokens from stdin. The function makes no attempt
// Use this function to read MFA tokens from stdin. The function makes no attempt
// to make atomic prompts from stdin across multiple gorouties.
//
// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will

@@ -244,9 +244,11 @@ type AssumeRoleProvider struct {
	MaxJitterFrac float64
}

// NewCredentials returns a pointer to a new Credentials object wrapping the
// NewCredentials returns a pointer to a new Credentials value wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
// role will be named after a nanosecond timestamp of this operation. The
// Credentials value will attempt to refresh the credentials using the provider
// when Credentials.Get is called, if the cached credentials are expiring.
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.

@@ -268,9 +270,11 @@ func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*As
	return credentials.NewCredentials(p)
}

// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
// role will be named after a nanosecond timestamp of this operation. The
// Credentials value will attempt to refresh the credentials using the provider
// when Credentials.Get is called, if the cached credentials are expiring.
//
// Takes an AssumeRoler which can be satisfied by the STS client.
//
@@ -87,6 +87,7 @@ func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) {
	// If the error code status is 401, we enable the token provider
	if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil &&
		e.StatusCode() == http.StatusUnauthorized {
		t.token.Store(ec2Token{})
		atomic.StoreUint32(&t.disabled, 0)
	}
}
File diff suppressed because it is too large.
@@ -9,6 +9,7 @@ import (
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
	"github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/request"

@@ -100,6 +101,9 @@ func resolveCredsFromProfile(cfg *aws.Config,
			sharedCfg.Creds,
		)

	case sharedCfg.hasSSOConfiguration():
		creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers)

	case len(sharedCfg.CredentialProcess) != 0:
		// Get credentials from CredentialProcess
		creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)

@@ -151,6 +155,25 @@ func resolveCredsFromProfile(cfg *aws.Config,
	return creds, nil
}

func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) {
	if err := sharedCfg.validateSSOConfiguration(); err != nil {
		return nil, err
	}

	cfgCopy := cfg.Copy()
	cfgCopy.Region = &sharedCfg.SSORegion

	return ssocreds.NewCredentials(
		&Session{
			Config:   cfgCopy,
			Handlers: handlers.Copy(),
		},
		sharedCfg.SSOAccountID,
		sharedCfg.SSORoleName,
		sharedCfg.SSOStartURL,
	), nil
}

// valid credential source values
const (
	credSourceEc2Metadata = "Ec2InstanceMetadata"
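Editorial aside, not part of this diff: resolveSSOCredentials above is reached when the selected shared-config profile carries the sso_* keys. A sketch of the caller side ("devsso" is a placeholder profile name):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess, err := session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
		Profile:           "devsso", // profile defining sso_account_id, sso_region, sso_role_name, sso_start_url
	})
	if err != nil {
		fmt.Println("session error:", err)
		return
	}

	creds, err := sess.Config.Credentials.Get()
	if err != nil {
		fmt.Println("credential error:", err)
		return
	}
	fmt.Println("resolved via:", creds.ProviderName)
}
```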
@@ -0,0 +1,27 @@
// +build go1.13

package session

import (
	"net"
	"net/http"
	"time"
)

// Transport that should be used when a custom CA bundle is specified with the
// SDK.
func getCustomTransport() *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).DialContext,
		ForceAttemptHTTP2:     true,
		MaxIdleConns:          100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
}
@@ -1,4 +1,4 @@
// +build go1.7
// +build !go1.13,go1.7

package session

@@ -10,7 +10,7 @@ import (

// Transport that should be used when a custom CA bundle is specified with the
// SDK.
func getCABundleTransport() *http.Transport {
func getCustomTransport() *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{

@@ -10,7 +10,7 @@ import (

// Transport that should be used when a custom CA bundle is specified with the
// SDK.
func getCABundleTransport() *http.Transport {
func getCustomTransport() *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{

@@ -10,7 +10,7 @@ import (

// Transport that should be used when a custom CA bundle is specified with the
// SDK.
func getCABundleTransport() *http.Transport {
func getCustomTransport() *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
@@ -208,6 +208,8 @@ env values as well.

AWS_SDK_LOAD_CONFIG=1

Custom Shared Config and Credential Files

Shared credentials file path can be set to instruct the SDK to use an alternative
file for the shared credentials. If not set the file will be loaded from
$HOME/.aws/credentials on Linux/Unix based systems, and

@@ -222,6 +224,8 @@ $HOME/.aws/config on Linux/Unix based systems, and

AWS_CONFIG_FILE=$HOME/my_shared_config

Custom CA Bundle

Path to a custom Credentials Authority (CA) bundle PEM file that the SDK
will use instead of the default system's root CA bundle. Use this only
if you want to replace the CA bundle the SDK uses for TLS requests.
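A minimal sketch of the two CA-bundle mechanisms described above, assuming a hypothetical PEM path; the session option is an io.Reader and, per the session code later in this diff, takes precedence over AWS_CA_BUNDLE:

```go
package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Option 1: point the SDK at the bundle via the environment
	// (equivalent to exporting AWS_CA_BUNDLE before the process starts).
	// os.Setenv("AWS_CA_BUNDLE", "/etc/ssl/private/ca-bundle.pem") // hypothetical path

	// Option 2: pass the PEM bytes directly through the session option,
	// which wins over the environment variable.
	f, err := os.Open("/etc/ssl/private/ca-bundle.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	sess, err := session.NewSessionWithOptions(session.Options{
		CustomCABundle: f, // io.Reader; every certificate in the bundle is loaded
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = sess
}
```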
@@ -242,6 +246,29 @@ Setting a custom HTTPClient in the aws.Config options will override this setting
To use this option and custom HTTP client, the HTTP client needs to be provided
when creating the session. Not the service client.

Custom Client TLS Certificate

The SDK supports the environment and session option being configured with
Client TLS certificates that are sent as a part of the client's TLS handshake
for client authentication. If used, both Cert and Key values are required. If
one is missing, or either fail to load the contents of the file an error will
be returned.

HTTP Client's Transport concrete implementation must be a http.Transport
or creating the session will fail.

AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert

This can also be configured via the session.Options ClientTLSCert and ClientTLSKey.

sess, err := session.NewSessionWithOptions(session.Options{
	ClientTLSCert: myCertFile,
	ClientTLSKey:  myKeyFile,
})
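Expanding the snippet above: a hedged sketch of where myCertFile/myKeyFile would typically come from (paths are hypothetical); supplying only one of the two readers makes session creation fail with LoadClientTLSCertError:

```go
package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Hypothetical paths; the env-var form would use
	// AWS_SDK_GO_CLIENT_TLS_CERT / AWS_SDK_GO_CLIENT_TLS_KEY instead.
	certFile, err := os.Open("/etc/pki/client.crt")
	if err != nil {
		log.Fatal(err)
	}
	defer certFile.Close()

	keyFile, err := os.Open("/etc/pki/client.key")
	if err != nil {
		log.Fatal(err)
	}
	defer keyFile.Close()

	// Both readers must be set; providing only one returns an error
	// from NewSessionWithOptions.
	sess, err := session.NewSessionWithOptions(session.Options{
		ClientTLSCert: certFile,
		ClientTLSKey:  keyFile,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = sess
}
```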
Custom EC2 IMDS Endpoint

The endpoint of the EC2 IMDS client can be configured via the environment
variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
Session. See Options.EC2IMDSEndpoint for more details.
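A short sketch of the session-option form of the IMDS endpoint override, assuming a hypothetical local endpoint such as an IMDS mock:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Equivalent to exporting AWS_EC2_METADATA_SERVICE_ENDPOINT; the URL here
	// is a hypothetical local IMDS mock, not a real metadata endpoint.
	sess, err := session.NewSessionWithOptions(session.Options{
		EC2IMDSEndpoint: "http://127.0.0.1:1338",
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = sess
}
```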
@ -101,6 +101,18 @@ type envConfig struct {
|
|||
// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
|
||||
CustomCABundle string
|
||||
|
||||
// Sets the TLC client certificate that should be used by the SDK's HTTP transport
|
||||
// when making requests. The certificate must be paired with a TLS client key file.
|
||||
//
|
||||
// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
|
||||
ClientTLSCert string
|
||||
|
||||
// Sets the TLC client key that should be used by the SDK's HTTP transport
|
||||
// when making requests. The key must be paired with a TLS client certificate file.
|
||||
//
|
||||
// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
|
||||
ClientTLSKey string
|
||||
|
||||
csmEnabled string
|
||||
CSMEnabled *bool
|
||||
CSMPort string
|
||||
|
@ -219,6 +231,15 @@ var (
|
|||
ec2IMDSEndpointEnvKey = []string{
|
||||
"AWS_EC2_METADATA_SERVICE_ENDPOINT",
|
||||
}
|
||||
useCABundleKey = []string{
|
||||
"AWS_CA_BUNDLE",
|
||||
}
|
||||
useClientTLSCert = []string{
|
||||
"AWS_SDK_GO_CLIENT_TLS_CERT",
|
||||
}
|
||||
useClientTLSKey = []string{
|
||||
"AWS_SDK_GO_CLIENT_TLS_KEY",
|
||||
}
|
||||
)
|
||||
|
||||
// loadEnvConfig retrieves the SDK's environment configuration.
|
||||
|
@ -302,7 +323,9 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
|
|||
cfg.SharedConfigFile = defaults.SharedConfigFilename()
|
||||
}
|
||||
|
||||
cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
|
||||
setFromEnvVal(&cfg.CustomCABundle, useCABundleKey)
|
||||
setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert)
|
||||
setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey)
|
||||
|
||||
var err error
|
||||
// STS Regional Endpoint variable
|
||||
|
|
|
@ -25,11 +25,18 @@ const (
|
|||
// ErrCodeSharedConfig represents an error that occurs in the shared
|
||||
// configuration logic
|
||||
ErrCodeSharedConfig = "SharedConfigErr"
|
||||
|
||||
// ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle.
|
||||
ErrCodeLoadCustomCABundle = "LoadCustomCABundleError"
|
||||
|
||||
// ErrCodeLoadClientTLSCert error code for unable to load client TLS
|
||||
// certificate or key
|
||||
ErrCodeLoadClientTLSCert = "LoadClientTLSCertError"
|
||||
)
|
||||
|
||||
// ErrSharedConfigSourceCollision will be returned if a section contains both
|
||||
// source_profile and credential_source
|
||||
var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
|
||||
var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil)
|
||||
|
||||
// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
|
||||
// variables are empty and Environment was set as the credential source
|
||||
|
@ -229,17 +236,46 @@ type Options struct {
|
|||
// the SDK will use instead of the default system's root CA bundle. Use this
|
||||
// only if you want to replace the CA bundle the SDK uses for TLS requests.
|
||||
//
|
||||
// Enabling this option will attempt to merge the Transport into the SDK's HTTP
|
||||
// client. If the client's Transport is not a http.Transport an error will be
|
||||
// returned. If the Transport's TLS config is set this option will cause the SDK
|
||||
// HTTP Client's Transport concrete implementation must be a http.Transport
|
||||
// or creating the session will fail.
|
||||
//
|
||||
// If the Transport's TLS config is set this option will cause the SDK
|
||||
// to overwrite the Transport's TLS config's RootCAs value. If the CA
|
||||
// bundle reader contains multiple certificates all of them will be loaded.
|
||||
//
|
||||
// The Session option CustomCABundle is also available when creating sessions
|
||||
// to also enable this feature. CustomCABundle session option field has priority
|
||||
// over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
|
||||
// Can also be specified via the environment variable:
|
||||
//
|
||||
// AWS_CA_BUNDLE=$HOME/ca_bundle
|
||||
//
|
||||
// Can also be specified via the shared config field:
|
||||
//
|
||||
// ca_bundle = $HOME/ca_bundle
|
||||
CustomCABundle io.Reader
|
||||
|
||||
// Reader for the TLC client certificate that should be used by the SDK's
|
||||
// HTTP transport when making requests. The certificate must be paired with
|
||||
// a TLS client key file. Will be ignored if both are not provided.
|
||||
//
|
||||
// HTTP Client's Transport concrete implementation must be a http.Transport
|
||||
// or creating the session will fail.
|
||||
//
|
||||
// Can also be specified via the environment variable:
|
||||
//
|
||||
// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
|
||||
ClientTLSCert io.Reader
|
||||
|
||||
// Reader for the TLC client key that should be used by the SDK's HTTP
|
||||
// transport when making requests. The key must be paired with a TLS client
|
||||
// certificate file. Will be ignored if both are not provided.
|
||||
//
|
||||
// HTTP Client's Transport concrete implementation must be a http.Transport
|
||||
// or creating the session will fail.
|
||||
//
|
||||
// Can also be specified via the environment variable:
|
||||
//
|
||||
// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
|
||||
ClientTLSKey io.Reader
|
||||
|
||||
// The handlers that the session and all API clients will be created with.
|
||||
// This must be a complete set of handlers. Use the defaults.Handlers()
|
||||
// function to initialize this value before changing the handlers to be
|
||||
|
@ -319,17 +355,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
|
|||
envCfg.EnableSharedConfig = true
|
||||
}
|
||||
|
||||
// Only use AWS_CA_BUNDLE if session option is not provided.
|
||||
if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
|
||||
f, err := os.Open(envCfg.CustomCABundle)
|
||||
if err != nil {
|
||||
return nil, awserr.New("LoadCustomCABundleError",
|
||||
"failed to open custom CA bundle PEM file", err)
|
||||
}
|
||||
defer f.Close()
|
||||
opts.CustomCABundle = f
|
||||
}
|
||||
|
||||
return newSession(opts, envCfg, &opts.Config)
|
||||
}
|
||||
|
||||
|
@ -460,6 +485,10 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := &Session{
|
||||
Config: cfg,
|
||||
Handlers: handlers,
|
||||
|
@ -479,13 +508,6 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
|
|||
}
|
||||
}
|
||||
|
||||
// Setup HTTP client with custom cert bundle if enabled
|
||||
if opts.CustomCABundle != nil {
|
||||
if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
|
@ -529,22 +551,83 @@ func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
|
|||
return csmConfig{}, nil
|
||||
}
|
||||
|
||||
func loadCustomCABundle(s *Session, bundle io.Reader) error {
|
||||
func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
|
||||
// CA Bundle can be specified in both environment variable shared config file.
|
||||
var caBundleFilename = envCfg.CustomCABundle
|
||||
if len(caBundleFilename) == 0 {
|
||||
caBundleFilename = sharedCfg.CustomCABundle
|
||||
}
|
||||
|
||||
// Only use environment value if session option is not provided.
|
||||
customTLSOptions := map[string]struct {
|
||||
filename string
|
||||
field *io.Reader
|
||||
errCode string
|
||||
}{
|
||||
"custom CA bundle PEM": {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
|
||||
"custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
|
||||
"custom client TLS key": {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
|
||||
}
|
||||
for name, v := range customTLSOptions {
|
||||
if len(v.filename) != 0 && *v.field == nil {
|
||||
f, err := os.Open(v.filename)
|
||||
if err != nil {
|
||||
return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
|
||||
}
|
||||
defer f.Close()
|
||||
*v.field = f
|
||||
}
|
||||
}
|
||||
|
||||
// Setup HTTP client with custom cert bundle if enabled
|
||||
if opts.CustomCABundle != nil {
|
||||
if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Setup HTTP client TLS certificate and key for client TLS authentication.
|
||||
if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
|
||||
if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
|
||||
// Do nothing if neither values are available.
|
||||
|
||||
} else {
|
||||
return awserr.New(ErrCodeLoadClientTLSCert,
|
||||
fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
|
||||
opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getHTTPTransport(client *http.Client) (*http.Transport, error) {
|
||||
var t *http.Transport
|
||||
switch v := s.Config.HTTPClient.Transport.(type) {
|
||||
switch v := client.Transport.(type) {
|
||||
case *http.Transport:
|
||||
t = v
|
||||
default:
|
||||
if s.Config.HTTPClient.Transport != nil {
|
||||
return awserr.New("LoadCustomCABundleError",
|
||||
"unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
|
||||
if client.Transport != nil {
|
||||
return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
|
||||
}
|
||||
}
|
||||
if t == nil {
|
||||
// Nil transport implies `http.DefaultTransport` should be used. Since
|
||||
// the SDK cannot modify, nor copy the `DefaultTransport` specifying
|
||||
// the values the next closest behavior.
|
||||
t = getCABundleTransport()
|
||||
t = getCustomTransport()
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
|
||||
t, err := getHTTPTransport(client)
|
||||
if err != nil {
|
||||
return awserr.New(ErrCodeLoadCustomCABundle,
|
||||
"unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
|
||||
}
|
||||
|
||||
p, err := loadCertPool(bundle)
|
||||
|
@ -556,7 +639,7 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
|
|||
}
|
||||
t.TLSClientConfig.RootCAs = p
|
||||
|
||||
s.Config.HTTPClient.Transport = t
|
||||
client.Transport = t
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -564,19 +647,57 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
|
|||
func loadCertPool(r io.Reader) (*x509.CertPool, error) {
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, awserr.New("LoadCustomCABundleError",
|
||||
return nil, awserr.New(ErrCodeLoadCustomCABundle,
|
||||
"failed to read custom CA bundle PEM file", err)
|
||||
}
|
||||
|
||||
p := x509.NewCertPool()
|
||||
if !p.AppendCertsFromPEM(b) {
|
||||
return nil, awserr.New("LoadCustomCABundleError",
|
||||
return nil, awserr.New(ErrCodeLoadCustomCABundle,
|
||||
"failed to load custom CA bundle PEM file", err)
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
|
||||
t, err := getHTTPTransport(client)
|
||||
if err != nil {
|
||||
return awserr.New(ErrCodeLoadClientTLSCert,
|
||||
"unable to get usable HTTP transport from client", err)
|
||||
}
|
||||
|
||||
cert, err := ioutil.ReadAll(certFile)
|
||||
if err != nil {
|
||||
return awserr.New(ErrCodeLoadClientTLSCert,
|
||||
"unable to get read client TLS cert file", err)
|
||||
}
|
||||
|
||||
key, err := ioutil.ReadAll(keyFile)
|
||||
if err != nil {
|
||||
return awserr.New(ErrCodeLoadClientTLSCert,
|
||||
"unable to get read client TLS key file", err)
|
||||
}
|
||||
|
||||
clientCert, err := tls.X509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
return awserr.New(ErrCodeLoadClientTLSCert,
|
||||
"unable to load x509 key pair from client cert", err)
|
||||
}
|
||||
|
||||
tlsCfg := t.TLSClientConfig
|
||||
if tlsCfg == nil {
|
||||
tlsCfg = &tls.Config{}
|
||||
}
|
||||
|
||||
tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
|
||||
|
||||
t.TLSClientConfig = tlsCfg
|
||||
client.Transport = t
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func mergeConfigSrcs(cfg, userCfg *aws.Config,
|
||||
envCfg envConfig, sharedCfg sharedConfig,
|
||||
handlers request.Handlers,
|
||||
|
|
|
@ -2,6 +2,7 @@ package session
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
|
@ -25,6 +26,12 @@ const (
|
|||
roleSessionNameKey = `role_session_name` // optional
|
||||
roleDurationSecondsKey = "duration_seconds" // optional
|
||||
|
||||
// AWS Single Sign-On (AWS SSO) group
|
||||
ssoAccountIDKey = "sso_account_id"
|
||||
ssoRegionKey = "sso_region"
|
||||
ssoRoleNameKey = "sso_role_name"
|
||||
ssoStartURL = "sso_start_url"
|
||||
|
||||
// CSM options
|
||||
csmEnabledKey = `csm_enabled`
|
||||
csmHostKey = `csm_host`
|
||||
|
@ -34,6 +41,9 @@ const (
|
|||
// Additional Config fields
|
||||
regionKey = `region`
|
||||
|
||||
// custom CA Bundle filename
|
||||
customCABundleKey = `ca_bundle`
|
||||
|
||||
// endpoint discovery group
|
||||
enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
|
||||
|
||||
|
@ -60,6 +70,8 @@ const (
|
|||
|
||||
// sharedConfig represents the configuration fields of the SDK config files.
|
||||
type sharedConfig struct {
|
||||
Profile string
|
||||
|
||||
// Credentials values from the config file. Both aws_access_key_id and
|
||||
// aws_secret_access_key must be provided together in the same file to be
|
||||
// considered valid. The values will be ignored if not a complete group.
|
||||
|
@ -75,6 +87,11 @@ type sharedConfig struct {
|
|||
CredentialProcess string
|
||||
WebIdentityTokenFile string
|
||||
|
||||
SSOAccountID string
|
||||
SSORegion string
|
||||
SSORoleName string
|
||||
SSOStartURL string
|
||||
|
||||
RoleARN string
|
||||
RoleSessionName string
|
||||
ExternalID string
|
||||
|
@@ -90,6 +107,15 @@ type sharedConfig struct {
	// region
	Region string

	// CustomCABundle is the file path to a PEM file the SDK will read and
	// use to configure the HTTP transport with additional CA certs that are
	// not present in the platforms default CA store.
	//
	// This value will be ignored if the file does not exist.
	//
	//  ca_bundle
	CustomCABundle string

	// EnableEndpointDiscovery can be enabled in the shared config by setting
	// endpoint_discovery_enabled to true
	//
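A hedged example of the new ca_bundle shared-config key wired through NewSessionWithOptions; the profile name and path are hypothetical, and per setTLSOptions the key only applies when neither AWS_CA_BUNDLE nor the CustomCABundle option is set:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Hypothetical ~/.aws/config entry exercising the new ca_bundle key:
	//
	//   [profile corp]
	//   region    = us-east-1
	//   ca_bundle = /etc/ssl/private/corp-ca.pem
	//
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile:           "corp",
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = sess
}
```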
@ -177,6 +203,8 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
|
|||
}
|
||||
|
||||
func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
|
||||
cfg.Profile = profile
|
||||
|
||||
// Trim files from the list that don't exist.
|
||||
var skippedFiles int
|
||||
var profileNotFoundErr error
|
||||
|
@ -205,9 +233,9 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s
|
|||
cfg.clearAssumeRoleOptions()
|
||||
} else {
|
||||
// First time a profile has been seen, It must either be a assume role
|
||||
// or credentials. Assert if the credential type requires a role ARN,
|
||||
// the ARN is also set.
|
||||
if err := cfg.validateCredentialsRequireARN(profile); err != nil {
|
||||
// credentials, or SSO. Assert if the credential type requires a role ARN,
|
||||
// the ARN is also set, or validate that the SSO configuration is complete.
|
||||
if err := cfg.validateCredentialsConfig(profile); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -276,6 +304,7 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
|
|||
updateString(&cfg.SourceProfileName, section, sourceProfileKey)
|
||||
updateString(&cfg.CredentialSource, section, credentialSourceKey)
|
||||
updateString(&cfg.Region, section, regionKey)
|
||||
updateString(&cfg.CustomCABundle, section, customCABundleKey)
|
||||
|
||||
if section.Has(roleDurationSecondsKey) {
|
||||
d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
|
||||
|
@ -299,6 +328,12 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
|
|||
}
|
||||
cfg.S3UsEast1RegionalEndpoint = sre
|
||||
}
|
||||
|
||||
// AWS Single Sign-On (AWS SSO)
|
||||
updateString(&cfg.SSOAccountID, section, ssoAccountIDKey)
|
||||
updateString(&cfg.SSORegion, section, ssoRegionKey)
|
||||
updateString(&cfg.SSORoleName, section, ssoRoleNameKey)
|
||||
updateString(&cfg.SSOStartURL, section, ssoStartURL)
|
||||
}
|
||||
|
||||
updateString(&cfg.CredentialProcess, section, credentialProcessKey)
|
||||
|
@ -329,6 +364,14 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
|
|||
return nil
|
||||
}
|
||||
|
||||
func (cfg *sharedConfig) validateCredentialsConfig(profile string) error {
|
||||
if err := cfg.validateCredentialsRequireARN(profile); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error {
|
||||
var credSource string
|
||||
|
||||
|
@ -358,6 +401,7 @@ func (cfg *sharedConfig) validateCredentialType() error {
|
|||
len(cfg.CredentialSource) != 0,
|
||||
len(cfg.CredentialProcess) != 0,
|
||||
len(cfg.WebIdentityTokenFile) != 0,
|
||||
cfg.hasSSOConfiguration(),
|
||||
) {
|
||||
return ErrSharedConfigSourceCollision
|
||||
}
|
||||
|
@ -365,12 +409,43 @@ func (cfg *sharedConfig) validateCredentialType() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (cfg *sharedConfig) validateSSOConfiguration() error {
|
||||
if !cfg.hasSSOConfiguration() {
|
||||
return nil
|
||||
}
|
||||
|
||||
var missing []string
|
||||
if len(cfg.SSOAccountID) == 0 {
|
||||
missing = append(missing, ssoAccountIDKey)
|
||||
}
|
||||
|
||||
if len(cfg.SSORegion) == 0 {
|
||||
missing = append(missing, ssoRegionKey)
|
||||
}
|
||||
|
||||
if len(cfg.SSORoleName) == 0 {
|
||||
missing = append(missing, ssoRoleNameKey)
|
||||
}
|
||||
|
||||
if len(cfg.SSOStartURL) == 0 {
|
||||
missing = append(missing, ssoStartURL)
|
||||
}
|
||||
|
||||
if len(missing) > 0 {
|
||||
return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
|
||||
cfg.Profile, strings.Join(missing, ", "))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cfg *sharedConfig) hasCredentials() bool {
|
||||
switch {
|
||||
case len(cfg.SourceProfileName) != 0:
|
||||
case len(cfg.CredentialSource) != 0:
|
||||
case len(cfg.CredentialProcess) != 0:
|
||||
case len(cfg.WebIdentityTokenFile) != 0:
|
||||
case cfg.hasSSOConfiguration():
|
||||
case cfg.Creds.HasKeys():
|
||||
default:
|
||||
return false
|
||||
|
@ -394,6 +469,18 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() {
|
|||
cfg.SourceProfileName = ""
|
||||
}
|
||||
|
||||
func (cfg *sharedConfig) hasSSOConfiguration() bool {
|
||||
switch {
|
||||
case len(cfg.SSOAccountID) != 0:
|
||||
case len(cfg.SSORegion) != 0:
|
||||
case len(cfg.SSORoleName) != 0:
|
||||
case len(cfg.SSOStartURL) != 0:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func oneOrNone(bs ...bool) bool {
|
||||
var count int
|
||||
|
||||
|
|
|
@ -5,4 +5,4 @@ package aws
|
|||
const SDKName = "aws-sdk-go"
|
||||
|
||||
// SDKVersion is the version of this SDK
|
||||
const SDKVersion = "1.34.28"
|
||||
const SDKVersion = "1.37.19"
|
||||
|
|
|
@ -66,6 +66,7 @@ var parseTable = map[ASTKind]map[TokenType]int{
|
|||
TokenLit: ValueState,
|
||||
TokenWS: SkipTokenState,
|
||||
TokenNL: SkipState,
|
||||
TokenNone: SkipState,
|
||||
},
|
||||
ASTKindStatement: map[TokenType]int{
|
||||
TokenLit: SectionState,
|
||||
|
|
|
@ -19,23 +19,28 @@ func (a AccessPointARN) GetARN() arn.ARN {
|
|||
|
||||
// ParseAccessPointResource attempts to parse the ARN's resource as an
|
||||
// AccessPoint resource.
|
||||
//
|
||||
// Supported Access point resource format:
|
||||
// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
|
||||
// - example: arn.aws.s3.us-west-2.012345678901:accesspoint/myaccesspoint
|
||||
//
|
||||
func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
|
||||
if len(a.Region) == 0 {
|
||||
return AccessPointARN{}, InvalidARNError{a, "region not set"}
|
||||
return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"}
|
||||
}
|
||||
if len(a.AccountID) == 0 {
|
||||
return AccessPointARN{}, InvalidARNError{a, "account-id not set"}
|
||||
return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"}
|
||||
}
|
||||
if len(resParts) == 0 {
|
||||
return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
|
||||
return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
|
||||
}
|
||||
if len(resParts) > 1 {
|
||||
return AccessPointARN{}, InvalidARNError{a, "sub resource not supported"}
|
||||
return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
|
||||
}
|
||||
|
||||
resID := resParts[0]
|
||||
if len(strings.TrimSpace(resID)) == 0 {
|
||||
return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
|
||||
return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
|
||||
}
|
||||
|
||||
return AccessPointARN{
|
|
@ -1,6 +1,7 @@
|
|||
package arn
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/arn"
|
||||
|
@ -25,13 +26,14 @@ func ParseResource(s string, resParser ResourceParser) (resARN Resource, err err
|
|||
}
|
||||
|
||||
if len(a.Partition) == 0 {
|
||||
return nil, InvalidARNError{a, "partition not set"}
|
||||
return nil, InvalidARNError{ARN: a, Reason: "partition not set"}
|
||||
}
|
||||
if a.Service != "s3" {
|
||||
return nil, InvalidARNError{a, "service is not S3"}
|
||||
|
||||
if a.Service != "s3" && a.Service != "s3-outposts" {
|
||||
return nil, InvalidARNError{ARN: a, Reason: "service is not supported"}
|
||||
}
|
||||
if len(a.Resource) == 0 {
|
||||
return nil, InvalidARNError{a, "resource not set"}
|
||||
return nil, InvalidARNError{ARN: a, Reason: "resource not set"}
|
||||
}
|
||||
|
||||
return resParser(a)
|
||||
|
@ -66,6 +68,7 @@ type InvalidARNError struct {
|
|||
Reason string
|
||||
}
|
||||
|
||||
// Error returns a string denoting the occurred InvalidARNError
|
||||
func (e InvalidARNError) Error() string {
|
||||
return "invalid Amazon S3 ARN, " + e.Reason + ", " + e.ARN.String()
|
||||
return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String())
|
||||
}
vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go (generated, vendored, new file, 126 lines)

@@ -0,0 +1,126 @@
package arn
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/arn"
|
||||
)
|
||||
|
||||
// OutpostARN interface that should be satisfied by outpost ARNs
|
||||
type OutpostARN interface {
|
||||
Resource
|
||||
GetOutpostID() string
|
||||
}
|
||||
|
||||
// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format
|
||||
// and return a specific OutpostARN type
|
||||
//
|
||||
// Currently supported outpost ARN formats:
|
||||
// * Outpost AccessPoint ARN format:
|
||||
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
|
||||
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
|
||||
//
|
||||
// * Outpost Bucket ARN format:
|
||||
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName}
|
||||
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket
|
||||
//
|
||||
// Other outpost ARN formats may be supported and added in the future.
|
||||
//
|
||||
func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) {
|
||||
if len(a.Region) == 0 {
|
||||
return nil, InvalidARNError{ARN: a, Reason: "region not set"}
|
||||
}
|
||||
|
||||
if len(a.AccountID) == 0 {
|
||||
return nil, InvalidARNError{ARN: a, Reason: "account-id not set"}
|
||||
}
|
||||
|
||||
// verify if outpost id is present and valid
|
||||
if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 {
|
||||
return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
|
||||
}
|
||||
|
||||
// verify possible resource type exists
|
||||
if len(resParts) < 3 {
|
||||
return nil, InvalidARNError{
|
||||
ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present",
|
||||
}
|
||||
}
|
||||
|
||||
// Since we know this is a OutpostARN fetch outpostID
|
||||
outpostID := strings.TrimSpace(resParts[0])
|
||||
|
||||
switch resParts[1] {
|
||||
case "accesspoint":
|
||||
accesspointARN, err := ParseAccessPointResource(a, resParts[2:])
|
||||
if err != nil {
|
||||
return OutpostAccessPointARN{}, err
|
||||
}
|
||||
return OutpostAccessPointARN{
|
||||
AccessPointARN: accesspointARN,
|
||||
OutpostID: outpostID,
|
||||
}, nil
|
||||
|
||||
case "bucket":
|
||||
bucketName, err := parseBucketResource(a, resParts[2:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return OutpostBucketARN{
|
||||
ARN: a,
|
||||
BucketName: bucketName,
|
||||
OutpostID: outpostID,
|
||||
}, nil
|
||||
|
||||
default:
|
||||
return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"}
|
||||
}
|
||||
}
|
||||
|
||||
// OutpostAccessPointARN represents outpost access point ARN.
|
||||
type OutpostAccessPointARN struct {
|
||||
AccessPointARN
|
||||
OutpostID string
|
||||
}
|
||||
|
||||
// GetOutpostID returns the outpost id of outpost access point arn
|
||||
func (o OutpostAccessPointARN) GetOutpostID() string {
|
||||
return o.OutpostID
|
||||
}
|
||||
|
||||
// OutpostBucketARN represents the outpost bucket ARN.
|
||||
type OutpostBucketARN struct {
|
||||
arn.ARN
|
||||
BucketName string
|
||||
OutpostID string
|
||||
}
|
||||
|
||||
// GetOutpostID returns the outpost id of outpost bucket arn
|
||||
func (o OutpostBucketARN) GetOutpostID() string {
|
||||
return o.OutpostID
|
||||
}
|
||||
|
||||
// GetARN retrives the base ARN from outpost bucket ARN resource
|
||||
func (o OutpostBucketARN) GetARN() arn.ARN {
|
||||
return o.ARN
|
||||
}
|
||||
|
||||
// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the
|
||||
// bucket resource id.
|
||||
//
|
||||
// parseBucketResource only parses the bucket resource id.
|
||||
//
|
||||
func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) {
|
||||
if len(resParts) == 0 {
|
||||
return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
|
||||
}
|
||||
if len(resParts) > 1 {
|
||||
return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
|
||||
}
|
||||
|
||||
bucketName = strings.TrimSpace(resParts[0])
|
||||
if len(bucketName) == 0 {
|
||||
return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
|
||||
}
|
||||
return bucketName, err
|
||||
}
|
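To make the resource layout concrete, here is a small standalone sketch that splits the example access-point ARN from the doc comment the same way ParseOutpostARNResource consumes it; it uses only the public aws/arn package, since this file lives under internal/:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	// Example ARN taken from the doc comment in outpost_arn.go.
	const s = "arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint"

	a, err := arn.Parse(s)
	if err != nil {
		log.Fatal(err)
	}

	// ParseOutpostARNResource receives the resource split on "/" with the
	// leading "outpost" label removed, i.e. [outpostID, "accesspoint", name].
	parts := strings.Split(a.Resource, "/")
	fmt.Println("service:   ", a.Service) // s3-outposts
	fmt.Println("outpost id:", parts[1])  // op-1234567890123456
	fmt.Println("sub type:  ", parts[2])  // accesspoint
	fmt.Println("name:      ", parts[3])  // myaccesspoint
}
```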
vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go (generated, vendored, new file, 189 lines)

@@ -0,0 +1,189 @@
package s3shared
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/internal/s3shared/arn"
|
||||
)
|
||||
|
||||
const (
|
||||
invalidARNErrorErrCode = "InvalidARNError"
|
||||
configurationErrorErrCode = "ConfigurationError"
|
||||
)
|
||||
|
||||
// InvalidARNError denotes the error for Invalid ARN
|
||||
type InvalidARNError struct {
|
||||
message string
|
||||
resource arn.Resource
|
||||
origErr error
|
||||
}
|
||||
|
||||
// Error returns the InvalidARNError
|
||||
func (e InvalidARNError) Error() string {
|
||||
var extra string
|
||||
if e.resource != nil {
|
||||
extra = "ARN: " + e.resource.String()
|
||||
}
|
||||
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
|
||||
}
|
||||
|
||||
// Code returns the invalid ARN error code
|
||||
func (e InvalidARNError) Code() string {
|
||||
return invalidARNErrorErrCode
|
||||
}
|
||||
|
||||
// Message returns the message for Invalid ARN error
|
||||
func (e InvalidARNError) Message() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// OrigErr is the original error wrapped by Invalid ARN Error
|
||||
func (e InvalidARNError) OrigErr() error {
|
||||
return e.origErr
|
||||
}
|
||||
|
||||
// NewInvalidARNError denotes invalid arn error
|
||||
func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError {
|
||||
return InvalidARNError{
|
||||
message: "invalid ARN",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
// NewInvalidARNWithCustomEndpointError ARN not supported for custom clients endpoints
|
||||
func NewInvalidARNWithCustomEndpointError(resource arn.Resource, err error) InvalidARNError {
|
||||
return InvalidARNError{
|
||||
message: "resource ARN not supported with custom client endpoints",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition
|
||||
func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError {
|
||||
return InvalidARNError{
|
||||
message: "resource ARN not supported for the target ARN partition",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
// NewInvalidARNWithFIPSError ARN not supported for FIPS region
|
||||
func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError {
|
||||
return InvalidARNError{
|
||||
message: "resource ARN not supported for FIPS region",
|
||||
resource: resource,
|
||||
origErr: err,
|
||||
}
|
||||
}
|
||||
|
||||
// ConfigurationError is used to denote a client configuration error
|
||||
type ConfigurationError struct {
|
||||
message string
|
||||
resource arn.Resource
|
||||
clientPartitionID string
|
||||
clientRegion string
|
||||
origErr error
|
||||
}
|
||||
|
||||
// Error returns the Configuration error string
|
||||
func (e ConfigurationError) Error() string {
|
||||
extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
|
||||
e.resource, e.clientPartitionID, e.clientRegion)
|
||||
|
||||
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
|
||||
}
|
||||
|
||||
// Code returns configuration error's error-code
|
||||
func (e ConfigurationError) Code() string {
|
||||
return configurationErrorErrCode
|
||||
}
|
||||
|
||||
// Message returns the configuration error message
|
||||
func (e ConfigurationError) Message() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// OrigErr is the original error wrapped by Configuration Error
|
||||
func (e ConfigurationError) OrigErr() error {
|
||||
return e.origErr
|
||||
}
|
||||
|
||||
// NewClientPartitionMismatchError stub
|
||||
func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
|
||||
return ConfigurationError{
|
||||
message: "client partition does not match provided ARN partition",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
// NewClientRegionMismatchError denotes cross region access error
|
||||
func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
|
||||
return ConfigurationError{
|
||||
message: "client region does not match provided ARN region",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
// NewFailedToResolveEndpointError denotes endpoint resolving error
|
||||
func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
|
||||
return ConfigurationError{
|
||||
message: "endpoint resolver failed to find an endpoint for the provided ARN region",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access
|
||||
func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
|
||||
return ConfigurationError{
|
||||
message: "client configured for fips but cross-region resource ARN provided",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate
|
||||
func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
|
||||
return ConfigurationError{
|
||||
message: "client configured for S3 Accelerate but is not supported with resource ARN",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request
|
||||
func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
|
||||
return ConfigurationError{
|
||||
message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack
|
||||
func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
|
||||
return ConfigurationError{
|
||||
message: "client configured for S3 Dual-stack but is not supported with resource ARN",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go (generated, vendored, new file, 62 lines)

@@ -0,0 +1,62 @@
package s3shared
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
awsarn "github.com/aws/aws-sdk-go/aws/arn"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/s3shared/arn"
|
||||
)
|
||||
|
||||
// ResourceRequest represents the request and arn resource
|
||||
type ResourceRequest struct {
|
||||
Resource arn.Resource
|
||||
Request *request.Request
|
||||
}
|
||||
|
||||
// ARN returns the resource ARN
|
||||
func (r ResourceRequest) ARN() awsarn.ARN {
|
||||
return r.Resource.GetARN()
|
||||
}
|
||||
|
||||
// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set
|
||||
func (r ResourceRequest) AllowCrossRegion() bool {
|
||||
return aws.BoolValue(r.Request.Config.S3UseARNRegion)
|
||||
}
|
||||
|
||||
// UseFIPS returns true if request config region is FIPS
|
||||
func (r ResourceRequest) UseFIPS() bool {
|
||||
return IsFIPS(aws.StringValue(r.Request.Config.Region))
|
||||
}
|
||||
|
||||
// ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS
|
||||
func (r ResourceRequest) ResourceConfiguredForFIPS() bool {
|
||||
return IsFIPS(r.ARN().Region)
|
||||
}
|
||||
|
||||
// IsCrossPartition returns true if client is configured for another partition, than
|
||||
// the partition that resource ARN region resolves to.
|
||||
func (r ResourceRequest) IsCrossPartition() bool {
|
||||
return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
|
||||
}
|
||||
|
||||
// IsCrossRegion returns true if ARN region is different than client configured region
|
||||
func (r ResourceRequest) IsCrossRegion() bool {
|
||||
return IsCrossRegion(r.Request, r.Resource.GetARN().Region)
|
||||
}
|
||||
|
||||
// HasCustomEndpoint returns true if custom client endpoint is provided
|
||||
func (r ResourceRequest) HasCustomEndpoint() bool {
|
||||
return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
|
||||
}
|
||||
|
||||
// IsFIPS returns true if region is a fips region
|
||||
func IsFIPS(clientRegion string) bool {
|
||||
return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips")
|
||||
}
|
||||
|
||||
// IsCrossRegion returns true if request signing region is not same as configured region
|
||||
func IsCrossRegion(req *request.Request, otherRegion string) bool {
|
||||
return req.ClientInfo.SigningRegion != otherRegion
|
||||
}
|
|
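The FIPS check above is simple enough to restate standalone. A sketch that mirrors IsFIPS (re-declared locally because s3shared is an internal package):

```go
package main

import (
	"fmt"
	"strings"
)

// isFIPS mirrors s3shared.IsFIPS: a region is treated as FIPS when it carries
// a "fips-" prefix or a "-fips" suffix.
func isFIPS(clientRegion string) bool {
	return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips")
}

func main() {
	for _, region := range []string{"fips-us-gov-west-1", "us-east-1-fips", "us-west-2"} {
		fmt.Printf("%-20s fips=%v\n", region, isFIPS(region))
	}
	// fips-us-gov-west-1   fips=true
	// us-east-1-fips       fips=true
	// us-west-2            fips=false
}
```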
@ -1,9 +1,10 @@
|
|||
package protocol
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ValidateEndpointHostHandler is a request handler that will validate the
|
||||
|
@ -22,8 +23,26 @@ var ValidateEndpointHostHandler = request.NamedHandler{
|
|||
// 3986 host. Returns error if the host is not valid.
|
||||
func ValidateEndpointHost(opName, host string) error {
|
||||
paramErrs := request.ErrInvalidParams{Context: opName}
|
||||
labels := strings.Split(host, ".")
|
||||
|
||||
var hostname string
|
||||
var port string
|
||||
var err error
|
||||
|
||||
if strings.Contains(host, ":") {
|
||||
hostname, port, err = net.SplitHostPort(host)
|
||||
|
||||
if err != nil {
|
||||
paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host))
|
||||
}
|
||||
|
||||
if !ValidPortNumber(port) {
|
||||
paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port))
|
||||
}
|
||||
} else {
|
||||
hostname = host
|
||||
}
|
||||
|
||||
labels := strings.Split(hostname, ".")
|
||||
for i, label := range labels {
|
||||
if i == len(labels)-1 && len(label) == 0 {
|
||||
// Allow trailing dot for FQDN hosts.
|
||||
|
@ -36,7 +55,11 @@ func ValidateEndpointHost(opName, host string) error {
|
|||
}
|
||||
}
|
||||
|
||||
if len(host) > 255 {
|
||||
if len(hostname) == 0 {
|
||||
paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1))
|
||||
}
|
||||
|
||||
if len(hostname) > 255 {
|
||||
paramErrs.Add(request.NewErrParamMaxLen(
|
||||
"endpoint host", 255, host,
|
||||
))
|
||||
|
@ -66,3 +89,16 @@ func ValidHostLabel(label string) bool {
|
|||
|
||||
return true
|
||||
}
|
||||
|
||||
// ValidPortNumber return if the port is valid RFC 3986 port
|
||||
func ValidPortNumber(port string) bool {
|
||||
i, err := strconv.Atoi(port)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if i < 0 || i > 65535 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
|
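The change above teaches ValidateEndpointHost to accept host:port endpoints. A standalone sketch of the same split-and-range check (re-implemented locally since the protocol package is internal; the endpoints are illustrative):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// validPort mirrors protocol.ValidPortNumber: an integer in [0, 65535].
func validPort(port string) bool {
	i, err := strconv.Atoi(port)
	return err == nil && i >= 0 && i <= 65535
}

func main() {
	for _, endpoint := range []string{"bucket.s3.us-west-2.amazonaws.com", "localhost:9000", "localhost:99999"} {
		hostname, port := endpoint, ""
		if h, p, err := net.SplitHostPort(endpoint); err == nil {
			hostname, port = h, p
		}
		ok := port == "" || validPort(port)
		fmt.Printf("host=%-35s port=%-8q valid port=%v\n", hostname, port, ok)
	}
}
```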
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go (generated, vendored, new file, 59 lines)

@@ -0,0 +1,59 @@
// Package restjson provides RESTful JSON serialization of AWS
|
||||
// requests and responses.
|
||||
package restjson
|
||||
|
||||
//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go
|
||||
//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
||||
)
|
||||
|
||||
// BuildHandler is a named request handler for building restjson protocol
|
||||
// requests
|
||||
var BuildHandler = request.NamedHandler{
|
||||
Name: "awssdk.restjson.Build",
|
||||
Fn: Build,
|
||||
}
|
||||
|
||||
// UnmarshalHandler is a named request handler for unmarshaling restjson
|
||||
// protocol requests
|
||||
var UnmarshalHandler = request.NamedHandler{
|
||||
Name: "awssdk.restjson.Unmarshal",
|
||||
Fn: Unmarshal,
|
||||
}
|
||||
|
||||
// UnmarshalMetaHandler is a named request handler for unmarshaling restjson
|
||||
// protocol request metadata
|
||||
var UnmarshalMetaHandler = request.NamedHandler{
|
||||
Name: "awssdk.restjson.UnmarshalMeta",
|
||||
Fn: UnmarshalMeta,
|
||||
}
|
||||
|
||||
// Build builds a request for the REST JSON protocol.
|
||||
func Build(r *request.Request) {
|
||||
rest.Build(r)
|
||||
|
||||
if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
|
||||
if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 {
|
||||
r.HTTPRequest.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
jsonrpc.Build(r)
|
||||
}
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals a response body for the REST JSON protocol.
|
||||
func Unmarshal(r *request.Request) {
|
||||
if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
|
||||
jsonrpc.Unmarshal(r)
|
||||
} else {
|
||||
rest.Unmarshal(r)
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalMeta unmarshals response headers for the REST JSON protocol.
|
||||
func UnmarshalMeta(r *request.Request) {
|
||||
rest.UnmarshalMeta(r)
|
||||
}
|
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go (generated, vendored, new file, 134 lines)

@@ -0,0 +1,134 @@
package restjson
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
errorTypeHeader = "X-Amzn-Errortype"
|
||||
errorMessageHeader = "X-Amzn-Errormessage"
|
||||
)
|
||||
|
||||
// UnmarshalTypedError provides unmarshaling errors API response errors
|
||||
// for both typed and untyped errors.
|
||||
type UnmarshalTypedError struct {
|
||||
exceptions map[string]func(protocol.ResponseMetadata) error
|
||||
}
|
||||
|
||||
// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
|
||||
// set of exception names to the error unmarshalers
|
||||
func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
|
||||
return &UnmarshalTypedError{
|
||||
exceptions: exceptions,
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalError attempts to unmarshal the HTTP response error as a known
|
||||
// error type. If unable to unmarshal the error type, the generic SDK error
|
||||
// type will be used.
|
||||
func (u *UnmarshalTypedError) UnmarshalError(
|
||||
resp *http.Response,
|
||||
respMeta protocol.ResponseMetadata,
|
||||
) (error, error) {
|
||||
|
||||
code := resp.Header.Get(errorTypeHeader)
|
||||
msg := resp.Header.Get(errorMessageHeader)
|
||||
|
||||
body := resp.Body
|
||||
if len(code) == 0 {
|
||||
// If unable to get code from HTTP headers have to parse JSON message
|
||||
// to determine what kind of exception this will be.
|
||||
var buf bytes.Buffer
|
||||
var jsonErr jsonErrorResponse
|
||||
teeReader := io.TeeReader(resp.Body, &buf)
|
||||
err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
body = ioutil.NopCloser(&buf)
|
||||
code = jsonErr.Code
|
||||
msg = jsonErr.Message
|
||||
}
|
||||
|
||||
// If code has colon separators remove them so can compare against modeled
|
||||
// exception names.
|
||||
code = strings.SplitN(code, ":", 2)[0]
|
||||
|
||||
if fn, ok := u.exceptions[code]; ok {
|
||||
// If exception code is know, use associated constructor to get a value
|
||||
// for the exception that the JSON body can be unmarshaled into.
|
||||
v := fn(respMeta)
|
||||
if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := rest.UnmarshalResponse(resp, v, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// fallback to unmodeled generic exceptions
|
||||
return awserr.NewRequestFailure(
|
||||
awserr.New(code, msg, nil),
|
||||
respMeta.StatusCode,
|
||||
respMeta.RequestID,
|
||||
), nil
|
||||
}
|
||||
|
||||
// UnmarshalErrorHandler is a named request handler for unmarshaling restjson
|
||||
// protocol request errors
|
||||
var UnmarshalErrorHandler = request.NamedHandler{
|
||||
Name: "awssdk.restjson.UnmarshalError",
|
||||
Fn: UnmarshalError,
|
||||
}
|
||||
|
||||
// UnmarshalError unmarshals a response error for the REST JSON protocol.
|
||||
func UnmarshalError(r *request.Request) {
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
|
||||
var jsonErr jsonErrorResponse
|
||||
err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New(request.ErrCodeSerialization,
|
||||
"failed to unmarshal response error", err),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
code := r.HTTPResponse.Header.Get(errorTypeHeader)
|
||||
if code == "" {
|
||||
code = jsonErr.Code
|
||||
}
|
||||
msg := r.HTTPResponse.Header.Get(errorMessageHeader)
|
||||
if msg == "" {
|
||||
msg = jsonErr.Message
|
||||
}
|
||||
|
||||
code = strings.SplitN(code, ":", 2)[0]
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New(code, jsonErr.Message, nil),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
}
|
||||
|
||||
type jsonErrorResponse struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
File diff suppressed because it is too large
@ -33,6 +33,25 @@ const (
|
|||
// Backups have not yet been enabled for this table.
|
||||
ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException"
|
||||
|
||||
// ErrCodeDuplicateItemException for service response error code
|
||||
// "DuplicateItemException".
|
||||
//
|
||||
// There was an attempt to insert an item with the same primary key as an item
|
||||
// that already exists in the DynamoDB table.
|
||||
ErrCodeDuplicateItemException = "DuplicateItemException"
|
||||
|
||||
// ErrCodeExportConflictException for service response error code
|
||||
// "ExportConflictException".
|
||||
//
|
||||
// There was a conflict when writing to the specified S3 bucket.
|
||||
ErrCodeExportConflictException = "ExportConflictException"
|
||||
|
||||
// ErrCodeExportNotFoundException for service response error code
|
||||
// "ExportNotFoundException".
|
||||
//
|
||||
// The specified export was not found.
|
||||
ErrCodeExportNotFoundException = "ExportNotFoundException"
|
||||
|
||||
// ErrCodeGlobalTableAlreadyExistsException for service response error code
|
||||
// "GlobalTableAlreadyExistsException".
|
||||
//
|
||||
|
@ -64,6 +83,12 @@ const (
|
|||
// An error occurred on the server side.
|
||||
ErrCodeInternalServerError = "InternalServerError"
|
||||
|
||||
// ErrCodeInvalidExportTimeException for service response error code
|
||||
// "InvalidExportTimeException".
|
||||
//
|
||||
// The specified ExportTime is outside of the point in time recovery window.
|
||||
ErrCodeInvalidExportTimeException = "InvalidExportTimeException"
|
||||
|
||||
// ErrCodeInvalidRestoreTimeException for service response error code
|
||||
// "InvalidRestoreTimeException".
|
||||
//
|
||||
|
@ -92,7 +117,7 @@ const (
|
|||
// if the table or index specifications are complex, DynamoDB might temporarily
|
||||
// reduce the number of concurrent operations.
|
||||
//
|
||||
// There is a soft account limit of 256 tables.
|
||||
// There is a soft account quota of 256 tables.
|
||||
ErrCodeLimitExceededException = "LimitExceededException"
|
||||
|
||||
// ErrCodePointInTimeRecoveryUnavailableException for service response error code
|
||||
|
@ -127,9 +152,9 @@ const (
|
|||
// ErrCodeRequestLimitExceeded for service response error code
|
||||
// "RequestLimitExceeded".
|
||||
//
|
||||
// Throughput exceeds the current throughput limit for your account. Please
|
||||
// Throughput exceeds the current throughput quota for your account. Please
|
||||
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
|
||||
// a limit increase.
|
||||
// a quota increase.
|
||||
ErrCodeRequestLimitExceeded = "RequestLimitExceeded"
|
||||
|
||||
// ErrCodeResourceInUseException for service response error code
|
||||
|
@ -274,11 +299,15 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
|
|||
"BackupNotFoundException": newErrorBackupNotFoundException,
|
||||
"ConditionalCheckFailedException": newErrorConditionalCheckFailedException,
|
||||
"ContinuousBackupsUnavailableException": newErrorContinuousBackupsUnavailableException,
|
||||
"DuplicateItemException": newErrorDuplicateItemException,
|
||||
"ExportConflictException": newErrorExportConflictException,
|
||||
"ExportNotFoundException": newErrorExportNotFoundException,
|
||||
"GlobalTableAlreadyExistsException": newErrorGlobalTableAlreadyExistsException,
|
||||
"GlobalTableNotFoundException": newErrorGlobalTableNotFoundException,
|
||||
"IdempotentParameterMismatchException": newErrorIdempotentParameterMismatchException,
|
||||
"IndexNotFoundException": newErrorIndexNotFoundException,
|
||||
"InternalServerError": newErrorInternalServerError,
|
||||
"InvalidExportTimeException": newErrorInvalidExportTimeException,
|
||||
"InvalidRestoreTimeException": newErrorInvalidRestoreTimeException,
|
||||
"ItemCollectionSizeLimitExceededException": newErrorItemCollectionSizeLimitExceededException,
|
||||
"LimitExceededException": newErrorLimitExceededException,
|
||||
|
|
File diff suppressed because it is too large
@ -4,8 +4,14 @@
|
|||
// requests to Amazon Elastic Compute Cloud.
|
||||
//
|
||||
// Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing
|
||||
// capacity in the AWS cloud. Using Amazon EC2 eliminates the need to invest
|
||||
// capacity in the AWS Cloud. Using Amazon EC2 eliminates the need to invest
|
||||
// in hardware up front, so you can develop and deploy applications faster.
|
||||
// Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically
|
||||
// isolated section of the AWS Cloud where you can launch AWS resources in a
|
||||
// virtual network that you've defined. Amazon Elastic Block Store (Amazon EBS)
|
||||
// provides block level storage volumes for use with EC2 instances. EBS volumes
|
||||
// are highly available and reliable storage volumes that can be attached to
|
||||
// any running instance and used like a hard drive.
|
||||
//
|
||||
// To learn more, see the following resources:
|
||||
//
|
||||
|
@ -13,7 +19,7 @@
|
|||
// EC2 documentation (http://aws.amazon.com/documentation/ec2)
|
||||
//
|
||||
// * Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon
|
||||
// EBS documentation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html)
|
||||
// EBS documentation (http://aws.amazon.com/documentation/ebs)
|
||||
//
|
||||
// * Amazon VPC: Amazon VPC product page (http://aws.amazon.com/vpc), Amazon
|
||||
// VPC documentation (http://aws.amazon.com/documentation/vpc)
|
||||
|
|
File diff suppressed because it is too large
@ -17,7 +17,7 @@ const (
|
|||
//
|
||||
// The request was rejected because the most recent credential report has expired.
|
||||
// To generate a new credential report, use GenerateCredentialReport. For more
|
||||
// information about credential report expiration, see Getting Credential Reports
|
||||
// information about credential report expiration, see Getting credential reports
|
||||
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html)
|
||||
// in the IAM User Guide.
|
||||
ErrCodeCredentialReportExpiredException = "ReportExpired"
|
||||
|
@@ -117,8 +117,7 @@ const (
// "LimitExceeded".
//
// The request was rejected because it attempted to create resources beyond
// the current AWS account limitations. The error message describes the limit
// exceeded.
// the current AWS account limits. The error message describes the limit exceeded.
ErrCodeLimitExceededException = "LimitExceeded"

// ErrCodeMalformedCertificateException for service response error code
@ -437,6 +437,10 @@ type IAMAPI interface {
|
|||
ListGroupsForUserPages(*iam.ListGroupsForUserInput, func(*iam.ListGroupsForUserOutput, bool) bool) error
|
||||
ListGroupsForUserPagesWithContext(aws.Context, *iam.ListGroupsForUserInput, func(*iam.ListGroupsForUserOutput, bool) bool, ...request.Option) error
|
||||
|
||||
ListInstanceProfileTags(*iam.ListInstanceProfileTagsInput) (*iam.ListInstanceProfileTagsOutput, error)
|
||||
ListInstanceProfileTagsWithContext(aws.Context, *iam.ListInstanceProfileTagsInput, ...request.Option) (*iam.ListInstanceProfileTagsOutput, error)
|
||||
ListInstanceProfileTagsRequest(*iam.ListInstanceProfileTagsInput) (*request.Request, *iam.ListInstanceProfileTagsOutput)
|
||||
|
||||
ListInstanceProfiles(*iam.ListInstanceProfilesInput) (*iam.ListInstanceProfilesOutput, error)
|
||||
ListInstanceProfilesWithContext(aws.Context, *iam.ListInstanceProfilesInput, ...request.Option) (*iam.ListInstanceProfilesOutput, error)
|
||||
ListInstanceProfilesRequest(*iam.ListInstanceProfilesInput) (*request.Request, *iam.ListInstanceProfilesOutput)
|
||||
|
@ -451,6 +455,10 @@ type IAMAPI interface {
|
|||
ListInstanceProfilesForRolePages(*iam.ListInstanceProfilesForRoleInput, func(*iam.ListInstanceProfilesForRoleOutput, bool) bool) error
|
||||
ListInstanceProfilesForRolePagesWithContext(aws.Context, *iam.ListInstanceProfilesForRoleInput, func(*iam.ListInstanceProfilesForRoleOutput, bool) bool, ...request.Option) error
|
||||
|
||||
ListMFADeviceTags(*iam.ListMFADeviceTagsInput) (*iam.ListMFADeviceTagsOutput, error)
|
||||
ListMFADeviceTagsWithContext(aws.Context, *iam.ListMFADeviceTagsInput, ...request.Option) (*iam.ListMFADeviceTagsOutput, error)
|
||||
ListMFADeviceTagsRequest(*iam.ListMFADeviceTagsInput) (*request.Request, *iam.ListMFADeviceTagsOutput)
|
||||
|
||||
ListMFADevices(*iam.ListMFADevicesInput) (*iam.ListMFADevicesOutput, error)
|
||||
ListMFADevicesWithContext(aws.Context, *iam.ListMFADevicesInput, ...request.Option) (*iam.ListMFADevicesOutput, error)
|
||||
ListMFADevicesRequest(*iam.ListMFADevicesInput) (*request.Request, *iam.ListMFADevicesOutput)
|
||||
|
@ -458,6 +466,10 @@ type IAMAPI interface {
|
|||
ListMFADevicesPages(*iam.ListMFADevicesInput, func(*iam.ListMFADevicesOutput, bool) bool) error
|
||||
ListMFADevicesPagesWithContext(aws.Context, *iam.ListMFADevicesInput, func(*iam.ListMFADevicesOutput, bool) bool, ...request.Option) error
|
||||
|
||||
ListOpenIDConnectProviderTags(*iam.ListOpenIDConnectProviderTagsInput) (*iam.ListOpenIDConnectProviderTagsOutput, error)
|
||||
ListOpenIDConnectProviderTagsWithContext(aws.Context, *iam.ListOpenIDConnectProviderTagsInput, ...request.Option) (*iam.ListOpenIDConnectProviderTagsOutput, error)
|
||||
ListOpenIDConnectProviderTagsRequest(*iam.ListOpenIDConnectProviderTagsInput) (*request.Request, *iam.ListOpenIDConnectProviderTagsOutput)
|
||||
|
||||
ListOpenIDConnectProviders(*iam.ListOpenIDConnectProvidersInput) (*iam.ListOpenIDConnectProvidersOutput, error)
|
||||
ListOpenIDConnectProvidersWithContext(aws.Context, *iam.ListOpenIDConnectProvidersInput, ...request.Option) (*iam.ListOpenIDConnectProvidersOutput, error)
|
||||
ListOpenIDConnectProvidersRequest(*iam.ListOpenIDConnectProvidersInput) (*request.Request, *iam.ListOpenIDConnectProvidersOutput)
|
||||
|
@ -473,6 +485,10 @@ type IAMAPI interface {
|
|||
ListPoliciesGrantingServiceAccessWithContext(aws.Context, *iam.ListPoliciesGrantingServiceAccessInput, ...request.Option) (*iam.ListPoliciesGrantingServiceAccessOutput, error)
|
||||
ListPoliciesGrantingServiceAccessRequest(*iam.ListPoliciesGrantingServiceAccessInput) (*request.Request, *iam.ListPoliciesGrantingServiceAccessOutput)
|
||||
|
||||
ListPolicyTags(*iam.ListPolicyTagsInput) (*iam.ListPolicyTagsOutput, error)
|
||||
ListPolicyTagsWithContext(aws.Context, *iam.ListPolicyTagsInput, ...request.Option) (*iam.ListPolicyTagsOutput, error)
|
||||
ListPolicyTagsRequest(*iam.ListPolicyTagsInput) (*request.Request, *iam.ListPolicyTagsOutput)
|
||||
|
||||
ListPolicyVersions(*iam.ListPolicyVersionsInput) (*iam.ListPolicyVersionsOutput, error)
|
||||
ListPolicyVersionsWithContext(aws.Context, *iam.ListPolicyVersionsInput, ...request.Option) (*iam.ListPolicyVersionsOutput, error)
|
||||
ListPolicyVersionsRequest(*iam.ListPolicyVersionsInput) (*request.Request, *iam.ListPolicyVersionsOutput)
|
||||
|
@ -498,6 +514,10 @@ type IAMAPI interface {
|
|||
ListRolesPages(*iam.ListRolesInput, func(*iam.ListRolesOutput, bool) bool) error
|
||||
ListRolesPagesWithContext(aws.Context, *iam.ListRolesInput, func(*iam.ListRolesOutput, bool) bool, ...request.Option) error
|
||||
|
||||
ListSAMLProviderTags(*iam.ListSAMLProviderTagsInput) (*iam.ListSAMLProviderTagsOutput, error)
|
||||
ListSAMLProviderTagsWithContext(aws.Context, *iam.ListSAMLProviderTagsInput, ...request.Option) (*iam.ListSAMLProviderTagsOutput, error)
|
||||
ListSAMLProviderTagsRequest(*iam.ListSAMLProviderTagsInput) (*request.Request, *iam.ListSAMLProviderTagsOutput)
|
||||
|
||||
ListSAMLProviders(*iam.ListSAMLProvidersInput) (*iam.ListSAMLProvidersOutput, error)
|
||||
ListSAMLProvidersWithContext(aws.Context, *iam.ListSAMLProvidersInput, ...request.Option) (*iam.ListSAMLProvidersOutput, error)
|
||||
ListSAMLProvidersRequest(*iam.ListSAMLProvidersInput) (*request.Request, *iam.ListSAMLProvidersOutput)
|
||||
|
@ -509,6 +529,10 @@ type IAMAPI interface {
|
|||
ListSSHPublicKeysPages(*iam.ListSSHPublicKeysInput, func(*iam.ListSSHPublicKeysOutput, bool) bool) error
|
||||
ListSSHPublicKeysPagesWithContext(aws.Context, *iam.ListSSHPublicKeysInput, func(*iam.ListSSHPublicKeysOutput, bool) bool, ...request.Option) error
|
||||
|
||||
ListServerCertificateTags(*iam.ListServerCertificateTagsInput) (*iam.ListServerCertificateTagsOutput, error)
|
||||
ListServerCertificateTagsWithContext(aws.Context, *iam.ListServerCertificateTagsInput, ...request.Option) (*iam.ListServerCertificateTagsOutput, error)
|
||||
ListServerCertificateTagsRequest(*iam.ListServerCertificateTagsInput) (*request.Request, *iam.ListServerCertificateTagsOutput)
|
||||
|
||||
ListServerCertificates(*iam.ListServerCertificatesInput) (*iam.ListServerCertificatesOutput, error)
|
||||
ListServerCertificatesWithContext(aws.Context, *iam.ListServerCertificatesInput, ...request.Option) (*iam.ListServerCertificatesOutput, error)
|
||||
ListServerCertificatesRequest(*iam.ListServerCertificatesInput) (*request.Request, *iam.ListServerCertificatesOutput)
|
||||
|
@ -614,18 +638,66 @@ type IAMAPI interface {
|
|||
SimulatePrincipalPolicyPages(*iam.SimulatePrincipalPolicyInput, func(*iam.SimulatePolicyResponse, bool) bool) error
|
||||
SimulatePrincipalPolicyPagesWithContext(aws.Context, *iam.SimulatePrincipalPolicyInput, func(*iam.SimulatePolicyResponse, bool) bool, ...request.Option) error
|
||||
|
||||
TagInstanceProfile(*iam.TagInstanceProfileInput) (*iam.TagInstanceProfileOutput, error)
|
||||
TagInstanceProfileWithContext(aws.Context, *iam.TagInstanceProfileInput, ...request.Option) (*iam.TagInstanceProfileOutput, error)
|
||||
TagInstanceProfileRequest(*iam.TagInstanceProfileInput) (*request.Request, *iam.TagInstanceProfileOutput)
|
||||
|
||||
TagMFADevice(*iam.TagMFADeviceInput) (*iam.TagMFADeviceOutput, error)
|
||||
TagMFADeviceWithContext(aws.Context, *iam.TagMFADeviceInput, ...request.Option) (*iam.TagMFADeviceOutput, error)
|
||||
TagMFADeviceRequest(*iam.TagMFADeviceInput) (*request.Request, *iam.TagMFADeviceOutput)
|
||||
|
||||
TagOpenIDConnectProvider(*iam.TagOpenIDConnectProviderInput) (*iam.TagOpenIDConnectProviderOutput, error)
|
||||
TagOpenIDConnectProviderWithContext(aws.Context, *iam.TagOpenIDConnectProviderInput, ...request.Option) (*iam.TagOpenIDConnectProviderOutput, error)
|
||||
TagOpenIDConnectProviderRequest(*iam.TagOpenIDConnectProviderInput) (*request.Request, *iam.TagOpenIDConnectProviderOutput)
|
||||
|
||||
TagPolicy(*iam.TagPolicyInput) (*iam.TagPolicyOutput, error)
|
||||
TagPolicyWithContext(aws.Context, *iam.TagPolicyInput, ...request.Option) (*iam.TagPolicyOutput, error)
|
||||
TagPolicyRequest(*iam.TagPolicyInput) (*request.Request, *iam.TagPolicyOutput)
|
||||
|
||||
TagRole(*iam.TagRoleInput) (*iam.TagRoleOutput, error)
|
||||
TagRoleWithContext(aws.Context, *iam.TagRoleInput, ...request.Option) (*iam.TagRoleOutput, error)
|
||||
TagRoleRequest(*iam.TagRoleInput) (*request.Request, *iam.TagRoleOutput)
|
||||
|
||||
TagSAMLProvider(*iam.TagSAMLProviderInput) (*iam.TagSAMLProviderOutput, error)
|
||||
TagSAMLProviderWithContext(aws.Context, *iam.TagSAMLProviderInput, ...request.Option) (*iam.TagSAMLProviderOutput, error)
|
||||
TagSAMLProviderRequest(*iam.TagSAMLProviderInput) (*request.Request, *iam.TagSAMLProviderOutput)
|
||||
|
||||
TagServerCertificate(*iam.TagServerCertificateInput) (*iam.TagServerCertificateOutput, error)
|
||||
TagServerCertificateWithContext(aws.Context, *iam.TagServerCertificateInput, ...request.Option) (*iam.TagServerCertificateOutput, error)
|
||||
TagServerCertificateRequest(*iam.TagServerCertificateInput) (*request.Request, *iam.TagServerCertificateOutput)
|
||||
|
||||
TagUser(*iam.TagUserInput) (*iam.TagUserOutput, error)
|
||||
TagUserWithContext(aws.Context, *iam.TagUserInput, ...request.Option) (*iam.TagUserOutput, error)
|
||||
TagUserRequest(*iam.TagUserInput) (*request.Request, *iam.TagUserOutput)
|
||||
|
||||
UntagInstanceProfile(*iam.UntagInstanceProfileInput) (*iam.UntagInstanceProfileOutput, error)
|
||||
UntagInstanceProfileWithContext(aws.Context, *iam.UntagInstanceProfileInput, ...request.Option) (*iam.UntagInstanceProfileOutput, error)
|
||||
UntagInstanceProfileRequest(*iam.UntagInstanceProfileInput) (*request.Request, *iam.UntagInstanceProfileOutput)
|
||||
|
||||
UntagMFADevice(*iam.UntagMFADeviceInput) (*iam.UntagMFADeviceOutput, error)
|
||||
UntagMFADeviceWithContext(aws.Context, *iam.UntagMFADeviceInput, ...request.Option) (*iam.UntagMFADeviceOutput, error)
|
||||
UntagMFADeviceRequest(*iam.UntagMFADeviceInput) (*request.Request, *iam.UntagMFADeviceOutput)
|
||||
|
||||
UntagOpenIDConnectProvider(*iam.UntagOpenIDConnectProviderInput) (*iam.UntagOpenIDConnectProviderOutput, error)
|
||||
UntagOpenIDConnectProviderWithContext(aws.Context, *iam.UntagOpenIDConnectProviderInput, ...request.Option) (*iam.UntagOpenIDConnectProviderOutput, error)
|
||||
UntagOpenIDConnectProviderRequest(*iam.UntagOpenIDConnectProviderInput) (*request.Request, *iam.UntagOpenIDConnectProviderOutput)
|
||||
|
||||
UntagPolicy(*iam.UntagPolicyInput) (*iam.UntagPolicyOutput, error)
|
||||
UntagPolicyWithContext(aws.Context, *iam.UntagPolicyInput, ...request.Option) (*iam.UntagPolicyOutput, error)
|
||||
UntagPolicyRequest(*iam.UntagPolicyInput) (*request.Request, *iam.UntagPolicyOutput)
|
||||
|
||||
UntagRole(*iam.UntagRoleInput) (*iam.UntagRoleOutput, error)
|
||||
UntagRoleWithContext(aws.Context, *iam.UntagRoleInput, ...request.Option) (*iam.UntagRoleOutput, error)
|
||||
UntagRoleRequest(*iam.UntagRoleInput) (*request.Request, *iam.UntagRoleOutput)
|
||||
|
||||
UntagSAMLProvider(*iam.UntagSAMLProviderInput) (*iam.UntagSAMLProviderOutput, error)
|
||||
UntagSAMLProviderWithContext(aws.Context, *iam.UntagSAMLProviderInput, ...request.Option) (*iam.UntagSAMLProviderOutput, error)
|
||||
UntagSAMLProviderRequest(*iam.UntagSAMLProviderInput) (*request.Request, *iam.UntagSAMLProviderOutput)
|
||||
|
||||
UntagServerCertificate(*iam.UntagServerCertificateInput) (*iam.UntagServerCertificateOutput, error)
|
||||
UntagServerCertificateWithContext(aws.Context, *iam.UntagServerCertificateInput, ...request.Option) (*iam.UntagServerCertificateOutput, error)
|
||||
UntagServerCertificateRequest(*iam.UntagServerCertificateInput) (*request.Request, *iam.UntagServerCertificateOutput)
|
||||
|
||||
UntagUser(*iam.UntagUserInput) (*iam.UntagUserOutput, error)
|
||||
UntagUserWithContext(aws.Context, *iam.UntagUserInput, ...request.Option) (*iam.UntagUserOutput, error)
|
||||
UntagUserRequest(*iam.UntagUserInput) (*request.Request, *iam.UntagUserOutput)
|
||||
|
|
(File diffs suppressed because they are too large.)
|
@@ -3,8 +3,8 @@ package s3
import (
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/internal/s3err"
	"github.com/aws/aws-sdk-go/service/s3/internal/arn"
	"github.com/aws/aws-sdk-go/internal/s3shared/arn"
	"github.com/aws/aws-sdk-go/internal/s3shared/s3err"
)

func init() {
@@ -69,6 +69,8 @@ type copySourceSSECustomerKeyGetter interface {
	getCopySourceSSECustomerKey() string
}

// endpointARNGetter is an accessor interface to grab the
// field corresponding to an endpoint ARN input.
type endpointARNGetter interface {
	getEndpointARN() (arn.Resource, error)
	hasEndpointARN() bool
@@ -6,11 +6,9 @@ import (

	"github.com/aws/aws-sdk-go/aws"
	awsarn "github.com/aws/aws-sdk-go/aws/arn"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/service/s3/internal/arn"
	"github.com/aws/aws-sdk-go/internal/s3shared"
	"github.com/aws/aws-sdk-go/internal/s3shared/arn"
)

// Used by shapes with members decorated as endpoint ARN.
@ -22,12 +20,66 @@ func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
|
|||
resParts := arn.SplitResource(a.Resource)
|
||||
switch resParts[0] {
|
||||
case "accesspoint":
|
||||
if a.Service != "s3" {
|
||||
return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3"}
|
||||
}
|
||||
return arn.ParseAccessPointResource(a, resParts[1:])
|
||||
case "outpost":
|
||||
if a.Service != "s3-outposts" {
|
||||
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
|
||||
}
|
||||
return parseOutpostAccessPointResource(a, resParts[1:])
|
||||
default:
|
||||
return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
|
||||
}
|
||||
}
|
||||
|
||||
// parseOutpostAccessPointResource attempts to parse the ARNs resource as an
|
||||
// outpost access-point resource.
|
||||
//
|
||||
// Supported Outpost AccessPoint ARN format:
|
||||
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
|
||||
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
|
||||
//
|
||||
func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) {
|
||||
// outpost accesspoint arn is only valid if service is s3-outposts
|
||||
if a.Service != "s3-outposts" {
|
||||
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
|
||||
}
|
||||
|
||||
if len(resParts) == 0 {
|
||||
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
|
||||
}
|
||||
|
||||
if len(resParts) < 3 {
|
||||
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{
|
||||
ARN: a, Reason: "access-point resource not set in Outpost ARN",
|
||||
}
|
||||
}
|
||||
|
||||
resID := strings.TrimSpace(resParts[0])
|
||||
if len(resID) == 0 {
|
||||
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
|
||||
}
|
||||
|
||||
var outpostAccessPointARN = arn.OutpostAccessPointARN{}
|
||||
switch resParts[1] {
|
||||
case "accesspoint":
|
||||
accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:])
|
||||
if err != nil {
|
||||
return arn.OutpostAccessPointARN{}, err
|
||||
}
|
||||
// set access-point arn
|
||||
outpostAccessPointARN.AccessPointARN = accessPointARN
|
||||
default:
|
||||
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"}
|
||||
}
|
||||
|
||||
// set outpost id
|
||||
outpostAccessPointARN.OutpostID = resID
|
||||
return outpostAccessPointARN, nil
|
||||
}
|
||||
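parseOutpostAccessPointResource above is unexported, so it cannot be called from outside the package. Purely as an illustration of the ARN shape it expects (the format and example values come from the doc comment above; everything else is an assumption), the same string can be split with the public aws/arn package:

```go
package main

import (
	"fmt"
	"strings"

	awsarn "github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	// Example ARN copied from the doc comment above.
	const s = "arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint"

	a, err := awsarn.Parse(s)
	if err != nil {
		panic(err)
	}

	// Roughly what arn.SplitResource produces internally:
	// ["outpost", "op-1234567890123456", "accesspoint", "myaccesspoint"]
	resParts := strings.FieldsFunc(a.Resource, func(r rune) bool { return r == '/' || r == ':' })
	fmt.Println(a.Service, resParts)
}
```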
|
||||
func endpointHandler(req *request.Request) {
|
||||
endpoint, ok := req.Params.(endpointARNGetter)
|
||||
if !ok || !endpoint.hasEndpointARN() {
|
||||
|
@ -37,79 +89,49 @@ func endpointHandler(req *request.Request) {
|
|||
|
||||
resource, err := endpoint.getEndpointARN()
|
||||
if err != nil {
|
||||
req.Error = newInvalidARNError(nil, err)
|
||||
req.Error = s3shared.NewInvalidARNError(nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
resReq := resourceRequest{
|
||||
resReq := s3shared.ResourceRequest{
|
||||
Resource: resource,
|
||||
Request: req,
|
||||
}
|
||||
|
||||
if resReq.IsCrossPartition() {
|
||||
req.Error = newClientPartitionMismatchError(resource,
|
||||
if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() {
|
||||
req.Error = s3shared.NewClientPartitionMismatchError(resource,
|
||||
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() {
|
||||
req.Error = newClientRegionMismatchError(resource,
|
||||
req.Error = s3shared.NewClientRegionMismatchError(resource,
|
||||
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if resReq.HasCustomEndpoint() {
|
||||
req.Error = newInvalidARNWithCustomEndpointError(resource, nil)
|
||||
return
|
||||
}
|
||||
|
||||
switch tv := resource.(type) {
|
||||
case arn.AccessPointARN:
|
||||
err = updateRequestAccessPointEndpoint(req, tv)
|
||||
if err != nil {
|
||||
req.Error = err
|
||||
}
|
||||
case arn.OutpostAccessPointARN:
|
||||
// outposts does not support FIPS regions
|
||||
if resReq.ResourceConfiguredForFIPS() {
|
||||
req.Error = s3shared.NewInvalidARNWithFIPSError(resource, nil)
|
||||
return
|
||||
}
|
||||
|
||||
err = updateRequestOutpostAccessPointEndpoint(req, tv)
|
||||
if err != nil {
|
||||
req.Error = err
|
||||
}
|
||||
default:
|
||||
req.Error = newInvalidARNError(resource, nil)
|
||||
req.Error = s3shared.NewInvalidARNError(resource, nil)
|
||||
}
|
||||
}
|
||||
|
||||
type resourceRequest struct {
|
||||
Resource arn.Resource
|
||||
Request *request.Request
|
||||
}
|
||||
|
||||
func (r resourceRequest) ARN() awsarn.ARN {
|
||||
return r.Resource.GetARN()
|
||||
}
|
||||
|
||||
func (r resourceRequest) AllowCrossRegion() bool {
|
||||
return aws.BoolValue(r.Request.Config.S3UseARNRegion)
|
||||
}
|
||||
|
||||
func (r resourceRequest) UseFIPS() bool {
|
||||
return isFIPS(aws.StringValue(r.Request.Config.Region))
|
||||
}
|
||||
|
||||
func (r resourceRequest) IsCrossPartition() bool {
|
||||
return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
|
||||
}
|
||||
|
||||
func (r resourceRequest) IsCrossRegion() bool {
|
||||
return isCrossRegion(r.Request, r.Resource.GetARN().Region)
|
||||
}
|
||||
|
||||
func (r resourceRequest) HasCustomEndpoint() bool {
|
||||
return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
|
||||
}
|
||||
|
||||
func isFIPS(clientRegion string) bool {
|
||||
return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips")
|
||||
}
|
||||
func isCrossRegion(req *request.Request, otherRegion string) bool {
|
||||
return req.ClientInfo.SigningRegion != otherRegion
|
||||
}
|
||||
|
||||
func updateBucketEndpointFromParams(r *request.Request) {
|
||||
bucket, ok := bucketNameFromReqParams(r.Params)
|
||||
if !ok {
|
||||
|
@ -124,15 +146,14 @@ func updateBucketEndpointFromParams(r *request.Request) {
|
|||
func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error {
|
||||
// Accelerate not supported
|
||||
if aws.BoolValue(req.Config.S3UseAccelerate) {
|
||||
return newClientConfiguredForAccelerateError(accessPoint,
|
||||
return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
|
||||
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
|
||||
}
|
||||
|
||||
// Ignore the disable host prefix for access points since custom endpoints
|
||||
// are not supported.
|
||||
// Ignore the disable host prefix for access points
|
||||
req.Config.DisableEndpointHostPrefix = aws.Bool(false)
|
||||
|
||||
if err := accessPointEndpointBuilder(accessPoint).Build(req); err != nil {
|
||||
if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -141,93 +162,33 @@ func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.Acce
|
|||
return nil
|
||||
}
|
||||
|
||||
func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error {
|
||||
// Accelerate not supported
|
||||
if aws.BoolValue(req.Config.S3UseAccelerate) {
|
||||
return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
|
||||
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
|
||||
}
|
||||
|
||||
// Dualstack not supported
|
||||
if aws.BoolValue(req.Config.UseDualStack) {
|
||||
return s3shared.NewClientConfiguredForDualStackError(accessPoint,
|
||||
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
|
||||
}
|
||||
|
||||
// Ignore the disable host prefix for access points
|
||||
req.Config.DisableEndpointHostPrefix = aws.Bool(false)
|
||||
|
||||
if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
removeBucketFromPath(req.HTTPRequest.URL)
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeBucketFromPath(u *url.URL) {
|
||||
u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
|
||||
if u.Path == "" {
|
||||
u.Path = "/"
|
||||
}
|
||||
}
|
||||
|
||||
type accessPointEndpointBuilder arn.AccessPointARN
|
||||
|
||||
const (
|
||||
accessPointPrefixLabel = "accesspoint"
|
||||
accountIDPrefixLabel = "accountID"
|
||||
accesPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}."
|
||||
)
|
||||
|
||||
func (a accessPointEndpointBuilder) Build(req *request.Request) error {
|
||||
resolveRegion := arn.AccessPointARN(a).Region
|
||||
cfgRegion := aws.StringValue(req.Config.Region)
|
||||
|
||||
if isFIPS(cfgRegion) {
|
||||
if aws.BoolValue(req.Config.S3UseARNRegion) && isCrossRegion(req, resolveRegion) {
|
||||
// FIPS with cross region is not supported, the SDK must fail
|
||||
// because there is no well defined method for SDK to construct a
|
||||
// correct FIPS endpoint.
|
||||
return newClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a),
|
||||
req.ClientInfo.PartitionID, cfgRegion, nil)
|
||||
}
|
||||
resolveRegion = cfgRegion
|
||||
}
|
||||
|
||||
endpoint, err := resolveRegionalEndpoint(req, resolveRegion)
|
||||
if err != nil {
|
||||
return newFailedToResolveEndpointError(arn.AccessPointARN(a),
|
||||
req.ClientInfo.PartitionID, cfgRegion, err)
|
||||
}
|
||||
|
||||
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
const serviceEndpointLabel = "s3-accesspoint"
|
||||
|
||||
// dualstack provided by endpoint resolver
|
||||
cfgHost := req.HTTPRequest.URL.Host
|
||||
if strings.HasPrefix(cfgHost, "s3") {
|
||||
req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:]
|
||||
}
|
||||
|
||||
protocol.HostPrefixBuilder{
|
||||
Prefix: accesPointPrefixTemplate,
|
||||
LabelsFn: a.hostPrefixLabelValues,
|
||||
}.Build(req)
|
||||
|
||||
req.ClientInfo.SigningName = endpoint.SigningName
|
||||
req.ClientInfo.SigningRegion = endpoint.SigningRegion
|
||||
|
||||
err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
|
||||
if err != nil {
|
||||
return newInvalidARNError(arn.AccessPointARN(a), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
|
||||
return map[string]string{
|
||||
accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName,
|
||||
accountIDPrefixLabel: arn.AccessPointARN(a).AccountID,
|
||||
}
|
||||
}
|
||||
|
||||
func resolveRegionalEndpoint(r *request.Request, region string) (endpoints.ResolvedEndpoint, error) {
|
||||
return r.Config.EndpointResolver.EndpointFor(EndpointsID, region, func(opts *endpoints.Options) {
|
||||
opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL)
|
||||
opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack)
|
||||
opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
|
||||
})
|
||||
}
|
||||
|
||||
func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
|
||||
endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL))
|
||||
|
||||
r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
|
||||
if err != nil {
|
||||
return awserr.New(request.ErrCodeSerialization,
|
||||
"failed to parse endpoint URL", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,187 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/s3shared"
|
||||
"github.com/aws/aws-sdk-go/internal/s3shared/arn"
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
accessPointPrefixLabel = "accesspoint"
|
||||
accountIDPrefixLabel = "accountID"
|
||||
accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}."
|
||||
|
||||
outpostPrefixLabel = "outpost"
|
||||
outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}."
|
||||
)
|
||||
|
||||
// hasCustomEndpoint returns true if endpoint is a custom endpoint
|
||||
func hasCustomEndpoint(r *request.Request) bool {
|
||||
return len(aws.StringValue(r.Config.Endpoint)) > 0
|
||||
}
|
||||
|
||||
// accessPointEndpointBuilder represents the endpoint builder for access point arn
|
||||
type accessPointEndpointBuilder arn.AccessPointARN
|
||||
|
||||
// build builds the endpoint for corresponding access point arn
|
||||
//
|
||||
// For building an endpoint from access point arn, format used is:
|
||||
// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix}
|
||||
// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com
|
||||
//
|
||||
// Access Point Endpoint requests are signed using "s3" as signing name.
|
||||
//
|
||||
func (a accessPointEndpointBuilder) build(req *request.Request) error {
|
||||
resolveService := arn.AccessPointARN(a).Service
|
||||
resolveRegion := arn.AccessPointARN(a).Region
|
||||
cfgRegion := aws.StringValue(req.Config.Region)
|
||||
|
||||
if s3shared.IsFIPS(cfgRegion) {
|
||||
if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) {
|
||||
// FIPS with cross region is not supported, the SDK must fail
|
||||
// because there is no well defined method for SDK to construct a
|
||||
// correct FIPS endpoint.
|
||||
return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a),
|
||||
req.ClientInfo.PartitionID, cfgRegion, nil)
|
||||
}
|
||||
resolveRegion = cfgRegion
|
||||
}
|
||||
|
||||
endpoint, err := resolveRegionalEndpoint(req, resolveRegion, resolveService)
|
||||
if err != nil {
|
||||
return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a),
|
||||
req.ClientInfo.PartitionID, cfgRegion, err)
|
||||
}
|
||||
|
||||
endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))
|
||||
|
||||
if !hasCustomEndpoint(req) {
|
||||
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
|
||||
return err
|
||||
}
|
||||
const serviceEndpointLabel = "s3-accesspoint"
|
||||
|
||||
// dual stack provided by endpoint resolver
|
||||
cfgHost := req.HTTPRequest.URL.Host
|
||||
if strings.HasPrefix(cfgHost, "s3") {
|
||||
req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:]
|
||||
}
|
||||
}
|
||||
|
||||
protocol.HostPrefixBuilder{
|
||||
Prefix: accessPointPrefixTemplate,
|
||||
LabelsFn: a.hostPrefixLabelValues,
|
||||
}.Build(req)
|
||||
|
||||
// signer redirection
|
||||
redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion)
|
||||
|
||||
err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
|
||||
if err != nil {
|
||||
return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
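As a caller-level illustration of the endpoint shape described above (assumed usage, not part of this diff): when the bucket parameter is an access point ARN, the builder resolves the {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix} host for you. The bucket ARN and key below are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	// Passing an access point ARN as the bucket; the endpoint builder above
	// rewrites the host to myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com.
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		fmt.Println("get object failed:", err)
		return
	}
	defer out.Body.Close()
}
```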
|
||||
func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
|
||||
return map[string]string{
|
||||
accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName,
|
||||
accountIDPrefixLabel: arn.AccessPointARN(a).AccountID,
|
||||
}
|
||||
}
|
||||
|
||||
// outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn.
|
||||
type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN
|
||||
|
||||
// build builds an endpoint corresponding to the outpost access point arn.
|
||||
//
|
||||
// For building an endpoint from outpost access point arn, format used is:
|
||||
// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix}
|
||||
// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com
|
||||
//
|
||||
// Outpost AccessPoint Endpoint request are signed using "s3-outposts" as signing name.
|
||||
//
|
||||
func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error {
|
||||
resolveRegion := o.Region
|
||||
resolveService := o.Service
|
||||
|
||||
endpointsID := resolveService
|
||||
if resolveService == "s3-outposts" {
|
||||
endpointsID = "s3"
|
||||
}
|
||||
|
||||
endpoint, err := resolveRegionalEndpoint(req, resolveRegion, endpointsID)
|
||||
if err != nil {
|
||||
return s3shared.NewFailedToResolveEndpointError(o,
|
||||
req.ClientInfo.PartitionID, resolveRegion, err)
|
||||
}
|
||||
|
||||
endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))
|
||||
|
||||
if !hasCustomEndpoint(req) {
|
||||
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
|
||||
return err
|
||||
}
|
||||
// add url host as s3-outposts
|
||||
cfgHost := req.HTTPRequest.URL.Host
|
||||
if strings.HasPrefix(cfgHost, endpointsID) {
|
||||
req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):]
|
||||
}
|
||||
}
|
||||
|
||||
protocol.HostPrefixBuilder{
|
||||
Prefix: outpostAccessPointPrefixTemplate,
|
||||
LabelsFn: o.hostPrefixLabelValues,
|
||||
}.Build(req)
|
||||
|
||||
// set the signing region, name to resolved names from ARN
|
||||
redirectSigner(req, resolveService, resolveRegion)
|
||||
|
||||
err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
|
||||
if err != nil {
|
||||
return s3shared.NewInvalidARNError(o, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
|
||||
return map[string]string{
|
||||
accessPointPrefixLabel: o.AccessPointName,
|
||||
accountIDPrefixLabel: o.AccountID,
|
||||
outpostPrefixLabel: o.OutpostID,
|
||||
}
|
||||
}
|
||||
|
||||
func resolveRegionalEndpoint(r *request.Request, region string, endpointsID string) (endpoints.ResolvedEndpoint, error) {
|
||||
return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) {
|
||||
opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL)
|
||||
opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack)
|
||||
opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
|
||||
})
|
||||
}
|
||||
|
||||
func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
|
||||
|
||||
r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
|
||||
if err != nil {
|
||||
return awserr.New(request.ErrCodeSerialization,
|
||||
"failed to parse endpoint URL", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// redirectSigner sets signing name, signing region for a request
|
||||
func redirectSigner(req *request.Request, signingName string, signingRegion string) {
|
||||
req.ClientInfo.SigningName = signingName
|
||||
req.ClientInfo.SigningRegion = signingRegion
|
||||
}
|
|
@ -1,151 +0,0 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/s3/internal/arn"
|
||||
)
|
||||
|
||||
const (
|
||||
invalidARNErrorErrCode = "InvalidARNError"
|
||||
configurationErrorErrCode = "ConfigurationError"
|
||||
)
|
||||
|
||||
type invalidARNError struct {
|
||||
message string
|
||||
resource arn.Resource
|
||||
origErr error
|
||||
}
|
||||
|
||||
func (e invalidARNError) Error() string {
|
||||
var extra string
|
||||
if e.resource != nil {
|
||||
extra = "ARN: " + e.resource.String()
|
||||
}
|
||||
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
|
||||
}
|
||||
|
||||
func (e invalidARNError) Code() string {
|
||||
return invalidARNErrorErrCode
|
||||
}
|
||||
|
||||
func (e invalidARNError) Message() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
func (e invalidARNError) OrigErr() error {
|
||||
return e.origErr
|
||||
}
|
||||
|
||||
func newInvalidARNError(resource arn.Resource, err error) invalidARNError {
|
||||
return invalidARNError{
|
||||
message: "invalid ARN",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
func newInvalidARNWithCustomEndpointError(resource arn.Resource, err error) invalidARNError {
|
||||
return invalidARNError{
|
||||
message: "resource ARN not supported with custom client endpoints",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
// ARN not supported for the target partition
|
||||
func newInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) invalidARNError {
|
||||
return invalidARNError{
|
||||
message: "resource ARN not supported for the target ARN partition",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
type configurationError struct {
|
||||
message string
|
||||
resource arn.Resource
|
||||
clientPartitionID string
|
||||
clientRegion string
|
||||
origErr error
|
||||
}
|
||||
|
||||
func (e configurationError) Error() string {
|
||||
extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
|
||||
e.resource, e.clientPartitionID, e.clientRegion)
|
||||
|
||||
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
|
||||
}
|
||||
|
||||
func (e configurationError) Code() string {
|
||||
return configurationErrorErrCode
|
||||
}
|
||||
|
||||
func (e configurationError) Message() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
func (e configurationError) OrigErr() error {
|
||||
return e.origErr
|
||||
}
|
||||
|
||||
func newClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "client partition does not match provided ARN partition",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
func newClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "client region does not match provided ARN region",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
func newFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "endpoint resolver failed to find an endpoint for the provided ARN region",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
func newClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "client configured for fips but cross-region resource ARN provided",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
func newClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "client configured for S3 Accelerate but is supported with resource ARN",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
func newClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
|
@ -8,7 +8,7 @@ const (
|
|||
// "BucketAlreadyExists".
|
||||
//
|
||||
// The requested bucket name is not available. The bucket namespace is shared
|
||||
// by all users of the system. Please select a different name and try again.
|
||||
// by all users of the system. Select a different name and try again.
|
||||
ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
|
||||
|
||||
// ErrCodeBucketAlreadyOwnedByYou for service response error code
|
||||
|
@ -21,6 +21,12 @@ const (
|
|||
// bucket access control lists (ACLs).
|
||||
ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
|
||||
|
||||
// ErrCodeInvalidObjectState for service response error code
|
||||
// "InvalidObjectState".
|
||||
//
|
||||
// Object is archived and inaccessible until restored.
|
||||
ErrCodeInvalidObjectState = "InvalidObjectState"
|
||||
|
||||
// ErrCodeNoSuchBucket for service response error code
|
||||
// "NoSuchBucket".
|
||||
//
|
||||
|
|
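The new ErrCodeInvalidObjectState constant is typically checked after a GetObject on an archived object. A hedged sketch of reacting to it with RestoreObject follows; the bucket, key, and restore settings are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	_, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("archived-object"),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeInvalidObjectState {
		// Object is archived (for example in Glacier); request a temporary restore first.
		_, restoreErr := svc.RestoreObject(&s3.RestoreObjectInput{
			Bucket:         aws.String("example-bucket"),
			Key:            aws.String("archived-object"),
			RestoreRequest: &s3.RestoreRequest{Days: aws.Int64(1)},
		})
		fmt.Println("restore requested, err:", restoreErr)
	}
}
```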
|
@ -48,6 +48,9 @@ const (
|
|||
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
|
||||
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
|
||||
c := p.ClientConfig(EndpointsID, cfgs...)
|
||||
if c.SigningNameDerived || len(c.SigningName) == 0 {
|
||||
c.SigningName = "s3"
|
||||
}
|
||||
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
|
||||
}
|
||||
|
||||
|
|
(File diff suppressed because it is too large.)
|
@ -0,0 +1,44 @@
|
|||
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
|
||||
|
||||
// Package sso provides the client and types for making API
|
||||
// requests to AWS Single Sign-On.
|
||||
//
|
||||
// AWS Single Sign-On Portal is a web service that makes it easy for you to
|
||||
// assign user access to AWS SSO resources such as the user portal. Users can
|
||||
// get AWS account applications and roles assigned to them and get federated
|
||||
// into the application.
|
||||
//
|
||||
// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
|
||||
// in the AWS SSO User Guide.
|
||||
//
|
||||
// This API reference guide describes the AWS SSO Portal operations that you
|
||||
// can call programmatically and includes detailed information on data types
|
||||
// and errors.
|
||||
//
|
||||
// AWS provides SDKs that consist of libraries and sample code for various programming
|
||||
// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
|
||||
// provide a convenient way to create programmatic access to AWS SSO and other
|
||||
// AWS services. For more information about the AWS SDKs, including how to download
|
||||
// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
|
||||
//
|
||||
// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
|
||||
//
|
||||
// See sso package documentation for more information.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
|
||||
//
|
||||
// Using the Client
|
||||
//
|
||||
// To contact AWS Single Sign-On with the SDK use the New function to create
|
||||
// a new service client. With that client you can make API requests to the service.
|
||||
// These clients are safe to use concurrently.
|
||||
//
|
||||
// See the SDK's documentation for more information on how to use the SDK.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/
|
||||
//
|
||||
// See aws.Config documentation for more information on configuring SDK clients.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
|
||||
//
|
||||
// See the AWS Single Sign-On client SSO for more
|
||||
// information on creating client for this service.
|
||||
// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
|
||||
package sso
|
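A hedged usage sketch of the newly vendored sso client follows; the access token value is a placeholder, and the input/output field names are read off the generated types rather than this diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sso"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := sso.New(sess)

	// List the AWS accounts visible to an SSO access token obtained out of band.
	err := svc.ListAccountsPages(&sso.ListAccountsInput{
		AccessToken: aws.String("<sso-access-token>"),
	}, func(page *sso.ListAccountsOutput, lastPage bool) bool {
		for _, acct := range page.AccountList {
			fmt.Println(aws.StringValue(acct.AccountId), aws.StringValue(acct.AccountName))
		}
		return true // keep paging
	})
	if err != nil {
		fmt.Println("list accounts failed:", err)
	}
}
```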
|
@ -0,0 +1,44 @@
|
|||
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
|
||||
|
||||
package sso
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
// ErrCodeInvalidRequestException for service response error code
|
||||
// "InvalidRequestException".
|
||||
//
|
||||
// Indicates that a problem occurred with the input to the request. For example,
|
||||
// a required parameter might be missing or out of range.
|
||||
ErrCodeInvalidRequestException = "InvalidRequestException"
|
||||
|
||||
// ErrCodeResourceNotFoundException for service response error code
|
||||
// "ResourceNotFoundException".
|
||||
//
|
||||
// The specified resource doesn't exist.
|
||||
ErrCodeResourceNotFoundException = "ResourceNotFoundException"
|
||||
|
||||
// ErrCodeTooManyRequestsException for service response error code
|
||||
// "TooManyRequestsException".
|
||||
//
|
||||
// Indicates that the request is being made too frequently and is more than
|
||||
// what the server can handle.
|
||||
ErrCodeTooManyRequestsException = "TooManyRequestsException"
|
||||
|
||||
// ErrCodeUnauthorizedException for service response error code
|
||||
// "UnauthorizedException".
|
||||
//
|
||||
// Indicates that the request is not authorized. This can happen due to an invalid
|
||||
// access token in the request.
|
||||
ErrCodeUnauthorizedException = "UnauthorizedException"
|
||||
)
|
||||
|
||||
var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
|
||||
"InvalidRequestException": newErrorInvalidRequestException,
|
||||
"ResourceNotFoundException": newErrorResourceNotFoundException,
|
||||
"TooManyRequestsException": newErrorTooManyRequestsException,
|
||||
"UnauthorizedException": newErrorUnauthorizedException,
|
||||
}
|
|
@ -0,0 +1,104 @@
|
|||
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
|
||||
|
||||
package sso
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/aws/signer/v4"
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/restjson"
|
||||
)
|
||||
|
||||
// SSO provides the API operation methods for making requests to
|
||||
// AWS Single Sign-On. See this package's package overview docs
|
||||
// for details on the service.
|
||||
//
|
||||
// SSO methods are safe to use concurrently. It is not safe to
|
||||
// modify or mutate any of the struct's properties though.
|
||||
type SSO struct {
|
||||
*client.Client
|
||||
}
|
||||
|
||||
// Used for custom client initialization logic
|
||||
var initClient func(*client.Client)
|
||||
|
||||
// Used for custom request initialization logic
|
||||
var initRequest func(*request.Request)
|
||||
|
||||
// Service information constants
|
||||
const (
|
||||
ServiceName = "SSO" // Name of service.
|
||||
EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
|
||||
ServiceID = "SSO" // ServiceID is a unique identifier of a specific service.
|
||||
)
|
||||
|
||||
// New creates a new instance of the SSO client with a session.
|
||||
// If additional configuration is needed for the client instance use the optional
|
||||
// aws.Config parameter to add your extra config.
|
||||
//
|
||||
// Example:
|
||||
// mySession := session.Must(session.NewSession())
|
||||
//
|
||||
// // Create a SSO client from just a session.
|
||||
// svc := sso.New(mySession)
|
||||
//
|
||||
// // Create a SSO client with additional configuration
|
||||
// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
|
||||
func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
|
||||
c := p.ClientConfig(EndpointsID, cfgs...)
|
||||
if c.SigningNameDerived || len(c.SigningName) == 0 {
|
||||
c.SigningName = "awsssoportal"
|
||||
}
|
||||
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
|
||||
}
|
||||
|
||||
// newClient creates, initializes and returns a new service client instance.
|
||||
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSO {
|
||||
svc := &SSO{
|
||||
Client: client.New(
|
||||
cfg,
|
||||
metadata.ClientInfo{
|
||||
ServiceName: ServiceName,
|
||||
ServiceID: ServiceID,
|
||||
SigningName: signingName,
|
||||
SigningRegion: signingRegion,
|
||||
PartitionID: partitionID,
|
||||
Endpoint: endpoint,
|
||||
APIVersion: "2019-06-10",
|
||||
},
|
||||
handlers,
|
||||
),
|
||||
}
|
||||
|
||||
// Handlers
|
||||
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
|
||||
svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
|
||||
svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
|
||||
svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
|
||||
svc.Handlers.UnmarshalError.PushBackNamed(
|
||||
protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
|
||||
)
|
||||
|
||||
// Run custom client initialization if present
|
||||
if initClient != nil {
|
||||
initClient(svc.Client)
|
||||
}
|
||||
|
||||
return svc
|
||||
}
|
||||
|
||||
// newRequest creates a new request for a SSO operation and runs any
|
||||
// custom request initialization.
|
||||
func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request {
|
||||
req := c.NewRequest(op, params, data)
|
||||
|
||||
// Run custom request initialization if present
|
||||
if initRequest != nil {
|
||||
initRequest(req)
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go (86 lines, generated, vendored, new file)
|
@ -0,0 +1,86 @@
|
|||
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
|
||||
|
||||
// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client
|
||||
// for testing your code.
|
||||
//
|
||||
// It is important to note that this interface will have breaking changes
|
||||
// when the service model is updated and adds new API operations, paginators,
|
||||
// and waiters.
|
||||
package ssoiface
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/sso"
|
||||
)
|
||||
|
||||
// SSOAPI provides an interface to enable mocking the
|
||||
// sso.SSO service client's API operation,
|
||||
// paginators, and waiters. This makes unit testing your code that calls out
|
||||
// to the SDK's service client's calls easier.
|
||||
//
|
||||
// The best way to use this interface is so the SDK's service client's calls
|
||||
// can be stubbed out for unit testing your code with the SDK without needing
|
||||
// to inject custom request handlers into the SDK's request pipeline.
|
||||
//
|
||||
// // myFunc uses an SDK service client to make a request to
|
||||
// // AWS Single Sign-On.
|
||||
// func myFunc(svc ssoiface.SSOAPI) bool {
|
||||
// // Make svc.GetRoleCredentials request
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// sess := session.New()
|
||||
// svc := sso.New(sess)
|
||||
//
|
||||
// myFunc(svc)
|
||||
// }
|
||||
//
|
||||
// In your _test.go file:
|
||||
//
|
||||
// // Define a mock struct to be used in your unit tests of myFunc.
|
||||
// type mockSSOClient struct {
|
||||
// ssoiface.SSOAPI
|
||||
// }
|
||||
// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
|
||||
// // mock response/functionality
|
||||
// }
|
||||
//
|
||||
// func TestMyFunc(t *testing.T) {
|
||||
// // Setup Test
|
||||
// mockSvc := &mockSSOClient{}
|
||||
//
|
||||
// myfunc(mockSvc)
|
||||
//
|
||||
// // Verify myFunc's functionality
|
||||
// }
|
||||
//
|
||||
// It is important to note that this interface will have breaking changes
|
||||
// when the service model is updated and adds new API operations, paginators,
|
||||
// and waiters. It's suggested to use the pattern above for testing, or using
|
||||
// tooling to generate mocks to satisfy the interfaces.
|
||||
type SSOAPI interface {
|
||||
GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error)
|
||||
GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error)
|
||||
GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput)
|
||||
|
||||
ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error)
|
||||
ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error)
|
||||
ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput)
|
||||
|
||||
ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error
|
||||
ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error
|
||||
|
||||
ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error)
|
||||
ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error)
|
||||
ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput)
|
||||
|
||||
ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error
|
||||
ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error
|
||||
|
||||
Logout(*sso.LogoutInput) (*sso.LogoutOutput, error)
|
||||
LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error)
|
||||
LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput)
|
||||
}
|
||||
|
||||
var _ SSOAPI = (*sso.SSO)(nil)
|
|
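Concretely, the mocking pattern described in the comment above could look like the sketch below in a _test.go file; the stubbed values are arbitrary, and the output field names are assumed from the generated types:

```go
package myapp_test

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sso"
	"github.com/aws/aws-sdk-go/service/sso/ssoiface"
)

// mockSSOClient satisfies ssoiface.SSOAPI; only the method under test is overridden.
type mockSSOClient struct {
	ssoiface.SSOAPI
}

func (m *mockSSOClient) GetRoleCredentials(in *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
	return &sso.GetRoleCredentialsOutput{
		RoleCredentials: &sso.RoleCredentials{
			AccessKeyId:     aws.String("AKIAEXAMPLE"),
			SecretAccessKey: aws.String("secret"),
		},
	}, nil
}

// myFunc stands in for application code that accepts the interface, as in the doc above.
func myFunc(svc ssoiface.SSOAPI) bool {
	out, err := svc.GetRoleCredentials(&sso.GetRoleCredentialsInput{})
	return err == nil && out.RoleCredentials != nil
}

func TestMyFunc(t *testing.T) {
	if !myFunc(&mockSSOClient{}) {
		t.Fatal("expected mocked credentials")
	}
}
```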
@ -207,6 +207,10 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
|
|||
// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// * ErrCodeExpiredTokenException "ExpiredTokenException"
|
||||
// The web identity token that was passed is expired or is not valid. Get a
|
||||
// new identity token from the identity provider and then retry the request.
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
|
||||
func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
|
||||
req, out := c.AssumeRoleRequest(input)
|
||||
|
@ -626,7 +630,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
|||
// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
|
||||
// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
|
||||
//
|
||||
// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
|
||||
// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
|
||||
// Walk through the process of authenticating through Login with Amazon,
|
||||
// Facebook, or Google, getting temporary security credentials, and then
|
||||
// using those credentials to make a request to AWS.
|
||||
|
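The hunk above only swaps a documentation link, but for orientation, here is a hedged sketch of the AssumeRoleWithWebIdentity call that documentation describes; the role ARN, session name, and token are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Exchange an OIDC token from a web identity provider for temporary credentials.
	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"),
		RoleSessionName:  aws.String("example-session"),
		WebIdentityToken: aws.String("<oidc-token>"),
	})
	if err != nil {
		fmt.Println("assume role failed:", err)
		return
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}
```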
@ -1788,7 +1792,7 @@ type AssumeRoleWithSAMLInput struct {
|
|||
// in the IAM User Guide.
|
||||
//
|
||||
// SAMLAssertion is a required field
|
||||
SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"`
|
||||
SAMLAssertion *string `min:"4" type:"string" required:"true"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
|
@ -2100,7 +2104,7 @@ type AssumeRoleWithWebIdentityInput struct {
|
|||
// the application makes an AssumeRoleWithWebIdentity call.
|
||||
//
|
||||
// WebIdentityToken is a required field
|
||||
WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"`
|
||||
WebIdentityToken *string `min:"4" type:"string" required:"true"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
|
|
|
@ -3,87 +3,11 @@
// Package sts provides the client and types for making API
// requests to AWS Security Token Service.
//
// The AWS Security Token Service (STS) is a web service that enables you to
// request temporary, limited-privilege credentials for AWS Identity and Access
// Management (IAM) users or for users that you authenticate (federated users).
// This guide provides descriptions of the STS API. For more detailed information
// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// For information about setting up signatures and authorization through the
// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
// in the AWS General Reference. For general information about the Query API,
// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
// in Using IAM. For information about using security tokens with other AWS
// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
// in the IAM User Guide.
//
// If you're new to AWS and need additional technical information about a specific
// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
// (http://aws.amazon.com/documentation/).
//
// Endpoints
//
// By default, AWS Security Token Service (STS) is available as a global service,
// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com.
// Global requests map to the US East (N. Virginia) region. AWS recommends using
// Regional AWS STS endpoints instead of the global endpoint to reduce latency,
// build in redundancy, and increase session token validity. For more information,
// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// Most AWS Regions are enabled for operations in all AWS services by default.
// Those Regions are automatically activated for use with AWS STS. Some Regions,
// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more
// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html)
// in the AWS General Reference. When you enable these AWS Regions, they are
// automatically activated for use with AWS STS. You cannot activate the STS
// endpoint for a Region that is disabled. Tokens that are valid in all AWS
// Regions are longer than tokens that are valid in Regions that are enabled
// by default. Changing this setting might affect existing systems where you
// temporarily store tokens. For more information, see Managing Global Endpoint
// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens)
// in the IAM User Guide.
//
// After you activate a Region for use with AWS STS, you can direct AWS STS
// API calls to that Region. AWS STS recommends that you provide both the Region
// and endpoint when you make calls to a Regional endpoint. You can provide
// the Region alone for manually enabled Regions, such as Asia Pacific (Hong
// Kong). In this case, the calls are directed to the STS Regional endpoint.
// However, if you provide the Region alone for Regions enabled by default,
// the calls are directed to the global endpoint of https://sts.amazonaws.com.
//
// To view the list of AWS STS endpoints and whether they are active by default,
// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code)
// in the IAM User Guide.
//
// Recording API requests
//
// STS supports AWS CloudTrail, which is a service that records AWS calls for
// your AWS account and delivers log files to an Amazon S3 bucket. By using
// information collected by CloudTrail, you can determine what requests were
// successfully made to STS, who made the request, when it was made, and so
// on.
//
// If you activate AWS STS endpoints in Regions other than the default global
// endpoint, then you must also turn on CloudTrail logging in those Regions.
// This is necessary to record any AWS STS API calls that are made in those
// Regions. For more information, see Turning On CloudTrail in Additional Regions
// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html)
// in the AWS CloudTrail User Guide.
//
// AWS Security Token Service (STS) is a global service with a single endpoint
// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls
// to a global service. However, because this endpoint is physically located
// in the US East (N. Virginia) Region, your logs list us-east-1 as the event
// Region. CloudTrail does not write these logs to the US East (Ohio) Region
// unless you choose to include global service logs in that Region. CloudTrail
// writes calls to all Regional endpoints to their respective Regions. For example,
// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio)
// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU
// (Frankfurt) Region.
//
// To learn more about CloudTrail, including how to turn it on and find your
// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
// AWS Security Token Service (STS) enables you to request temporary, limited-privilege
// credentials for AWS Identity and Access Management (IAM) users or for users
// that you authenticate (federated users). This guide provides descriptions
// of the STS API. For more information about using this service, see Temporary
// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
//
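Not part of the diff above — a minimal sketch of requesting temporary credentials through the upgraded STS client, assuming the aws-sdk-go v1 API vendored here; the role ARN and token values are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// A session picks up region configuration from the environment.
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// Placeholder role ARN and OIDC token, not values from this change.
	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"),
		RoleSessionName:  aws.String("example-session"),
		WebIdentityToken: aws.String("<oidc-token>"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}
```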
@ -51,43 +51,43 @@ var (

// IsInvalidArgument returns true if the error is due to an invalid argument
func IsInvalidArgument(err error) bool {
return errors.Cause(err) == ErrInvalidArgument
return errors.Is(err, ErrInvalidArgument)
}

// IsNotFound returns true if the error is due to a missing object
func IsNotFound(err error) bool {
return errors.Cause(err) == ErrNotFound
return errors.Is(err, ErrNotFound)
}

// IsAlreadyExists returns true if the error is due to an already existing
// metadata item
func IsAlreadyExists(err error) bool {
return errors.Cause(err) == ErrAlreadyExists
return errors.Is(err, ErrAlreadyExists)
}

// IsFailedPrecondition returns true if an operation could not proceed to the
// lack of a particular condition
func IsFailedPrecondition(err error) bool {
return errors.Cause(err) == ErrFailedPrecondition
return errors.Is(err, ErrFailedPrecondition)
}

// IsUnavailable returns true if the error is due to a resource being unavailable
func IsUnavailable(err error) bool {
return errors.Cause(err) == ErrUnavailable
return errors.Is(err, ErrUnavailable)
}

// IsNotImplemented returns true if the error is due to not being implemented
func IsNotImplemented(err error) bool {
return errors.Cause(err) == ErrNotImplemented
return errors.Is(err, ErrNotImplemented)
}

// IsCanceled returns true if the error is due to `context.Canceled`.
func IsCanceled(err error) bool {
return errors.Cause(err) == context.Canceled
return errors.Is(err, context.Canceled)
}

// IsDeadlineExceeded returns true if the error is due to
// `context.DeadlineExceeded`.
func IsDeadlineExceeded(err error) bool {
return errors.Cause(err) == context.DeadlineExceeded
return errors.Is(err, context.DeadlineExceeded)
}
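The hunk above moves containerd's error checks from comparing `errors.Cause(err)` to using the standard library's `errors.Is`, which also matches sentinels wrapped with `%w`. A small self-contained sketch (the sentinel name here stands in for containerd's errdefs values):

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for a sentinel such as errdefs.ErrNotFound.
var errNotFound = errors.New("not found")

func lookup() error {
	// Wrapping with %w keeps the sentinel reachable via errors.Is.
	return fmt.Errorf("lookup image: %w", errNotFound)
}

func main() {
	err := lookup()
	fmt.Println(errors.Is(err, errNotFound)) // true, even through the wrap
	fmt.Println(err == errNotFound)          // false, direct comparison misses it
}
```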
@ -11,7 +11,7 @@ import (
// given config.
func GenerateClients(nodes int, config *Config) ([]*Client, error) {
clients := make([]*Client, nodes)
for i, _ := range clients {
for i := range clients {
client, err := NewClient(config)
if err != nil {
return nil, err

@ -146,7 +146,7 @@ func Simulate(clients []*Client, truth [][]time.Duration, cycles int) {
nodes := len(clients)
for cycle := 0; cycle < cycles; cycle++ {
for i, _ := range clients {
for i := range clients {
if j := rand.Intn(nodes); j != i {
c := clients[j].GetCoordinate()
rtt := truth[i][j]
vendor/github.com/hashicorp/vault-plugin-secrets-azure/path_service_principal.go (generated, vendored)

@ -46,9 +46,14 @@ func pathServicePrincipal(b *azureSecretBackend) *framework.Path {
Description: "Name of the Vault role",
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: b.pathSPRead,
Operations: map[logical.Operation]framework.OperationHandler{
logical.ReadOperation: &framework.PathOperation{
Callback: b.pathSPRead,
ForwardPerformanceSecondary: true,
ForwardPerformanceStandby: true,
},
},

HelpSynopsis: pathServicePrincipalHelpSyn,
HelpDescription: pathServicePrincipalHelpDesc,
}
@ -1,16 +0,0 @@
language: go
sudo: false

go:
- "1.12.x"
- "1.13.x"
- tip

env:
- GO111MODULE=on

script:
- go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./...

after_success:
- bash <(curl -s https://codecov.io/bash)
@ -26,8 +26,8 @@ avoiding breaking changes wherever reasonable. We support the last two versions
A not-so-up-to-date-list-that-may-be-actually-current:

* https://github.com/coredns/coredns
* https://cloudflare.com
* https://github.com/abh/geodns
* https://github.com/baidu/bfe
* http://www.statdns.com/
* http://www.dnsinspect.com/
* https://github.com/chuangbo/jianbing-dictionary-dns

@ -41,11 +41,9 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/StalkR/dns-reverse-proxy
* https://github.com/tianon/rawdns
* https://mesosphere.github.io/mesos-dns/
* https://pulse.turbobytes.com/
* https://github.com/fcambus/statzone
* https://github.com/benschw/dns-clb-go
* https://github.com/corny/dnscheck for <http://public-dns.info/>
* https://namesmith.io
* https://github.com/miekg/unbound
* https://github.com/miekg/exdns
* https://dnslookup.org

@ -54,22 +52,28 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/mehrdadrad/mylg
* https://github.com/bamarni/dockness
* https://github.com/fffaraz/microdns
* http://kelda.io
* https://github.com/ipdcode/hades <https://jd.com>
* https://github.com/StackExchange/dnscontrol/
* https://www.dnsperf.com/
* https://dnssectest.net/
* https://dns.apebits.com
* https://github.com/oif/apex
* https://github.com/jedisct1/dnscrypt-proxy
* https://github.com/jedisct1/rpdns
* https://github.com/xor-gate/sshfp
* https://github.com/rs/dnstrace
* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss))
* https://github.com/semihalev/sdns
* https://render.com
* https://github.com/peterzen/goresolver
* https://github.com/folbricht/routedns
* https://domainr.com/
* https://zonedb.org/
* https://router7.org/
* https://github.com/fortio/dnsping
* https://github.com/Luzilla/dnsbl_exporter
* https://github.com/bodgit/tsig
* https://github.com/v2fly/v2ray-core (test only)
* https://kuma.io/

Send pull request if you want to be listed here.

@ -127,6 +131,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 2915 - NAPTR record
* 2929 - DNS IANA Considerations
* 3110 - RSASHA1 DNS keys
* 3123 - APL record
* 3225 - DO bit (DNSSEC OK)
* 340{1,2,3} - NAPTR record
* 3445 - Limiting the scope of (DNS)KEY

@ -165,6 +170,8 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 7873 - Domain Name System (DNS) Cookies
* 8080 - EdDSA for DNSSEC
* 8499 - DNS Terminology
* 8659 - DNS Certification Authority Authorization (CAA) Resource Record
* 8976 - Message Digest for DNS Zones (ZONEMD RR)

## Loosely Based Upon
@ -23,6 +23,7 @@ type Conn struct {
net.Conn // a net.Conn holding the connection
UDPSize uint16 // minimum receive buffer for UDP messages
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
tsigRequestMAC string
}

@ -34,12 +35,13 @@ type Client struct {
Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more
// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
// Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext)
// Client.Dialer) or context.Context.Deadline (see ExchangeContext)
Timeout time.Duration
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations.
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
group singleflight
}

@ -106,7 +108,7 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
if err != nil {
return nil, err
}
conn.UDPSize = c.UDPSize
return conn, nil
}

@ -125,14 +127,36 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
// attribute appropriately
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
co, err := c.Dial(address)
if err != nil {
return nil, 0, err
}
defer co.Close()
return c.ExchangeWithConn(m, co)
}

// ExchangeWithConn has the same behavior as Exchange, just with a predetermined connection
// that will be used instead of creating a new one.
// Usage pattern with a *dns.Client:
//	c := new(dns.Client)
//	// connection management logic goes here
//
//	conn := c.Dial(address)
//	in, rtt, err := c.ExchangeWithConn(message, conn)
//
// This allows users of the library to implement their own connection management,
// as opposed to Exchange, which will always use new connections and incur the added overhead
// that entails when using "tcp" and especially "tcp-tls" clients.
func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight {
return c.exchange(m, address)
return c.exchange(m, conn)
}
q := m.Question[0]
key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
return c.exchange(m, address)
return c.exchange(m, conn)
})
if r != nil && shared {
r = r.Copy()

@ -141,15 +165,7 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
return r, rtt, err
}

func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
var co *Conn
co, err = c.Dial(a)
if err != nil {
return nil, 0, err
}
defer co.Close()
func (c *Client) exchange(m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
opt := m.IsEdns0()
// If EDNS0 is used use that for size.

@ -161,7 +177,7 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
co.UDPSize = c.UDPSize
}
co.TsigSecret = c.TsigSecret
co.TsigSecret, co.TsigProvider = c.TsigSecret, c.TsigProvider
t := time.Now()
// write with the appropriate write timeout
co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout())))

@ -170,10 +186,21 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
}
co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout())))
if _, ok := co.Conn.(net.PacketConn); ok {
for {
r, err = co.ReadMsg()
// Ignore replies with mismatched IDs because they might be
// responses to earlier queries that timed out.
if err != nil || r.Id == m.Id {
break
}
}
} else {
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
}
}
rtt = time.Since(t)
return r, rtt, err
}

@ -197,12 +224,16 @@ func (co *Conn) ReadMsg() (*Msg, error) {
return m, err
}
if t := m.IsTsig(); t != nil {
if co.TsigProvider != nil {
err = tsigVerifyProvider(p, co.TsigProvider, co.tsigRequestMAC, false)
} else {
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
return m, ErrSecret
}
// Need to work on the original message p, as that was used to calculate the tsig.
err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
}
}
return m, err
}

@ -279,10 +310,14 @@ func (co *Conn) WriteMsg(m *Msg) (err error) {
var out []byte
if t := m.IsTsig(); t != nil {
mac := ""
if co.TsigProvider != nil {
out, mac, err = tsigGenerateProvider(m, co.TsigProvider, co.tsigRequestMAC, false)
} else {
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
return ErrSecret
}
out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
}
// Set for the next read, although only used in zone transfers
co.tsigRequestMAC = mac
} else {

@ -305,11 +340,10 @@ func (co *Conn) Write(p []byte) (int, error) {
return co.Conn.Write(p)
}
l := make([]byte, 2)
binary.BigEndian.PutUint16(l, uint16(len(p)))
n, err := (&net.Buffers{l, p}).WriteTo(co.Conn)
return int(n), err
msg := make([]byte, 2+len(p))
binary.BigEndian.PutUint16(msg, uint16(len(p)))
copy(msg[2:], p)
return co.Conn.Write(msg)
}

// Return the appropriate timeout for a specific request
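For reference, the hunks above add ExchangeWithConn so callers can manage connections themselves instead of dialing per query. A minimal sketch of reusing one connection for several queries, assuming the vendored miekg/dns API (the resolver address is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)

	// Dial once and reuse the connection; 192.0.2.1 is a placeholder resolver.
	conn, err := c.Dial("192.0.2.1:53")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	for _, name := range []string{"example.org.", "example.com."} {
		m := new(dns.Msg)
		m.SetQuestion(name, dns.TypeA)
		r, rtt, err := c.ExchangeWithConn(m, conn)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(name, r.Rcode, rtt)
	}
}
```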
@ -105,7 +105,7 @@ func (dns *Msg) SetAxfr(z string) *Msg {

// SetTsig appends a TSIG RR to the message.
// This is only a skeleton TSIG RR that is added as the last RR in the
// additional section. The Tsig is calculated when the message is being send.
// additional section. The TSIG is calculated when the message is being send.
func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
t := new(TSIG)
t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}

@ -317,6 +317,12 @@ func Fqdn(s string) string {
return s + "."
}

// CanonicalName returns the domain name in canonical form. A name in canonical
// form is lowercase and fully qualified. See Section 6.2 in RFC 4034.
func CanonicalName(s string) string {
return strings.ToLower(Fqdn(s))
}

// Copied from the official Go code.

// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP

@ -364,7 +370,7 @@ func (t Type) String() string {
// String returns the string representation for the class c.
func (c Class) String() string {
if s, ok := ClassToString[uint16(c)]; ok {
// Only emit mnemonics when they are unambiguous, specically ANY is in both.
// Only emit mnemonics when they are unambiguous, specially ANY is in both.
if _, ok := StringToType[s]; !ok {
return s
}
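The new CanonicalName helper added above is just ToLower applied to Fqdn; a quick sketch of what the two return:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Fqdn only appends the trailing dot; CanonicalName also lowercases (RFC 4034 §6.2).
	fmt.Println(dns.Fqdn("Example.ORG"))          // Example.ORG.
	fmt.Println(dns.CanonicalName("Example.ORG")) // example.org.
}
```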
@ -1,6 +1,9 @@
package dns

import "strconv"
import (
"encoding/hex"
"strconv"
)

const (
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.

@ -111,7 +114,7 @@ func (h *RR_Header) parse(c *zlexer, origin string) *ParseError {

// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
func (rr *RFC3597) ToRFC3597(r RR) error {
buf := make([]byte, Len(r)*2)
buf := make([]byte, Len(r))
headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false)
if err != nil {
return err

@ -126,9 +129,30 @@ func (rr *RFC3597) ToRFC3597(r RR) error {
}
_, err = rr.unpack(buf, headerEnd)
return err
}

// fromRFC3597 converts an unknown RR representation from RFC 3597 to the known RR type.
func (rr *RFC3597) fromRFC3597(r RR) error {
hdr := r.Header()
*hdr = rr.Hdr

// Can't overflow uint16 as the length of Rdata is validated in (*RFC3597).parse.
// We can only get here when rr was constructed with that method.
hdr.Rdlength = uint16(hex.DecodedLen(len(rr.Rdata)))

if noRdata(*hdr) {
// Dynamic update.
return nil
}

// rr.pack requires an extra allocation and a copy so we just decode Rdata
// manually, it's simpler anyway.
msg, err := hex.DecodeString(rr.Rdata)
if err != nil {
return err
}

return nil
_, err = r.unpack(msg, 0)
return err
}
@ -3,10 +3,8 @@ package dns
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
_ "crypto/md5"
"crypto/rand"
"crypto/rsa"
_ "crypto/sha1"

@ -200,7 +198,7 @@ func (k *DNSKEY) ToDS(h uint8) *DS {
wire = wire[:n]
owner := make([]byte, 255)
off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
off, err1 := PackDomainName(CanonicalName(k.Hdr.Name), owner, 0, nil, false)
if err1 != nil {
return nil
}

@ -285,7 +283,7 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag
// For signing, lowercase this name
sigwire.SignerName = strings.ToLower(rr.SignerName)
sigwire.SignerName = CanonicalName(rr.SignerName)
// Create the desired binary blob
signdata := make([]byte, DefaultMsgSize)

@ -318,6 +316,7 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
}
rr.Signature = toBase64(signature)
return nil
case RSAMD5, DSA, DSANSEC3SHA1:
// See RFC 6944.
return ErrAlg

@ -332,10 +331,9 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
}
rr.Signature = toBase64(signature)
}
return nil
}
}

func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
signature, err := k.Sign(rand.Reader, hashed, hash)

@ -346,7 +344,6 @@ func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte,
switch alg {
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
return signature, nil
case ECDSAP256SHA256, ECDSAP384SHA384:
ecdsaSignature := &struct {
R, S *big.Int

@ -366,21 +363,12 @@ func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte,
signature := intToBytes(ecdsaSignature.R, intlen)
signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
return signature, nil
// There is no defined interface for what a DSA backed crypto.Signer returns
case DSA, DSANSEC3SHA1:
// t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
// signature := []byte{byte(t)}
// signature = append(signature, intToBytes(r1, 20)...)
// signature = append(signature, intToBytes(s1, 20)...)
// rr.Signature = signature
case ED25519:
return signature, nil
}
default:
return nil, ErrAlg
}
}

// Verify validates an RRSet with the signature and key. This is only the
// cryptographic test, the signature validity period must be checked separately.

@ -423,7 +411,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
sigwire.Expiration = rr.Expiration
sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag
sigwire.SignerName = strings.ToLower(rr.SignerName)
sigwire.SignerName = CanonicalName(rr.SignerName)
// Create the desired binary blob
signeddata := make([]byte, DefaultMsgSize)
n, err := packSigWire(sigwire, signeddata)

@ -448,7 +436,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
}
switch rr.Algorithm {
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
// TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
pubkey := k.publicKeyRSA() // Get the key
if pubkey == nil {

@ -600,30 +588,6 @@ func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
return pubkey
}

func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
if len(keybuf) < 22 {
return nil
}
t, keybuf := int(keybuf[0]), keybuf[1:]
size := 64 + t*8
q, keybuf := keybuf[:20], keybuf[20:]
if len(keybuf) != 3*size {
return nil
}
p, keybuf := keybuf[:size], keybuf[size:]
g, y := keybuf[:size], keybuf[size:]
pubkey := new(dsa.PublicKey)
pubkey.Parameters.Q = new(big.Int).SetBytes(q)
pubkey.Parameters.P = new(big.Int).SetBytes(p)
pubkey.Parameters.G = new(big.Int).SetBytes(g)
pubkey.Y = new(big.Int).SetBytes(y)
return pubkey
}

func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {

@ -659,7 +623,7 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
}
// RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
h.Name = strings.ToLower(h.Name)
h.Name = CanonicalName(h.Name)
// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,

@ -672,49 +636,49 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
// conversion.
switch x := r1.(type) {
case *NS:
x.Ns = strings.ToLower(x.Ns)
x.Ns = CanonicalName(x.Ns)
case *MD:
x.Md = strings.ToLower(x.Md)
x.Md = CanonicalName(x.Md)
case *MF:
x.Mf = strings.ToLower(x.Mf)
x.Mf = CanonicalName(x.Mf)
case *CNAME:
x.Target = strings.ToLower(x.Target)
x.Target = CanonicalName(x.Target)
case *SOA:
x.Ns = strings.ToLower(x.Ns)
x.Mbox = strings.ToLower(x.Mbox)
x.Ns = CanonicalName(x.Ns)
x.Mbox = CanonicalName(x.Mbox)
case *MB:
x.Mb = strings.ToLower(x.Mb)
x.Mb = CanonicalName(x.Mb)
case *MG:
x.Mg = strings.ToLower(x.Mg)
x.Mg = CanonicalName(x.Mg)
case *MR:
x.Mr = strings.ToLower(x.Mr)
x.Mr = CanonicalName(x.Mr)
case *PTR:
x.Ptr = strings.ToLower(x.Ptr)
x.Ptr = CanonicalName(x.Ptr)
case *MINFO:
x.Rmail = strings.ToLower(x.Rmail)
x.Email = strings.ToLower(x.Email)
x.Rmail = CanonicalName(x.Rmail)
x.Email = CanonicalName(x.Email)
case *MX:
x.Mx = strings.ToLower(x.Mx)
x.Mx = CanonicalName(x.Mx)
case *RP:
x.Mbox = strings.ToLower(x.Mbox)
x.Txt = strings.ToLower(x.Txt)
x.Mbox = CanonicalName(x.Mbox)
x.Txt = CanonicalName(x.Txt)
case *AFSDB:
x.Hostname = strings.ToLower(x.Hostname)
x.Hostname = CanonicalName(x.Hostname)
case *RT:
x.Host = strings.ToLower(x.Host)
x.Host = CanonicalName(x.Host)
case *SIG:
x.SignerName = strings.ToLower(x.SignerName)
x.SignerName = CanonicalName(x.SignerName)
case *PX:
x.Map822 = strings.ToLower(x.Map822)
x.Mapx400 = strings.ToLower(x.Mapx400)
x.Map822 = CanonicalName(x.Map822)
x.Mapx400 = CanonicalName(x.Mapx400)
case *NAPTR:
x.Replacement = strings.ToLower(x.Replacement)
x.Replacement = CanonicalName(x.Replacement)
case *KX:
x.Exchanger = strings.ToLower(x.Exchanger)
x.Exchanger = CanonicalName(x.Exchanger)
case *SRV:
x.Target = strings.ToLower(x.Target)
x.Target = CanonicalName(x.Target)
case *DNAME:
x.Target = strings.ToLower(x.Target)
x.Target = CanonicalName(x.Target)
}
// 6.2. Canonical RR Form. (5) - origTTL
wire := make([]byte, Len(r1)+1) // +1 to be safe(r)
@ -19,8 +19,6 @@ import (
// bits should be set to the size of the algorithm.
func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
switch k.Algorithm {
case RSAMD5, DSA, DSANSEC3SHA1:
return nil, ErrAlg
case RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
if bits < 512 || bits > 4096 {
return nil, ErrKeySize

@ -41,6 +39,8 @@ func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
if bits != 256 {
return nil, ErrKeySize
}
default:
return nil, ErrAlg
}
switch k.Algorithm {
@ -43,15 +43,7 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
return nil, ErrPrivKey
}
switch uint8(algo) {
case RSAMD5, DSA, DSANSEC3SHA1:
return nil, ErrAlg
case RSASHA1:
fallthrough
case RSASHA1NSEC3SHA1:
fallthrough
case RSASHA256:
fallthrough
case RSASHA512:
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
priv, err := readPrivateKeyRSA(m)
if err != nil {
return nil, err

@ -62,11 +54,7 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
}
priv.PublicKey = *pub
return priv, nil
case ECCGOST:
return nil, ErrPrivKey
case ECDSAP256SHA256:
fallthrough
case ECDSAP384SHA384:
case ECDSAP256SHA256, ECDSAP384SHA384:
priv, err := readPrivateKeyECDSA(m)
if err != nil {
return nil, err

@ -80,7 +68,7 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
case ED25519:
return readPrivateKeyED25519(m)
default:
return nil, ErrPrivKey
return nil, ErrAlg
}
}
@ -2,7 +2,6 @@ package dns

import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"math/big"

@ -17,8 +16,8 @@ var bigIntOne = big.NewInt(1)

// PrivateKeyString converts a PrivateKey to a string. This string has the same
// format as the private-key-file of BIND9 (Private-key-format: v1.3).
// It needs some info from the key (the algorithm), so its a method of the DNSKEY
// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey
// It needs some info from the key (the algorithm), so its a method of the DNSKEY.
// It supports *rsa.PrivateKey, *ecdsa.PrivateKey and ed25519.PrivateKey.
func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
algorithm := strconv.Itoa(int(r.Algorithm))
algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"

@ -67,21 +66,6 @@ func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
"Algorithm: " + algorithm + "\n" +
"PrivateKey: " + private + "\n"

case *dsa.PrivateKey:
T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
priv := toBase64(intToBytes(p.X, 20))
pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
return format +
"Algorithm: " + algorithm + "\n" +
"Prime(p): " + prime + "\n" +
"Subprime(q): " + subprime + "\n" +
"Base(g): " + base + "\n" +
"Private_value(x): " + priv + "\n" +
"Public_value(y): " + pub + "\n"

case ed25519.PrivateKey:
private := toBase64(p.Seed())
return format +
@ -194,6 +194,30 @@ request an AXFR for miek.nl. with TSIG key named "axfr." and secret
You can now read the records from the transfer as they come in. Each envelope
is checked with TSIG. If something is not correct an error is returned.

A custom TSIG implementation can be used. This requires additional code to
perform any session establishment and signature generation/verification. The
client must be configured with an implementation of the TsigProvider interface:

	type Provider struct{}

	func (*Provider) Generate(msg []byte, tsig *dns.TSIG) ([]byte, error) {
		// Use tsig.Hdr.Name and tsig.Algorithm in your code to
		// generate the MAC using msg as the payload.
	}

	func (*Provider) Verify(msg []byte, tsig *dns.TSIG) error {
		// Use tsig.Hdr.Name and tsig.Algorithm in your code to verify
		// that msg matches the value in tsig.MAC.
	}

	c := new(dns.Client)
	c.TsigProvider = new(Provider)
	m := new(dns.Msg)
	m.SetQuestion("miek.nl.", dns.TypeMX)
	m.SetTsig(keyname, dns.HmacSHA1, 300, time.Now().Unix())
	...
	// TSIG RR is calculated by calling your Generate method

Basic use pattern validating and replying to a message that has TSIG set.

	server := &dns.Server{Addr: ":53", Net: "udp"}

@ -209,7 +233,7 @@ Basic use pattern validating and replying to a message that has TSIG set.
// *Msg r has an TSIG record and it was validated
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
} else {
// *Msg r has an TSIG records and it was not valided
// *Msg r has an TSIG records and it was not validated
}
}
w.WriteMsg(m)

@ -260,7 +284,7 @@ From RFC 2931:
on requests and responses, and protection of the overall integrity of a response.

It works like TSIG, except that SIG(0) uses public key cryptography, instead of
the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256,
the shared secret approach in TSIG. Supported algorithms: ECDSAP256SHA256,
ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512.

Signing subsequent messages in multi-message sessions is not implemented.
@ -3,9 +3,8 @@ package dns
//go:generate go run duplicate_generate.go

// IsDuplicate checks of r1 and r2 are duplicates of each other, excluding the TTL.
// So this means the header data is equal *and* the RDATA is the same. Return true
// is so, otherwise false.
// It's a protocol violation to have identical RRs in a message.
// So this means the header data is equal *and* the RDATA is the same. Returns true
// if so, otherwise false. It's a protocol violation to have identical RRs in a message.
func IsDuplicate(r1, r2 RR) bool {
// Check whether the record header is identical.
if !r1.Header().isDuplicate(r2.Header()) {
@ -88,8 +88,8 @@ func (rr *OPT) len(off int, compression map[string]struct{}) int {
return l
}

func (rr *OPT) parse(c *zlexer, origin string) *ParseError {
panic("dns: internal error: parse should never be called on OPT")
func (*OPT) parse(c *zlexer, origin string) *ParseError {
return &ParseError{err: "OPT records do not have a presentation format"}
}

func (r1 *OPT) isDuplicate(r2 RR) bool { return false }

@ -543,6 +543,10 @@ func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
}

func (e *EDNS0_EXPIRE) unpack(b []byte) error {
if len(b) == 0 {
// zero-length EXPIRE query, see RFC 7314 Section 2
return nil
}
if len(b) < 4 {
return ErrBuf
}
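Not part of the diff — a minimal sketch, assuming the vendored miekg/dns API, of building a query that carries an OPT RR with an EXPIRE option (the zero-length response case is what the unpack change above tolerates):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)

	// SetEdns0 attaches an OPT RR (UDP payload size 4096, DO bit set).
	m.SetEdns0(4096, true)

	// Request the zone's EXPIRE value (RFC 7314).
	opt := m.IsEdns0()
	opt.Option = append(opt.Option, &dns.EDNS0_EXPIRE{Code: dns.EDNS0EXPIRE})

	fmt.Println(m.String())
}
```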
@ -20,13 +20,13 @@ import (
// of $ after that are interpreted.
func (zp *ZoneParser) generate(l lex) (RR, bool) {
token := l.token
step := 1
step := int64(1)
if i := strings.IndexByte(token, '/'); i >= 0 {
if i+1 == len(token) {
return zp.setParseError("bad step in $GENERATE range", l)
}
s, err := strconv.Atoi(token[i+1:])
s, err := strconv.ParseInt(token[i+1:], 10, 64)
if err != nil || s <= 0 {
return zp.setParseError("bad step in $GENERATE range", l)
}

@ -40,12 +40,12 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) {
return zp.setParseError("bad start-stop in $GENERATE range", l)
}
start, err := strconv.Atoi(sx[0])
start, err := strconv.ParseInt(sx[0], 10, 64)
if err != nil {
return zp.setParseError("bad start in $GENERATE range", l)
}
end, err := strconv.Atoi(sx[1])
end, err := strconv.ParseInt(sx[1], 10, 64)
if err != nil {
return zp.setParseError("bad stop in $GENERATE range", l)
}

@ -94,10 +94,10 @@ type generateReader struct {
s string
si int
cur int
start int
end int
step int
cur int64
start int64
end int64
step int64
mod bytes.Buffer

@ -173,7 +173,7 @@ func (r *generateReader) ReadByte() (byte, error) {
return '$', nil
}
var offset int
var offset int64
// Search for { and }
if r.s[si+1] == '{' {

@ -208,7 +208,7 @@ func (r *generateReader) ReadByte() (byte, error) {
}

// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
func modToPrintf(s string) (string, int, string) {
func modToPrintf(s string) (string, int64, string) {
// Modifier is { offset [ ,width [ ,base ] ] } - provide default
// values for optional width and type, if necessary.
var offStr, widthStr, base string

@ -229,12 +229,12 @@ func modToPrintf(s string) (string, int, string) {
return "", 0, "bad base in $GENERATE"
}
offset, err := strconv.Atoi(offStr)
offset, err := strconv.ParseInt(offStr, 10, 64)
if err != nil {
return "", 0, "bad offset in $GENERATE"
}
width, err := strconv.Atoi(widthStr)
width, err := strconv.ParseInt(widthStr, 10, 64)
if err != nil || width < 0 || width > 255 {
return "", 0, "bad width in $GENERATE"
}
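The hunks above widen $GENERATE bounds from int to int64. For context, a sketch (assuming the vendored ZoneParser API and a simple $GENERATE directive) of expanding such a range; the zone contents are illustrative only:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zone := `$ORIGIN example.org.
$TTL 3600
$GENERATE 1-3 host-$ A 192.0.2.$
`
	zp := dns.NewZoneParser(strings.NewReader(zone), "", "")
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		fmt.Println(rr) // host-1.example.org. ... A 192.0.2.1, and so on
	}
	if err := zp.Err(); err != nil {
		fmt.Println("parse error:", err)
	}
}
```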
@ -3,10 +3,9 @@ module github.com/miekg/dns
go 1.12

require (
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
golang.org/x/net v0.0.0-20190923162816-aa69164e4478
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe
golang.org/x/text v0.3.2 // indirect
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18 // indirect
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 // indirect
)
@ -5,6 +5,9 @@ golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsu
golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3 h1:dgd4x4kJt7G4k4m93AYLzM8Ni6h2qLTfh9n9vXJT3/0=
golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=

@ -30,4 +33,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 h1:VvQyQJN0tSuecqgcIxMWnnfG5kSmgy9KZR9sW3W5QeA=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -83,7 +83,7 @@ func CompareDomainName(s1, s2 string) (n int) {
return
}

// CountLabel counts the the number of labels in the string s.
// CountLabel counts the number of labels in the string s.
// s must be a syntactically valid domain name.
func CountLabel(s string) (labels int) {
if s == "." {
@ -398,19 +398,14 @@ Loop:
return "", lenmsg, ErrLongDomain
}
for _, b := range msg[off : off+c] {
switch b {
case '.', '(', ')', ';', ' ', '@':
fallthrough
case '"', '\\':
if isDomainNameLabelSpecial(b) {
s = append(s, '\\', b)
default:
if b < ' ' || b > '~' { // unprintable, use \DDD
} else if b < ' ' || b > '~' {
s = append(s, escapeByte(b)...)
} else {
s = append(s, b)
}
}
}
s = append(s, '.')
off += c
case 0xC0:

@ -629,11 +624,18 @@ func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err
rr = &RFC3597{Hdr: h}
}
if noRdata(h) {
return rr, off, nil
if off < 0 || off > len(msg) {
return &h, off, &Error{err: "bad off"}
}
end := off + int(h.Rdlength)
if end < off || end > len(msg) {
return &h, end, &Error{err: "bad rdlength"}
}
if noRdata(h) {
return rr, off, nil
}
off, err = rr.unpack(msg, off)
if err != nil {

@ -661,7 +663,6 @@ func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error)
}
// If offset does not increase anymore, l is a lie
if off1 == off {
l = i
break
}
dst = append(dst, r)
@ -6,6 +6,7 @@ import (
"encoding/binary"
"encoding/hex"
"net"
"sort"
"strings"
)

@ -423,86 +424,12 @@ Option:
if off+int(optlen) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
switch code {
case EDNS0NSID:
e := new(EDNS0_NSID)
e := makeDataOpt(code)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0SUBNET:
e := new(EDNS0_SUBNET)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0COOKIE:
e := new(EDNS0_COOKIE)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0EXPIRE:
e := new(EDNS0_EXPIRE)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0UL:
e := new(EDNS0_UL)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0LLQ:
e := new(EDNS0_LLQ)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0DAU:
e := new(EDNS0_DAU)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0DHU:
e := new(EDNS0_DHU)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0N3U:
e := new(EDNS0_N3U)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0PADDING:
e := new(EDNS0_PADDING)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
default:
e := new(EDNS0_LOCAL)
e.Code = code
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
}
if off < len(msg) {
goto Option
|
|||
return edns, off, nil
|
||||
}
|
||||
|
||||
func makeDataOpt(code uint16) EDNS0 {
|
||||
switch code {
|
||||
case EDNS0NSID:
|
||||
return new(EDNS0_NSID)
|
||||
case EDNS0SUBNET:
|
||||
return new(EDNS0_SUBNET)
|
||||
case EDNS0COOKIE:
|
||||
return new(EDNS0_COOKIE)
|
||||
case EDNS0EXPIRE:
|
||||
return new(EDNS0_EXPIRE)
|
||||
case EDNS0UL:
|
||||
return new(EDNS0_UL)
|
||||
case EDNS0LLQ:
|
||||
return new(EDNS0_LLQ)
|
||||
case EDNS0DAU:
|
||||
return new(EDNS0_DAU)
|
||||
case EDNS0DHU:
|
||||
return new(EDNS0_DHU)
|
||||
case EDNS0N3U:
|
||||
return new(EDNS0_N3U)
|
||||
case EDNS0PADDING:
|
||||
return new(EDNS0_PADDING)
|
||||
default:
|
||||
e := new(EDNS0_LOCAL)
|
||||
e.Code = code
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
|
||||
for _, el := range options {
|
||||
b, err := el.pack()
|
||||
|
@ -521,9 +477,7 @@ func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
|
|||
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
|
||||
off += 4
|
||||
if off+len(b) > len(msg) {
|
||||
copy(msg[off:], b)
|
||||
off = len(msg)
|
||||
continue
|
||||
return len(msg), &Error{err: "overflow packing opt"}
|
||||
}
|
||||
// Actual data
|
||||
copy(msg[off:off+len(b)], b)
|
||||
|
@ -659,6 +613,65 @@ func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
|
|||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataSVCB(msg []byte, off int) ([]SVCBKeyValue, int, error) {
|
||||
var xs []SVCBKeyValue
|
||||
var code uint16
|
||||
var length uint16
|
||||
var err error
|
||||
for off < len(msg) {
|
||||
code, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking SVCB"}
|
||||
}
|
||||
length, off, err = unpackUint16(msg, off)
|
||||
if err != nil || off+int(length) > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking SVCB"}
|
||||
}
|
||||
e := makeSVCBKeyValue(SVCBKey(code))
|
||||
if e == nil {
|
||||
return nil, len(msg), &Error{err: "bad SVCB key"}
|
||||
}
|
||||
if err := e.unpack(msg[off : off+int(length)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
if len(xs) > 0 && e.Key() <= xs[len(xs)-1].Key() {
|
||||
return nil, len(msg), &Error{err: "SVCB keys not in strictly increasing order"}
|
||||
}
|
||||
xs = append(xs, e)
|
||||
off += int(length)
|
||||
}
|
||||
return xs, off, nil
|
||||
}
|
||||
|
||||
func packDataSVCB(pairs []SVCBKeyValue, msg []byte, off int) (int, error) {
|
||||
pairs = append([]SVCBKeyValue(nil), pairs...)
|
||||
sort.Slice(pairs, func(i, j int) bool {
|
||||
return pairs[i].Key() < pairs[j].Key()
|
||||
})
|
||||
prev := svcb_RESERVED
|
||||
for _, el := range pairs {
|
||||
if el.Key() == prev {
|
||||
return len(msg), &Error{err: "repeated SVCB keys are not allowed"}
|
||||
}
|
||||
prev = el.Key()
|
||||
packed, err := el.pack()
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint16(uint16(el.Key()), msg, off)
|
||||
if err != nil {
|
||||
return len(msg), &Error{err: "overflow packing SVCB"}
|
||||
}
|
||||
off, err = packUint16(uint16(len(packed)), msg, off)
|
||||
if err != nil || off+len(packed) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing SVCB"}
|
||||
}
|
||||
copy(msg[off:off+len(packed)], packed)
|
||||
off += len(packed)
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
|
||||
var (
|
||||
servers []string
|
||||
|
@ -688,3 +701,133 @@ func packDataDomainNames(names []string, msg []byte, off int, compression compre
|
|||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func packDataApl(data []APLPrefix, msg []byte, off int) (int, error) {
|
||||
var err error
|
||||
for i := range data {
|
||||
off, err = packDataAplPrefix(&data[i], msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func packDataAplPrefix(p *APLPrefix, msg []byte, off int) (int, error) {
|
||||
if len(p.Network.IP) != len(p.Network.Mask) {
|
||||
return len(msg), &Error{err: "address and mask lengths don't match"}
|
||||
}
|
||||
|
||||
var err error
|
||||
prefix, _ := p.Network.Mask.Size()
|
||||
addr := p.Network.IP.Mask(p.Network.Mask)[:(prefix+7)/8]
|
||||
|
||||
switch len(p.Network.IP) {
|
||||
case net.IPv4len:
|
||||
off, err = packUint16(1, msg, off)
|
||||
case net.IPv6len:
|
||||
off, err = packUint16(2, msg, off)
|
||||
default:
|
||||
err = &Error{err: "unrecognized address family"}
|
||||
}
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
|
||||
off, err = packUint8(uint8(prefix), msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
|
||||
var n uint8
|
||||
if p.Negation {
|
||||
n = 0x80
|
||||
}
|
||||
|
||||
// trim trailing zero bytes as specified in RFC3123 Sections 4.1 and 4.2.
|
||||
i := len(addr) - 1
|
||||
for ; i >= 0 && addr[i] == 0; i-- {
|
||||
}
|
||||
addr = addr[:i+1]
|
||||
|
||||
adflen := uint8(len(addr)) & 0x7f
|
||||
off, err = packUint8(n|adflen, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
|
||||
if off+len(addr) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing APL prefix"}
|
||||
}
|
||||
off += copy(msg[off:], addr)
|
||||
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataApl(msg []byte, off int) ([]APLPrefix, int, error) {
|
||||
var result []APLPrefix
|
||||
for off < len(msg) {
|
||||
prefix, end, err := unpackDataAplPrefix(msg, off)
|
||||
if err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
off = end
|
||||
result = append(result, prefix)
|
||||
}
|
||||
return result, off, nil
|
||||
}
|
||||
|
||||
func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) {
|
||||
family, off, err := unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
|
||||
}
|
||||
prefix, off, err := unpackUint8(msg, off)
|
||||
if err != nil {
|
||||
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
|
||||
}
|
||||
nlen, off, err := unpackUint8(msg, off)
|
||||
if err != nil {
|
||||
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
|
||||
}
|
||||
|
||||
var ip []byte
|
||||
switch family {
|
||||
case 1:
|
||||
ip = make([]byte, net.IPv4len)
|
||||
case 2:
|
||||
ip = make([]byte, net.IPv6len)
|
||||
default:
|
||||
return APLPrefix{}, len(msg), &Error{err: "unrecognized APL address family"}
|
||||
}
|
||||
if int(prefix) > 8*len(ip) {
|
||||
return APLPrefix{}, len(msg), &Error{err: "APL prefix too long"}
|
||||
}
|
||||
afdlen := int(nlen & 0x7f)
|
||||
if afdlen > len(ip) {
|
||||
return APLPrefix{}, len(msg), &Error{err: "APL length too long"}
|
||||
}
|
||||
if off+afdlen > len(msg) {
|
||||
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL address"}
|
||||
}
|
||||
off += copy(ip, msg[off:off+afdlen])
|
||||
if afdlen > 0 {
|
||||
last := ip[afdlen-1]
|
||||
if last == 0 {
|
||||
return APLPrefix{}, len(msg), &Error{err: "extra APL address bits"}
|
||||
}
|
||||
}
|
||||
ipnet := net.IPNet{
|
||||
IP: ip,
|
||||
Mask: net.CIDRMask(int(prefix), 8*len(ip)),
|
||||
}
|
||||
network := ipnet.IP.Mask(ipnet.Mask)
|
||||
if !network.Equal(ipnet.IP) {
|
||||
return APLPrefix{}, len(msg), &Error{err: "invalid APL address length"}
|
||||
}
|
||||
|
||||
return APLPrefix{
|
||||
Negation: (nlen & 0x80) != 0,
|
||||
Network: ipnet,
|
||||
}, off, nil
|
||||
}
|
||||
|
|
|
@ -8,8 +8,14 @@ package dns
|
|||
// record adding as many records as possible without exceeding the
|
||||
// requested buffer size.
|
||||
//
|
||||
// If the message fits within the requested size without compression,
|
||||
// Truncate will set the message's Compress attribute to false. It is
|
||||
// the caller's responsibility to set it back to true if they wish to
|
||||
// compress the payload regardless of size.
|
||||
//
|
||||
// The TC bit will be set if any records were excluded from the message.
|
||||
// This indicates to that the client should retry over TCP.
|
||||
// If the TC bit is already set on the message it will be retained.
|
||||
// TC indicates that the client should retry over TCP.
|
||||
//
|
||||
// According to RFC 2181, the TC bit should only be set if not all of the
|
||||
// "required" RRs can be included in the response. Unfortunately, we have
|
||||
|
@ -28,11 +34,11 @@ func (dns *Msg) Truncate(size int) {
|
|||
}
|
||||
|
||||
// RFC 6891 mandates that the payload size in an OPT record
|
||||
// less than 512 bytes must be treated as equal to 512 bytes.
|
||||
// less than 512 (MinMsgSize) bytes must be treated as equal to 512 bytes.
|
||||
//
|
||||
// For ease of use, we impose that restriction here.
|
||||
if size < 512 {
|
||||
size = 512
|
||||
if size < MinMsgSize {
|
||||
size = MinMsgSize
|
||||
}
|
||||
|
||||
l := msgLenWithCompressionMap(dns, nil) // uncompressed length
|
||||
|
@ -73,11 +79,11 @@ func (dns *Msg) Truncate(size int) {
|
|||
|
||||
var numExtra int
|
||||
if l < size {
|
||||
l, numExtra = truncateLoop(dns.Extra, size, l, compression)
|
||||
_, numExtra = truncateLoop(dns.Extra, size, l, compression)
|
||||
}
|
||||
|
||||
// See the function documentation for when we set this.
|
||||
dns.Truncated = len(dns.Answer) > numAnswer ||
|
||||
dns.Truncated = dns.Truncated || len(dns.Answer) > numAnswer ||
|
||||
len(dns.Ns) > numNS || len(dns.Extra) > numExtra
|
||||
|
||||
dns.Answer = dns.Answer[:numAnswer]
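For context, a minimal sketch of how a handler might call Msg.Truncate with the client's advertised EDNS0 payload size, per the doc comment above. The zone pattern, address, and port are placeholders.

```go
package main

import (
	"log"
	"net"

	"github.com/miekg/dns"
)

// handle builds a reply and truncates it to the client's advertised UDP size.
func handle(w dns.ResponseWriter, req *dns.Msg) {
	m := new(dns.Msg)
	m.SetReply(req)
	// ... append records to m.Answer here ...

	size := dns.MinMsgSize
	if opt := req.IsEdns0(); opt != nil {
		size = int(opt.UDPSize())
	}
	if _, ok := w.RemoteAddr().(*net.UDPAddr); ok {
		m.Truncate(size) // drops trailing records and sets TC when needed
	}
	w.WriteMsg(m)
}

func main() {
	dns.HandleFunc(".", handle)
	log.Fatal((&dns.Server{Addr: "127.0.0.1:8053", Net: "udp"}).ListenAndServe())
}
```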
@ -43,7 +43,7 @@ func HashName(label string, ha uint8, iter uint16, salt string) string {
|
|||
return toBase32(nsec3)
|
||||
}
|
||||
|
||||
// Cover returns true if a name is covered by the NSEC3 record
|
||||
// Cover returns true if a name is covered by the NSEC3 record.
|
||||
func (rr *NSEC3) Cover(name string) bool {
|
||||
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
|
||||
owner := strings.ToUpper(rr.Hdr.Name)
|
||||
|
|
|
@ -13,7 +13,6 @@ type PrivateRdata interface {
|
|||
// Pack is used when packing a private RR into a buffer.
|
||||
Pack([]byte) (int, error)
|
||||
// Unpack is used when unpacking a private RR from a buffer.
|
||||
// TODO(miek): diff. signature than Pack, see edns0.go for instance.
|
||||
Unpack([]byte) (int, error)
|
||||
// Copy copies the Rdata into the PrivateRdata argument.
|
||||
Copy(PrivateRdata) error
|
||||
|
|
|
@ -87,31 +87,18 @@ type lex struct {
|
|||
column int // column in the file
|
||||
}
|
||||
|
||||
// Token holds the token that are returned when a zone file is parsed.
|
||||
type Token struct {
|
||||
// The scanned resource record when error is not nil.
|
||||
RR
|
||||
// When an error occurred, this has the error specifics.
|
||||
Error *ParseError
|
||||
// A potential comment positioned after the RR and on the same line.
|
||||
Comment string
|
||||
}
|
||||
|
||||
// ttlState describes the state necessary to fill in an omitted RR TTL
|
||||
type ttlState struct {
|
||||
ttl uint32 // ttl is the current default TTL
|
||||
isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive
|
||||
}
|
||||
|
||||
// NewRR reads the RR contained in the string s. Only the first RR is
|
||||
// returned. If s contains no records, NewRR will return nil with no
|
||||
// error.
|
||||
// NewRR reads the RR contained in the string s. Only the first RR is returned.
|
||||
// If s contains no records, NewRR will return nil with no error.
|
||||
//
|
||||
// The class defaults to IN and TTL defaults to 3600. The full zone
|
||||
// file syntax like $TTL, $ORIGIN, etc. is supported.
|
||||
//
|
||||
// All fields of the returned RR are set, except RR.Header().Rdlength
|
||||
// which is set to 0.
|
||||
// The class defaults to IN and TTL defaults to 3600. The full zone file syntax
|
||||
// like $TTL, $ORIGIN, etc. is supported. All fields of the returned RR are
|
||||
// set, except RR.Header().Rdlength which is set to 0.
|
||||
func NewRR(s string) (RR, error) {
|
||||
if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
|
||||
return ReadRR(strings.NewReader(s+"\n"), "")
|
||||
|
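A short, hedged example of the NewRR defaults described above (class IN, TTL 3600); the MX record content is made up.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
	if err != nil {
		panic(err)
	}
	if mx, ok := rr.(*dns.MX); ok {
		fmt.Println(mx.Preference, mx.Mx) // 10 mx.miek.nl.
	}
}
```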
@ -133,70 +120,6 @@ func ReadRR(r io.Reader, file string) (RR, error) {
|
|||
return rr, zp.Err()
|
||||
}
|
||||
|
||||
// ParseZone reads a RFC 1035 style zonefile from r. It returns
|
||||
// Tokens on the returned channel, each consisting of either a
|
||||
// parsed RR and optional comment or a nil RR and an error. The
|
||||
// channel is closed by ParseZone when the end of r is reached.
|
||||
//
|
||||
// The string file is used in error reporting and to resolve relative
|
||||
// $INCLUDE directives. The string origin is used as the initial
|
||||
// origin, as if the file would start with an $ORIGIN directive.
|
||||
//
|
||||
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
|
||||
// supported. Note that $GENERATE's range support up to a maximum of
|
||||
// of 65535 steps.
|
||||
//
|
||||
// Basic usage pattern when reading from a string (z) containing the
|
||||
// zone data:
|
||||
//
|
||||
// for x := range dns.ParseZone(strings.NewReader(z), "", "") {
|
||||
// if x.Error != nil {
|
||||
// // log.Println(x.Error)
|
||||
// } else {
|
||||
// // Do something with x.RR
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Comments specified after an RR (and on the same line!) are
|
||||
// returned too:
|
||||
//
|
||||
// foo. IN A 10.0.0.1 ; this is a comment
|
||||
//
|
||||
// The text "; this is comment" is returned in Token.Comment.
|
||||
// Comments inside the RR are returned concatenated along with the
|
||||
// RR. Comments on a line by themselves are discarded.
|
||||
//
|
||||
// To prevent memory leaks it is important to always fully drain the
|
||||
// returned channel. If an error occurs, it will always be the last
|
||||
// Token sent on the channel.
|
||||
//
|
||||
// Deprecated: New users should prefer the ZoneParser API.
|
||||
func ParseZone(r io.Reader, origin, file string) chan *Token {
|
||||
t := make(chan *Token, 10000)
|
||||
go parseZone(r, origin, file, t)
|
||||
return t
|
||||
}
|
||||
|
||||
func parseZone(r io.Reader, origin, file string, t chan *Token) {
|
||||
defer close(t)
|
||||
|
||||
zp := NewZoneParser(r, origin, file)
|
||||
zp.SetIncludeAllowed(true)
|
||||
|
||||
for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
|
||||
t <- &Token{RR: rr, Comment: zp.Comment()}
|
||||
}
|
||||
|
||||
if err := zp.Err(); err != nil {
|
||||
pe, ok := err.(*ParseError)
|
||||
if !ok {
|
||||
pe = &ParseError{file: file, err: err.Error()}
|
||||
}
|
||||
|
||||
t <- &Token{Error: pe}
|
||||
}
|
||||
}
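Since ParseZone is deprecated above in favour of the ZoneParser API, here is a minimal sketch of the replacement; the zone contents are invented for illustration.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	const zone = `$ORIGIN example.org.
$TTL 3600
@   IN SOA ns.example.org. admin.example.org. 2021022500 7200 3600 1209600 3600
www IN A   192.0.2.1 ; a comment after the RR
`
	zp := dns.NewZoneParser(strings.NewReader(zone), "", "")
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		// Comment() returns the same-line comment for the RR just parsed, if any.
		fmt.Println(rr, zp.Comment())
	}
	if err := zp.Err(); err != nil {
		panic(err)
	}
}
```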
|
||||
|
||||
// ZoneParser is a parser for an RFC 1035 style zonefile.
|
||||
//
|
||||
// Each parsed RR in the zone is returned sequentially from Next. An
|
||||
|
@ -654,10 +577,23 @@ func (zp *ZoneParser) Next() (RR, bool) {
|
|||
|
||||
st = zExpectRdata
|
||||
case zExpectRdata:
|
||||
var rr RR
|
||||
if newFn, ok := TypeToRR[h.Rrtype]; ok && canParseAsRR(h.Rrtype) {
|
||||
var (
|
||||
rr RR
|
||||
parseAsRFC3597 bool
|
||||
)
|
||||
if newFn, ok := TypeToRR[h.Rrtype]; ok {
|
||||
rr = newFn()
|
||||
*rr.Header() = *h
|
||||
|
||||
// We may be parsing a known RR type using the RFC3597 format.
|
||||
// If so, we handle that here in a generic way.
|
||||
//
|
||||
// This is also true for PrivateRR types which will have the
|
||||
// RFC3597 parsing done for them and the Unpack method called
|
||||
// to populate the RR instead of simply deferring to Parse.
|
||||
if zp.c.Peek().token == "\\#" {
|
||||
parseAsRFC3597 = true
|
||||
}
|
||||
} else {
|
||||
rr = &RFC3597{Hdr: *h}
|
||||
}
|
||||
|
@ -677,13 +613,18 @@ func (zp *ZoneParser) Next() (RR, bool) {
|
|||
return zp.setParseError("unexpected newline", l)
|
||||
}
|
||||
|
||||
if err := rr.parse(zp.c, zp.origin); err != nil {
|
||||
parseAsRR := rr
|
||||
if parseAsRFC3597 {
|
||||
parseAsRR = &RFC3597{Hdr: *h}
|
||||
}
|
||||
|
||||
if err := parseAsRR.parse(zp.c, zp.origin); err != nil {
|
||||
// err is a concrete *ParseError without the file field set.
|
||||
// The setParseError call below will construct a new
|
||||
// *ParseError with file set to zp.file.
|
||||
|
||||
// If err.lex is nil than we have encounter an unknown RR type
|
||||
// in that case we substitute our current lex token.
|
||||
// err.lex may be nil in which case we substitute our current
|
||||
// lex token.
|
||||
if err.lex == (lex{}) {
|
||||
return zp.setParseError(err.err, l)
|
||||
}
|
||||
|
@ -691,6 +632,13 @@ func (zp *ZoneParser) Next() (RR, bool) {
|
|||
return zp.setParseError(err.err, err.lex)
|
||||
}
|
||||
|
||||
if parseAsRFC3597 {
|
||||
err := parseAsRR.(*RFC3597).fromRFC3597(rr)
|
||||
if err != nil {
|
||||
return zp.setParseError(err.Error(), l)
|
||||
}
|
||||
}
|
||||
|
||||
return rr, true
|
||||
}
|
||||
}
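A hedged illustration of the RFC 3597 handling added above: with this change both an unknown type and a known type written in `\#` form should parse. Names and rdata below are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// TYPE65280 is in the private-use range; the rdata is 4 bytes of hex.
	rr, err := dns.NewRR(`example.org. 3600 IN TYPE65280 \# 4 0A000001`)
	if err != nil {
		panic(err)
	}
	fmt.Println(rr)

	// A known type (A) may also be written in RFC 3597 form; the parser above
	// detects the \# token and converts back to a concrete *dns.A via fromRFC3597.
	rr2, err := dns.NewRR(`example.org. 3600 IN A \# 4 0A000001`)
	if err != nil {
		panic(err)
	}
	fmt.Println(rr2.(*dns.A).A) // 10.0.0.1
}
```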
|
||||
|
@ -700,18 +648,6 @@ func (zp *ZoneParser) Next() (RR, bool) {
|
|||
return nil, false
|
||||
}
|
||||
|
||||
// canParseAsRR returns true if the record type can be parsed as a
|
||||
// concrete RR. It blacklists certain record types that must be parsed
|
||||
// according to RFC 3597 because they lack a presentation format.
|
||||
func canParseAsRR(rrtype uint16) bool {
|
||||
switch rrtype {
|
||||
case TypeANY, TypeNULL, TypeOPT, TypeTSIG:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
type zlexer struct {
|
||||
br io.ByteReader
|
||||
|
||||
|
@ -1287,11 +1223,29 @@ func stringToCm(token string) (e, m uint8, ok bool) {
|
|||
if cmeters, err = strconv.Atoi(s[1]); err != nil {
|
||||
return
|
||||
}
|
||||
// There's no point in having more than 2 digits in this part, and would rather make the implementation complicated ('123' should be treated as '12').
|
||||
// So we simply reject it.
|
||||
// We also make sure the first character is a digit to reject '+-' signs.
|
||||
if len(s[1]) > 2 || s[1][0] < '0' || s[1][0] > '9' {
|
||||
return
|
||||
}
|
||||
if len(s[1]) == 1 {
|
||||
// 'nn.1' must be treated as 'nn-meters and 10cm, not 1cm.
|
||||
cmeters *= 10
|
||||
}
|
||||
if len(s[0]) == 0 {
|
||||
// This will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm).
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case 1:
|
||||
if meters, err = strconv.Atoi(s[0]); err != nil {
|
||||
return
|
||||
}
|
||||
// RFC1876 states the max value is 90000000.00. The latter two conditions enforce it.
|
||||
if s[0][0] < '0' || s[0][0] > '9' || meters > 90000000 || (meters == 90000000 && cmeters != 0) {
|
||||
return
|
||||
}
|
||||
case 0:
|
||||
// huh?
|
||||
return 0, 0, false
|
||||
|
@ -1304,13 +1258,10 @@ func stringToCm(token string) (e, m uint8, ok bool) {
|
|||
e = 0
|
||||
val = cmeters
|
||||
}
|
||||
for val > 10 {
|
||||
for val >= 10 {
|
||||
e++
|
||||
val /= 10
|
||||
}
|
||||
if e > 9 {
|
||||
ok = false
|
||||
}
|
||||
m = uint8(val)
|
||||
return
|
||||
}
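For context, a rough sketch of how the size/precision values produced by stringToCm surface through the public API. The LOC string is the RFC 1876 example, and the hex values in the comment are my reading of the mantissa/exponent encoding above (low nibble = base-10 exponent in centimetres, high nibble = mantissa).

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("example.org. IN LOC 52 22 23.000 N 4 53 32.000 E -2.00m 0.00m 10000m 10m")
	if err != nil {
		panic(err)
	}
	loc := rr.(*dns.LOC)
	// Expected roughly: Size=0x0 (0e0 cm), HorizPre=0x16 (1e6 cm = 10000 m), VertPre=0x13 (1e3 cm = 10 m)
	fmt.Printf("Size=%#x HorizPre=%#x VertPre=%#x\n", loc.Size, loc.HorizPre, loc.VertPre)
}
```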
|
||||
|
@ -1352,6 +1303,9 @@ func appendOrigin(name, origin string) string {
|
|||
|
||||
// LOC record helper function
|
||||
func locCheckNorth(token string, latitude uint32) (uint32, bool) {
|
||||
if latitude > 90*1000*60*60 {
|
||||
return latitude, false
|
||||
}
|
||||
switch token {
|
||||
case "n", "N":
|
||||
return LOC_EQUATOR + latitude, true
|
||||
|
@ -1363,6 +1317,9 @@ func locCheckNorth(token string, latitude uint32) (uint32, bool) {
|
|||
|
||||
// LOC record helper function
|
||||
func locCheckEast(token string, longitude uint32) (uint32, bool) {
|
||||
if longitude > 180*1000*60*60 {
|
||||
return longitude, false
|
||||
}
|
||||
switch token {
|
||||
case "e", "E":
|
||||
return LOC_EQUATOR + longitude, true
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"net"
|
||||
"strconv"
|
||||
|
@ -10,15 +11,15 @@ import (
|
|||
// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces)
|
||||
// or an error
|
||||
func endingToString(c *zlexer, errstr string) (string, *ParseError) {
|
||||
var s string
|
||||
var buffer bytes.Buffer
|
||||
l, _ := c.Next() // zString
|
||||
for l.value != zNewline && l.value != zEOF {
|
||||
if l.err {
|
||||
return s, &ParseError{"", errstr, l}
|
||||
return buffer.String(), &ParseError{"", errstr, l}
|
||||
}
|
||||
switch l.value {
|
||||
case zString:
|
||||
s += l.token
|
||||
buffer.WriteString(l.token)
|
||||
case zBlank: // Ok
|
||||
default:
|
||||
return "", &ParseError{"", errstr, l}
|
||||
|
@ -26,7 +27,7 @@ func endingToString(c *zlexer, errstr string) (string, *ParseError) {
|
|||
l, _ = c.Next()
|
||||
}
|
||||
|
||||
return s, nil
|
||||
return buffer.String(), nil
|
||||
}
|
||||
|
||||
// A remainder of the rdata with embedded spaces, split on unquoted whitespace
|
||||
|
@ -403,7 +404,7 @@ func (rr *SOA) parse(c *zlexer, o string) *ParseError {
|
|||
if l.err {
|
||||
return &ParseError{"", "bad SOA zone parameter", l}
|
||||
}
|
||||
if j, e := strconv.ParseUint(l.token, 10, 32); e != nil {
|
||||
if j, err := strconv.ParseUint(l.token, 10, 32); err != nil {
|
||||
if i == 0 {
|
||||
// Serial must be a number
|
||||
return &ParseError{"", "bad SOA zone parameter", l}
|
||||
|
@ -446,16 +447,16 @@ func (rr *SRV) parse(c *zlexer, o string) *ParseError {
|
|||
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next() // zString
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 16)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad SRV Weight", l}
|
||||
}
|
||||
rr.Weight = uint16(i)
|
||||
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next() // zString
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
i, e2 := strconv.ParseUint(l.token, 10, 16)
|
||||
if e2 != nil || l.err {
|
||||
return &ParseError{"", "bad SRV Port", l}
|
||||
}
|
||||
rr.Port = uint16(i)
|
||||
|
@ -482,8 +483,8 @@ func (rr *NAPTR) parse(c *zlexer, o string) *ParseError {
|
|||
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next() // zString
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 16)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad NAPTR Preference", l}
|
||||
}
|
||||
rr.Preference = uint16(i)
|
||||
|
@ -581,15 +582,15 @@ func (rr *TALINK) parse(c *zlexer, o string) *ParseError {
|
|||
|
||||
func (rr *LOC) parse(c *zlexer, o string) *ParseError {
|
||||
// Non zero defaults for LOC record, see RFC 1876, Section 3.
|
||||
rr.HorizPre = 165 // 10000
|
||||
rr.VertPre = 162 // 10
|
||||
rr.Size = 18 // 1
|
||||
rr.Size = 0x12 // 1e2 cm (1m)
|
||||
rr.HorizPre = 0x16 // 1e6 cm (10000m)
|
||||
rr.VertPre = 0x13 // 1e3 cm (10m)
|
||||
ok := false
|
||||
|
||||
// North
|
||||
l, _ := c.Next()
|
||||
i, e := strconv.ParseUint(l.token, 10, 32)
|
||||
if e != nil || l.err {
|
||||
if e != nil || l.err || i > 90 {
|
||||
return &ParseError{"", "bad LOC Latitude", l}
|
||||
}
|
||||
rr.Latitude = 1000 * 60 * 60 * uint32(i)
|
||||
|
@ -600,15 +601,15 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError {
|
|||
if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
|
||||
goto East
|
||||
}
|
||||
i, e = strconv.ParseUint(l.token, 10, 32)
|
||||
if e != nil || l.err {
|
||||
if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 {
|
||||
return &ParseError{"", "bad LOC Latitude minutes", l}
|
||||
}
|
||||
} else {
|
||||
rr.Latitude += 1000 * 60 * uint32(i)
|
||||
}
|
||||
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err {
|
||||
if i, err := strconv.ParseFloat(l.token, 32); err != nil || l.err || i < 0 || i >= 60 {
|
||||
return &ParseError{"", "bad LOC Latitude seconds", l}
|
||||
} else {
|
||||
rr.Latitude += uint32(1000 * i)
|
||||
|
@ -626,7 +627,7 @@ East:
|
|||
// East
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err {
|
||||
if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 180 {
|
||||
return &ParseError{"", "bad LOC Longitude", l}
|
||||
} else {
|
||||
rr.Longitude = 1000 * 60 * 60 * uint32(i)
|
||||
|
@ -637,14 +638,14 @@ East:
|
|||
if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
|
||||
goto Altitude
|
||||
}
|
||||
if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err {
|
||||
if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 {
|
||||
return &ParseError{"", "bad LOC Longitude minutes", l}
|
||||
} else {
|
||||
rr.Longitude += 1000 * 60 * uint32(i)
|
||||
}
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err {
|
||||
if i, err := strconv.ParseFloat(l.token, 32); err != nil || l.err || i < 0 || i >= 60 {
|
||||
return &ParseError{"", "bad LOC Longitude seconds", l}
|
||||
} else {
|
||||
rr.Longitude += uint32(1000 * i)
|
||||
|
@ -667,7 +668,7 @@ Altitude:
|
|||
if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' {
|
||||
l.token = l.token[0 : len(l.token)-1]
|
||||
}
|
||||
if i, e := strconv.ParseFloat(l.token, 32); e != nil {
|
||||
if i, err := strconv.ParseFloat(l.token, 64); err != nil {
|
||||
return &ParseError{"", "bad LOC Altitude", l}
|
||||
} else {
|
||||
rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5)
|
||||
|
@ -681,23 +682,23 @@ Altitude:
|
|||
case zString:
|
||||
switch count {
|
||||
case 0: // Size
|
||||
e, m, ok := stringToCm(l.token)
|
||||
exp, m, ok := stringToCm(l.token)
|
||||
if !ok {
|
||||
return &ParseError{"", "bad LOC Size", l}
|
||||
}
|
||||
rr.Size = e&0x0f | m<<4&0xf0
|
||||
rr.Size = exp&0x0f | m<<4&0xf0
|
||||
case 1: // HorizPre
|
||||
e, m, ok := stringToCm(l.token)
|
||||
exp, m, ok := stringToCm(l.token)
|
||||
if !ok {
|
||||
return &ParseError{"", "bad LOC HorizPre", l}
|
||||
}
|
||||
rr.HorizPre = e&0x0f | m<<4&0xf0
|
||||
rr.HorizPre = exp&0x0f | m<<4&0xf0
|
||||
case 2: // VertPre
|
||||
e, m, ok := stringToCm(l.token)
|
||||
exp, m, ok := stringToCm(l.token)
|
||||
if !ok {
|
||||
return &ParseError{"", "bad LOC VertPre", l}
|
||||
}
|
||||
rr.VertPre = e&0x0f | m<<4&0xf0
|
||||
rr.VertPre = exp&0x0f | m<<4&0xf0
|
||||
}
|
||||
count++
|
||||
case zBlank:
|
||||
|
@ -762,7 +763,7 @@ func (rr *CERT) parse(c *zlexer, o string) *ParseError {
|
|||
l, _ := c.Next()
|
||||
if v, ok := StringToCertType[l.token]; ok {
|
||||
rr.Type = v
|
||||
} else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil {
|
||||
} else if i, err := strconv.ParseUint(l.token, 10, 16); err != nil {
|
||||
return &ParseError{"", "bad CERT Type", l}
|
||||
} else {
|
||||
rr.Type = uint16(i)
|
||||
|
@ -778,7 +779,7 @@ func (rr *CERT) parse(c *zlexer, o string) *ParseError {
|
|||
l, _ = c.Next() // zString
|
||||
if v, ok := StringToAlgorithm[l.token]; ok {
|
||||
rr.Algorithm = v
|
||||
} else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil {
|
||||
} else if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
|
||||
return &ParseError{"", "bad CERT Algorithm", l}
|
||||
} else {
|
||||
rr.Algorithm = uint8(i)
|
||||
|
@ -812,8 +813,8 @@ func (rr *CSYNC) parse(c *zlexer, o string) *ParseError {
|
|||
c.Next() // zBlank
|
||||
|
||||
l, _ = c.Next()
|
||||
j, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil {
|
||||
j, e1 := strconv.ParseUint(l.token, 10, 16)
|
||||
if e1 != nil {
|
||||
// Serial must be a number
|
||||
return &ParseError{"", "bad CSYNC flags", l}
|
||||
}
|
||||
|
@ -845,9 +846,7 @@ func (rr *CSYNC) parse(c *zlexer, o string) *ParseError {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (rr *SIG) parse(c *zlexer, o string) *ParseError {
|
||||
return rr.RRSIG.parse(c, o)
|
||||
}
|
||||
func (rr *SIG) parse(c *zlexer, o string) *ParseError { return rr.RRSIG.parse(c, o) }
|
||||
|
||||
func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
|
||||
l, _ := c.Next()
|
||||
|
@ -868,24 +867,24 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
|
|||
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, err := strconv.ParseUint(l.token, 10, 8)
|
||||
if err != nil || l.err {
|
||||
i, e := strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return &ParseError{"", "bad RRSIG Algorithm", l}
|
||||
}
|
||||
rr.Algorithm = uint8(i)
|
||||
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, err = strconv.ParseUint(l.token, 10, 8)
|
||||
if err != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad RRSIG Labels", l}
|
||||
}
|
||||
rr.Labels = uint8(i)
|
||||
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, err = strconv.ParseUint(l.token, 10, 32)
|
||||
if err != nil || l.err {
|
||||
i, e2 := strconv.ParseUint(l.token, 10, 32)
|
||||
if e2 != nil || l.err {
|
||||
return &ParseError{"", "bad RRSIG OrigTtl", l}
|
||||
}
|
||||
rr.OrigTtl = uint32(i)
|
||||
|
@ -894,8 +893,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
|
|||
l, _ = c.Next()
|
||||
if i, err := StringToTime(l.token); err != nil {
|
||||
// Try to see if all numeric and use it as epoch
|
||||
if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
|
||||
// TODO(miek): error out on > MAX_UINT32, same below
|
||||
if i, err := strconv.ParseUint(l.token, 10, 32); err == nil {
|
||||
rr.Expiration = uint32(i)
|
||||
} else {
|
||||
return &ParseError{"", "bad RRSIG Expiration", l}
|
||||
|
@ -907,7 +905,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
|
|||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
if i, err := StringToTime(l.token); err != nil {
|
||||
if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
|
||||
if i, err := strconv.ParseUint(l.token, 10, 32); err == nil {
|
||||
rr.Inception = uint32(i)
|
||||
} else {
|
||||
return &ParseError{"", "bad RRSIG Inception", l}
|
||||
|
@ -918,8 +916,8 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
|
|||
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, err = strconv.ParseUint(l.token, 10, 16)
|
||||
if err != nil || l.err {
|
||||
i, e3 := strconv.ParseUint(l.token, 10, 16)
|
||||
if e3 != nil || l.err {
|
||||
return &ParseError{"", "bad RRSIG KeyTag", l}
|
||||
}
|
||||
rr.KeyTag = uint16(i)
|
||||
|
@ -933,9 +931,9 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
|
|||
}
|
||||
rr.SignerName = name
|
||||
|
||||
s, e := endingToString(c, "bad RRSIG Signature")
|
||||
if e != nil {
|
||||
return e
|
||||
s, e4 := endingToString(c, "bad RRSIG Signature")
|
||||
if e4 != nil {
|
||||
return e4
|
||||
}
|
||||
rr.Signature = s
|
||||
|
||||
|
@ -985,15 +983,15 @@ func (rr *NSEC3) parse(c *zlexer, o string) *ParseError {
|
|||
rr.Hash = uint8(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad NSEC3 Flags", l}
|
||||
}
|
||||
rr.Flags = uint8(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
i, e2 := strconv.ParseUint(l.token, 10, 16)
|
||||
if e2 != nil || l.err {
|
||||
return &ParseError{"", "bad NSEC3 Iterations", l}
|
||||
}
|
||||
rr.Iterations = uint16(i)
|
||||
|
@ -1050,22 +1048,22 @@ func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError {
|
|||
rr.Hash = uint8(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad NSEC3PARAM Flags", l}
|
||||
}
|
||||
rr.Flags = uint8(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
i, e2 := strconv.ParseUint(l.token, 10, 16)
|
||||
if e2 != nil || l.err {
|
||||
return &ParseError{"", "bad NSEC3PARAM Iterations", l}
|
||||
}
|
||||
rr.Iterations = uint16(i)
|
||||
c.Next()
|
||||
l, _ = c.Next()
|
||||
if l.token != "-" {
|
||||
rr.SaltLength = uint8(len(l.token))
|
||||
rr.SaltLength = uint8(len(l.token) / 2)
|
||||
rr.Salt = l.token
|
||||
}
|
||||
return slurpRemainder(c)
|
||||
|
@ -1132,15 +1130,15 @@ func (rr *SSHFP) parse(c *zlexer, o string) *ParseError {
|
|||
rr.Algorithm = uint8(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad SSHFP Type", l}
|
||||
}
|
||||
rr.Type = uint8(i)
|
||||
c.Next() // zBlank
|
||||
s, e1 := endingToString(c, "bad SSHFP Fingerprint")
|
||||
if e1 != nil {
|
||||
return e1
|
||||
s, e2 := endingToString(c, "bad SSHFP Fingerprint")
|
||||
if e2 != nil {
|
||||
return e2
|
||||
}
|
||||
rr.FingerPrint = s
|
||||
return nil
|
||||
|
@ -1155,37 +1153,32 @@ func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, typ string) *ParseError {
|
|||
rr.Flags = uint16(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next() // zString
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad " + typ + " Protocol", l}
|
||||
}
|
||||
rr.Protocol = uint8(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next() // zString
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e2 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e2 != nil || l.err {
|
||||
return &ParseError{"", "bad " + typ + " Algorithm", l}
|
||||
}
|
||||
rr.Algorithm = uint8(i)
|
||||
s, e1 := endingToString(c, "bad "+typ+" PublicKey")
|
||||
if e1 != nil {
|
||||
return e1
|
||||
s, e3 := endingToString(c, "bad "+typ+" PublicKey")
|
||||
if e3 != nil {
|
||||
return e3
|
||||
}
|
||||
rr.PublicKey = s
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rr *DNSKEY) parse(c *zlexer, o string) *ParseError {
|
||||
return rr.parseDNSKEY(c, o, "DNSKEY")
|
||||
}
|
||||
|
||||
func (rr *KEY) parse(c *zlexer, o string) *ParseError {
|
||||
return rr.parseDNSKEY(c, o, "KEY")
|
||||
}
|
||||
|
||||
func (rr *CDNSKEY) parse(c *zlexer, o string) *ParseError {
|
||||
return rr.parseDNSKEY(c, o, "CDNSKEY")
|
||||
}
|
||||
func (rr *DNSKEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "DNSKEY") }
|
||||
func (rr *KEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "KEY") }
|
||||
func (rr *CDNSKEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "CDNSKEY") }
|
||||
func (rr *DS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DS") }
|
||||
func (rr *DLV) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DLV") }
|
||||
func (rr *CDS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "CDS") }
|
||||
|
||||
func (rr *RKEY) parse(c *zlexer, o string) *ParseError {
|
||||
l, _ := c.Next()
|
||||
|
@ -1196,21 +1189,21 @@ func (rr *RKEY) parse(c *zlexer, o string) *ParseError {
|
|||
rr.Flags = uint16(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next() // zString
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad RKEY Protocol", l}
|
||||
}
|
||||
rr.Protocol = uint8(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next() // zString
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e2 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e2 != nil || l.err {
|
||||
return &ParseError{"", "bad RKEY Algorithm", l}
|
||||
}
|
||||
rr.Algorithm = uint8(i)
|
||||
s, e1 := endingToString(c, "bad RKEY PublicKey")
|
||||
if e1 != nil {
|
||||
return e1
|
||||
s, e3 := endingToString(c, "bad RKEY PublicKey")
|
||||
if e3 != nil {
|
||||
return e3
|
||||
}
|
||||
rr.PublicKey = s
|
||||
return nil
|
||||
|
@ -1243,15 +1236,15 @@ func (rr *GPOS) parse(c *zlexer, o string) *ParseError {
|
|||
rr.Longitude = l.token
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
_, e = strconv.ParseFloat(l.token, 64)
|
||||
if e != nil || l.err {
|
||||
_, e1 := strconv.ParseFloat(l.token, 64)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad GPOS Latitude", l}
|
||||
}
|
||||
rr.Latitude = l.token
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
_, e = strconv.ParseFloat(l.token, 64)
|
||||
if e != nil || l.err {
|
||||
_, e2 := strconv.ParseFloat(l.token, 64)
|
||||
if e2 != nil || l.err {
|
||||
return &ParseError{"", "bad GPOS Altitude", l}
|
||||
}
|
||||
rr.Altitude = l.token
|
||||
|
@ -1267,7 +1260,7 @@ func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError {
|
|||
rr.KeyTag = uint16(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
if i, e = strconv.ParseUint(l.token, 10, 8); e != nil {
|
||||
if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
|
||||
tokenUpper := strings.ToUpper(l.token)
|
||||
i, ok := StringToAlgorithm[tokenUpper]
|
||||
if !ok || l.err {
|
||||
|
@ -1279,31 +1272,19 @@ func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError {
|
|||
}
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad " + typ + " DigestType", l}
|
||||
}
|
||||
rr.DigestType = uint8(i)
|
||||
s, e1 := endingToString(c, "bad "+typ+" Digest")
|
||||
if e1 != nil {
|
||||
return e1
|
||||
s, e2 := endingToString(c, "bad "+typ+" Digest")
|
||||
if e2 != nil {
|
||||
return e2
|
||||
}
|
||||
rr.Digest = s
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rr *DS) parse(c *zlexer, o string) *ParseError {
|
||||
return rr.parseDS(c, o, "DS")
|
||||
}
|
||||
|
||||
func (rr *DLV) parse(c *zlexer, o string) *ParseError {
|
||||
return rr.parseDS(c, o, "DLV")
|
||||
}
|
||||
|
||||
func (rr *CDS) parse(c *zlexer, o string) *ParseError {
|
||||
return rr.parseDS(c, o, "CDS")
|
||||
}
|
||||
|
||||
func (rr *TA) parse(c *zlexer, o string) *ParseError {
|
||||
l, _ := c.Next()
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
|
@ -1313,7 +1294,7 @@ func (rr *TA) parse(c *zlexer, o string) *ParseError {
|
|||
rr.KeyTag = uint16(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
if i, e := strconv.ParseUint(l.token, 10, 8); e != nil {
|
||||
if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
|
||||
tokenUpper := strings.ToUpper(l.token)
|
||||
i, ok := StringToAlgorithm[tokenUpper]
|
||||
if !ok || l.err {
|
||||
|
@ -1325,14 +1306,14 @@ func (rr *TA) parse(c *zlexer, o string) *ParseError {
|
|||
}
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad TA DigestType", l}
|
||||
}
|
||||
rr.DigestType = uint8(i)
|
||||
s, err := endingToString(c, "bad TA Digest")
|
||||
if err != nil {
|
||||
return err
|
||||
s, e2 := endingToString(c, "bad TA Digest")
|
||||
if e2 != nil {
|
||||
return e2
|
||||
}
|
||||
rr.Digest = s
|
||||
return nil
|
||||
|
@ -1347,22 +1328,22 @@ func (rr *TLSA) parse(c *zlexer, o string) *ParseError {
|
|||
rr.Usage = uint8(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad TLSA Selector", l}
|
||||
}
|
||||
rr.Selector = uint8(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e2 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e2 != nil || l.err {
|
||||
return &ParseError{"", "bad TLSA MatchingType", l}
|
||||
}
|
||||
rr.MatchingType = uint8(i)
|
||||
// So this needs be e2 (i.e. different than e), because...??t
|
||||
s, e2 := endingToString(c, "bad TLSA Certificate")
|
||||
if e2 != nil {
|
||||
return e2
|
||||
s, e3 := endingToString(c, "bad TLSA Certificate")
|
||||
if e3 != nil {
|
||||
return e3
|
||||
}
|
||||
rr.Certificate = s
|
||||
return nil
|
||||
|
@ -1377,22 +1358,22 @@ func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError {
|
|||
rr.Usage = uint8(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad SMIMEA Selector", l}
|
||||
}
|
||||
rr.Selector = uint8(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
i, e2 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e2 != nil || l.err {
|
||||
return &ParseError{"", "bad SMIMEA MatchingType", l}
|
||||
}
|
||||
rr.MatchingType = uint8(i)
|
||||
// So this needs be e2 (i.e. different than e), because...??t
|
||||
s, e2 := endingToString(c, "bad SMIMEA Certificate")
|
||||
if e2 != nil {
|
||||
return e2
|
||||
s, e3 := endingToString(c, "bad SMIMEA Certificate")
|
||||
if e3 != nil {
|
||||
return e3
|
||||
}
|
||||
rr.Certificate = s
|
||||
return nil
|
||||
|
@ -1406,7 +1387,7 @@ func (rr *RFC3597) parse(c *zlexer, o string) *ParseError {
|
|||
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
rdlength, e := strconv.Atoi(l.token)
|
||||
rdlength, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return &ParseError{"", "bad RFC3597 Rdata ", l}
|
||||
}
|
||||
|
@ -1415,7 +1396,7 @@ func (rr *RFC3597) parse(c *zlexer, o string) *ParseError {
|
|||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if rdlength*2 != len(s) {
|
||||
if int(rdlength)*2 != len(s) {
|
||||
return &ParseError{"", "bad RFC3597 Rdata", l}
|
||||
}
|
||||
rr.Rdata = s
|
||||
|
@ -1469,16 +1450,16 @@ func (rr *URI) parse(c *zlexer, o string) *ParseError {
|
|||
rr.Priority = uint16(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next()
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 16)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad URI Weight", l}
|
||||
}
|
||||
rr.Weight = uint16(i)
|
||||
|
||||
c.Next() // zBlank
|
||||
s, err := endingToTxtSlice(c, "bad URI Target")
|
||||
if err != nil {
|
||||
return err
|
||||
s, e2 := endingToTxtSlice(c, "bad URI Target")
|
||||
if e2 != nil {
|
||||
return e2
|
||||
}
|
||||
if len(s) != 1 {
|
||||
return &ParseError{"", "bad URI Target", l}
|
||||
|
@ -1506,9 +1487,9 @@ func (rr *NID) parse(c *zlexer, o string) *ParseError {
|
|||
rr.Preference = uint16(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next() // zString
|
||||
u, err := stringToNodeID(l)
|
||||
if err != nil || l.err {
|
||||
return err
|
||||
u, e1 := stringToNodeID(l)
|
||||
if e1 != nil || l.err {
|
||||
return e1
|
||||
}
|
||||
rr.NodeID = u
|
||||
return slurpRemainder(c)
|
||||
|
@ -1546,7 +1527,6 @@ func (rr *LP) parse(c *zlexer, o string) *ParseError {
|
|||
return &ParseError{"", "bad LP Fqdn", l}
|
||||
}
|
||||
rr.Fqdn = name
|
||||
|
||||
return slurpRemainder(c)
|
||||
}
|
||||
|
||||
|
@ -1559,9 +1539,9 @@ func (rr *L64) parse(c *zlexer, o string) *ParseError {
|
|||
rr.Preference = uint16(i)
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next() // zString
|
||||
u, err := stringToNodeID(l)
|
||||
if err != nil || l.err {
|
||||
return err
|
||||
u, e1 := stringToNodeID(l)
|
||||
if e1 != nil || l.err {
|
||||
return e1
|
||||
}
|
||||
rr.Locator64 = u
|
||||
return slurpRemainder(c)
|
||||
|
@ -1624,14 +1604,13 @@ func (rr *PX) parse(c *zlexer, o string) *ParseError {
|
|||
return &ParseError{"", "bad PX Mapx400", l}
|
||||
}
|
||||
rr.Mapx400 = mapx400
|
||||
|
||||
return slurpRemainder(c)
|
||||
}
|
||||
|
||||
func (rr *CAA) parse(c *zlexer, o string) *ParseError {
|
||||
l, _ := c.Next()
|
||||
i, err := strconv.ParseUint(l.token, 10, 8)
|
||||
if err != nil || l.err {
|
||||
i, e := strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return &ParseError{"", "bad CAA Flag", l}
|
||||
}
|
||||
rr.Flag = uint8(i)
|
||||
|
@ -1644,9 +1623,9 @@ func (rr *CAA) parse(c *zlexer, o string) *ParseError {
|
|||
rr.Tag = l.token
|
||||
|
||||
c.Next() // zBlank
|
||||
s, e := endingToTxtSlice(c, "bad CAA Value")
|
||||
if e != nil {
|
||||
return e
|
||||
s, e1 := endingToTxtSlice(c, "bad CAA Value")
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if len(s) != 1 {
|
||||
return &ParseError{"", "bad CAA Value", l}
|
||||
|
@ -1667,8 +1646,8 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError {
|
|||
|
||||
// Get the key length and key values
|
||||
l, _ = c.Next()
|
||||
i, err := strconv.ParseUint(l.token, 10, 8)
|
||||
if err != nil || l.err {
|
||||
i, e := strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return &ParseError{"", "bad TKEY key length", l}
|
||||
}
|
||||
rr.KeySize = uint16(i)
|
||||
|
@ -1682,8 +1661,8 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError {
|
|||
|
||||
// Get the otherdata length and string data
|
||||
l, _ = c.Next()
|
||||
i, err = strconv.ParseUint(l.token, 10, 8)
|
||||
if err != nil || l.err {
|
||||
i, e1 := strconv.ParseUint(l.token, 10, 8)
|
||||
if e1 != nil || l.err {
|
||||
return &ParseError{"", "bad TKEY otherdata length", l}
|
||||
}
|
||||
rr.OtherLen = uint16(i)
|
||||
|
@ -1693,6 +1672,71 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError {
|
|||
return &ParseError{"", "bad TKEY otherday", l}
|
||||
}
|
||||
rr.OtherData = l.token
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rr *APL) parse(c *zlexer, o string) *ParseError {
|
||||
var prefixes []APLPrefix
|
||||
|
||||
for {
|
||||
l, _ := c.Next()
|
||||
if l.value == zNewline || l.value == zEOF {
|
||||
break
|
||||
}
|
||||
if l.value == zBlank && prefixes != nil {
|
||||
continue
|
||||
}
|
||||
if l.value != zString {
|
||||
return &ParseError{"", "unexpected APL field", l}
|
||||
}
|
||||
|
||||
// Expected format: [!]afi:address/prefix
|
||||
|
||||
colon := strings.IndexByte(l.token, ':')
|
||||
if colon == -1 {
|
||||
return &ParseError{"", "missing colon in APL field", l}
|
||||
}
|
||||
|
||||
family, cidr := l.token[:colon], l.token[colon+1:]
|
||||
|
||||
var negation bool
|
||||
if family != "" && family[0] == '!' {
|
||||
negation = true
|
||||
family = family[1:]
|
||||
}
|
||||
|
||||
afi, e := strconv.ParseUint(family, 10, 16)
|
||||
if e != nil {
|
||||
return &ParseError{"", "failed to parse APL family: " + e.Error(), l}
|
||||
}
|
||||
var addrLen int
|
||||
switch afi {
|
||||
case 1:
|
||||
addrLen = net.IPv4len
|
||||
case 2:
|
||||
addrLen = net.IPv6len
|
||||
default:
|
||||
return &ParseError{"", "unrecognized APL family", l}
|
||||
}
|
||||
|
||||
ip, subnet, e1 := net.ParseCIDR(cidr)
|
||||
if e1 != nil {
|
||||
return &ParseError{"", "failed to parse APL address: " + e1.Error(), l}
|
||||
}
|
||||
if !ip.Equal(subnet.IP) {
|
||||
return &ParseError{"", "extra bits in APL address", l}
|
||||
}
|
||||
|
||||
if len(subnet.IP) != addrLen {
|
||||
return &ParseError{"", "address mismatch with the APL family", l}
|
||||
}
|
||||
|
||||
prefixes = append(prefixes, APLPrefix{
|
||||
Negation: negation,
|
||||
Network: *subnet,
|
||||
})
|
||||
}
|
||||
|
||||
rr.Prefixes = prefixes
|
||||
return nil
|
||||
}
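For reference, a minimal sketch of the presentation format this parser accepts; the owner name and prefixes are placeholders.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Each element is [!]afi:address/prefix; "!" negates, afi 1 is IPv4, 2 is IPv6.
	rr, err := dns.NewRR("example.org. 3600 IN APL 1:192.0.2.0/24 !2:2001:db8::/32")
	if err != nil {
		panic(err)
	}
	apl := rr.(*dns.APL)
	for _, p := range apl.Prefixes {
		fmt.Println(p.Negation, p.Network.String())
	}
}
```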
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package dns
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
|
@ -36,7 +35,7 @@ func (mux *ServeMux) match(q string, t uint16) Handler {
|
|||
return nil
|
||||
}
|
||||
|
||||
q = strings.ToLower(q)
|
||||
q = CanonicalName(q)
|
||||
|
||||
var handler Handler
|
||||
for off, end := 0, false; !end; off, end = NextLabel(q, off) {
|
||||
|
@ -66,7 +65,7 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
|
|||
if mux.z == nil {
|
||||
mux.z = make(map[string]Handler)
|
||||
}
|
||||
mux.z[Fqdn(pattern)] = handler
|
||||
mux.z[CanonicalName(pattern)] = handler
|
||||
mux.m.Unlock()
|
||||
}
|
||||
|
||||
|
@ -81,7 +80,7 @@ func (mux *ServeMux) HandleRemove(pattern string) {
|
|||
panic("dns: invalid pattern " + pattern)
|
||||
}
|
||||
mux.m.Lock()
|
||||
delete(mux.z, Fqdn(pattern))
|
||||
delete(mux.z, CanonicalName(pattern))
|
||||
mux.m.Unlock()
|
||||
}
|
||||
|
||||
|
@ -92,7 +91,7 @@ func (mux *ServeMux) HandleRemove(pattern string) {
|
|||
// are redirected to the parent zone (if that is also registered),
|
||||
// otherwise the child gets the query.
|
||||
//
|
||||
// If no handler is found, or there is no question, a standard SERVFAIL
|
||||
// If no handler is found, or there is no question, a standard REFUSED
|
||||
// message is returned
|
||||
func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
|
||||
var h Handler
|
||||
|
@ -103,7 +102,7 @@ func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
|
|||
if h != nil {
|
||||
h.ServeDNS(w, req)
|
||||
} else {
|
||||
HandleFailed(w, req)
|
||||
handleRefused(w, req)
|
||||
}
|
||||
}
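A minimal sketch of the new default behaviour: queries for zones with no registered handler now get REFUSED rather than SERVFAIL. The zone, address, and port below are placeholders.

```go
package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})
	// Queries outside example.org. fall through to the default and are REFUSED.
	srv := &dns.Server{Addr: "127.0.0.1:8053", Net: "udp"}
	log.Fatal(srv.ListenAndServe())
}
```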
|
||||
|
||||
|
|
|
@ -72,13 +72,22 @@ type response struct {
|
|||
tsigStatus error
|
||||
tsigRequestMAC string
|
||||
tsigSecret map[string]string // the tsig secrets
|
||||
udp *net.UDPConn // i/o connection if UDP was used
|
||||
udp net.PacketConn // i/o connection if UDP was used
|
||||
tcp net.Conn // i/o connection if TCP was used
|
||||
udpSession *SessionUDP // oob data to get egress interface right
|
||||
pcSession net.Addr // address to use when writing to a generic net.PacketConn
|
||||
writer Writer // writer to output the raw DNS bits
|
||||
}
|
||||
|
||||
// handleRefused returns a HandlerFunc that returns REFUSED for every request it gets.
|
||||
func handleRefused(w ResponseWriter, r *Msg) {
|
||||
m := new(Msg)
|
||||
m.SetRcode(r, RcodeRefused)
|
||||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets.
|
||||
// Deprecated: This function is going away.
|
||||
func HandleFailed(w ResponseWriter, r *Msg) {
|
||||
m := new(Msg)
|
||||
m.SetRcode(r, RcodeServerFailure)
|
||||
|
@ -139,12 +148,24 @@ type Reader interface {
|
|||
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
|
||||
}
|
||||
|
||||
// defaultReader is an adapter for the Server struct that implements the Reader interface
|
||||
// using the readTCP and readUDP func of the embedded Server.
|
||||
// PacketConnReader is an optional interface that Readers can implement to support using generic net.PacketConns.
|
||||
type PacketConnReader interface {
|
||||
Reader
|
||||
|
||||
// ReadPacketConn reads a raw message from a generic net.PacketConn UDP connection. Implementations may
|
||||
// alter connection properties, for example the read-deadline.
|
||||
ReadPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error)
|
||||
}
|
||||
|
||||
// defaultReader is an adapter for the Server struct that implements the Reader and
|
||||
// PacketConnReader interfaces using the readTCP, readUDP and readPacketConn funcs
|
||||
// of the embedded Server.
|
||||
type defaultReader struct {
|
||||
*Server
|
||||
}
|
||||
|
||||
var _ PacketConnReader = defaultReader{}
|
||||
|
||||
func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
|
||||
return dr.readTCP(conn, timeout)
|
||||
}
|
||||
|
@ -153,8 +174,14 @@ func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byt
|
|||
return dr.readUDP(conn, timeout)
|
||||
}
|
||||
|
||||
func (dr defaultReader) ReadPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error) {
|
||||
return dr.readPacketConn(conn, timeout)
|
||||
}
|
||||
|
||||
// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
|
||||
// Implementations should never return a nil Reader.
|
||||
// Readers should also implement the optional PacketConnReader interface.
|
||||
// PacketConnReader is required to use a generic net.PacketConn.
|
||||
type DecorateReader func(Reader) Reader
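A hedged sketch of a DecorateReader hook that also satisfies the new PacketConnReader interface, as the comment above asks. loggingReader, the log message, and the listen address are invented for illustration.

```go
package main

import (
	"errors"
	"log"
	"net"
	"time"

	"github.com/miekg/dns"
)

// loggingReader wraps the server's default Reader; the embedded Reader supplies
// ReadTCP/ReadUDP, and ReadPacketConn below keeps generic net.PacketConns working.
type loggingReader struct {
	dns.Reader
}

func (r loggingReader) ReadPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error) {
	pcr, ok := r.Reader.(dns.PacketConnReader)
	if !ok {
		return nil, nil, errors.New("wrapped Reader does not implement PacketConnReader")
	}
	m, addr, err := pcr.ReadPacketConn(conn, timeout)
	if err == nil {
		log.Printf("read %d bytes from %v", len(m), addr)
	}
	return m, addr, err
}

func main() {
	srv := &dns.Server{
		Addr:           "127.0.0.1:8053",
		Net:            "udp",
		DecorateReader: func(r dns.Reader) dns.Reader { return loggingReader{r} },
	}
	log.Fatal(srv.ListenAndServe())
}
```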
|
||||
|
||||
// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
|
||||
|
@ -294,6 +321,7 @@ func (srv *Server) ListenAndServe() error {
|
|||
}
|
||||
u := l.(*net.UDPConn)
|
||||
if e := setUDPSocketOptions(u); e != nil {
|
||||
u.Close()
|
||||
return e
|
||||
}
|
||||
srv.PacketConn = l
|
||||
|
@ -317,24 +345,22 @@ func (srv *Server) ActivateAndServe() error {
|
|||
|
||||
srv.init()
|
||||
|
||||
pConn := srv.PacketConn
|
||||
l := srv.Listener
|
||||
if pConn != nil {
|
||||
if srv.PacketConn != nil {
|
||||
// Check PacketConn interface's type is valid and value
|
||||
// is not nil
|
||||
if t, ok := pConn.(*net.UDPConn); ok && t != nil {
|
||||
if t, ok := srv.PacketConn.(*net.UDPConn); ok && t != nil {
|
||||
if e := setUDPSocketOptions(t); e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
srv.started = true
|
||||
unlock()
|
||||
return srv.serveUDP(t)
|
||||
return srv.serveUDP(srv.PacketConn)
|
||||
}
|
||||
}
|
||||
if l != nil {
|
||||
if srv.Listener != nil {
|
||||
srv.started = true
|
||||
unlock()
|
||||
return srv.serveTCP(l)
|
||||
return srv.serveTCP(srv.Listener)
|
||||
}
|
||||
return &Error{err: "bad listeners"}
|
||||
}
|
||||
|
@ -438,18 +464,24 @@ func (srv *Server) serveTCP(l net.Listener) error {
|
|||
}
|
||||
|
||||
// serveUDP starts a UDP listener for the server.
|
||||
func (srv *Server) serveUDP(l *net.UDPConn) error {
|
||||
func (srv *Server) serveUDP(l net.PacketConn) error {
|
||||
defer l.Close()
|
||||
|
||||
if srv.NotifyStartedFunc != nil {
|
||||
srv.NotifyStartedFunc()
|
||||
}
|
||||
|
||||
reader := Reader(defaultReader{srv})
|
||||
if srv.DecorateReader != nil {
|
||||
reader = srv.DecorateReader(reader)
|
||||
}
|
||||
|
||||
lUDP, isUDP := l.(*net.UDPConn)
|
||||
readerPC, canPacketConn := reader.(PacketConnReader)
|
||||
if !isUDP && !canPacketConn {
|
||||
return &Error{err: "PacketConnReader was not implemented on Reader returned from DecorateReader but is required for net.PacketConn"}
|
||||
}
|
||||
|
||||
if srv.NotifyStartedFunc != nil {
|
||||
srv.NotifyStartedFunc()
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
defer func() {
|
||||
wg.Wait()
|
||||
|
@ -459,7 +491,17 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
|
|||
rtimeout := srv.getReadTimeout()
|
||||
// deadline is not used here
|
||||
for srv.isStarted() {
|
||||
m, s, err := reader.ReadUDP(l, rtimeout)
|
||||
var (
|
||||
m []byte
|
||||
sPC net.Addr
|
||||
sUDP *SessionUDP
|
||||
err error
|
||||
)
|
||||
if isUDP {
|
||||
m, sUDP, err = reader.ReadUDP(lUDP, rtimeout)
|
||||
} else {
|
||||
m, sPC, err = readerPC.ReadPacketConn(l, rtimeout)
|
||||
}
|
||||
if err != nil {
|
||||
if !srv.isStarted() {
|
||||
return nil
|
||||
|
@ -476,7 +518,7 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
|
|||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go srv.serveUDPPacket(&wg, m, l, s)
|
||||
go srv.serveUDPPacket(&wg, m, l, sUDP, sPC)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -538,8 +580,8 @@ func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) {
|
|||
}
|
||||
|
||||
// Serve a new UDP request.
|
||||
func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u *net.UDPConn, s *SessionUDP) {
|
||||
w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: s}
|
||||
func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u net.PacketConn, udpSession *SessionUDP, pcSession net.Addr) {
|
||||
w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: udpSession, pcSession: pcSession}
|
||||
if srv.DecorateWriter != nil {
|
||||
w.writer = srv.DecorateWriter(w)
|
||||
} else {
|
||||
|
@ -651,6 +693,24 @@ func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *S
|
|||
return m, s, nil
|
||||
}
|
||||
|
||||
func (srv *Server) readPacketConn(conn net.PacketConn, timeout time.Duration) ([]byte, net.Addr, error) {
|
||||
srv.lock.RLock()
|
||||
if srv.started {
|
||||
// See the comment in readTCP above.
|
||||
conn.SetReadDeadline(time.Now().Add(timeout))
|
||||
}
|
||||
srv.lock.RUnlock()
|
||||
|
||||
m := srv.udpPool.Get().([]byte)
|
||||
n, addr, err := conn.ReadFrom(m)
|
||||
if err != nil {
|
||||
srv.udpPool.Put(m)
|
||||
return nil, nil, err
|
||||
}
|
||||
m = m[:n]
|
||||
return m, addr, nil
|
||||
}
|
||||
|
||||
// WriteMsg implements the ResponseWriter.WriteMsg method.
|
||||
func (w *response) WriteMsg(m *Msg) (err error) {
|
||||
if w.closed {
|
||||
|
@ -684,17 +744,19 @@ func (w *response) Write(m []byte) (int, error) {
|
|||
|
||||
switch {
|
||||
case w.udp != nil:
|
||||
return WriteToSessionUDP(w.udp, m, w.udpSession)
|
||||
if u, ok := w.udp.(*net.UDPConn); ok {
|
||||
return WriteToSessionUDP(u, m, w.udpSession)
|
||||
}
|
||||
return w.udp.WriteTo(m, w.pcSession)
|
||||
case w.tcp != nil:
|
||||
if len(m) > MaxMsgSize {
|
||||
return 0, &Error{err: "message too large"}
|
||||
}
|
||||
|
||||
l := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(l, uint16(len(m)))
|
||||
|
||||
n, err := (&net.Buffers{l, m}).WriteTo(w.tcp)
|
||||
return int(n), err
|
||||
msg := make([]byte, 2+len(m))
|
||||
binary.BigEndian.PutUint16(msg, uint16(len(m)))
|
||||
copy(msg[2:], m)
|
||||
return w.tcp.Write(msg)
|
||||
default:
|
||||
panic("dns: internal error: udp and tcp both nil")
|
||||
}
|
||||
|
@ -717,10 +779,12 @@ func (w *response) RemoteAddr() net.Addr {
|
|||
switch {
|
||||
case w.udpSession != nil:
|
||||
return w.udpSession.RemoteAddr()
|
||||
case w.pcSession != nil:
|
||||
return w.pcSession
|
||||
case w.tcp != nil:
|
||||
return w.tcp.RemoteAddr()
|
||||
default:
|
||||
panic("dns: internal error: udpSession and tcp both nil")
|
||||
panic("dns: internal error: udpSession, pcSession and tcp are all nil")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -2,7 +2,6 @@ package dns
|
|||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"encoding/binary"
|
||||
|
@ -85,7 +84,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
|
|||
|
||||
var hash crypto.Hash
|
||||
switch rr.Algorithm {
|
||||
case DSA, RSASHA1:
|
||||
case RSASHA1:
|
||||
hash = crypto.SHA1
|
||||
case RSASHA256, ECDSAP256SHA256:
|
||||
hash = crypto.SHA256
|
||||
|
@ -178,17 +177,6 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
|
|||
hashed := hasher.Sum(nil)
|
||||
sig := buf[sigend:]
|
||||
switch k.Algorithm {
|
||||
case DSA:
|
||||
pk := k.publicKeyDSA()
|
||||
sig = sig[1:]
|
||||
r := new(big.Int).SetBytes(sig[:len(sig)/2])
|
||||
s := new(big.Int).SetBytes(sig[len(sig)/2:])
|
||||
if pk != nil {
|
||||
if dsa.Verify(pk, hashed, r, s) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
}
|
||||
case RSASHA1, RSASHA256, RSASHA512:
|
||||
pk := k.publicKeyRSA()
|
||||
if pk != nil {
|
@ -0,0 +1,744 @@
|
|||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"net"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type SVCBKey uint16
|
||||
|
||||
// Keys defined in draft-ietf-dnsop-svcb-https-01 Section 12.3.2.
|
||||
const (
|
||||
SVCB_MANDATORY SVCBKey = 0
|
||||
SVCB_ALPN SVCBKey = 1
|
||||
SVCB_NO_DEFAULT_ALPN SVCBKey = 2
|
||||
SVCB_PORT SVCBKey = 3
|
||||
SVCB_IPV4HINT SVCBKey = 4
|
||||
SVCB_ECHCONFIG SVCBKey = 5
|
||||
SVCB_IPV6HINT SVCBKey = 6
|
||||
svcb_RESERVED SVCBKey = 65535
|
||||
)
|
||||
|
||||
var svcbKeyToStringMap = map[SVCBKey]string{
|
||||
SVCB_MANDATORY: "mandatory",
|
||||
SVCB_ALPN: "alpn",
|
||||
SVCB_NO_DEFAULT_ALPN: "no-default-alpn",
|
||||
SVCB_PORT: "port",
|
||||
SVCB_IPV4HINT: "ipv4hint",
|
||||
SVCB_ECHCONFIG: "echconfig",
|
||||
SVCB_IPV6HINT: "ipv6hint",
|
||||
}
|
||||
|
||||
var svcbStringToKeyMap = reverseSVCBKeyMap(svcbKeyToStringMap)
|
||||
|
||||
func reverseSVCBKeyMap(m map[SVCBKey]string) map[string]SVCBKey {
|
||||
n := make(map[string]SVCBKey, len(m))
|
||||
for u, s := range m {
|
||||
n[s] = u
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// String takes the numerical code of an SVCB key and returns its name.
|
||||
// Returns an empty string for reserved keys.
|
||||
// Accepts unassigned keys as well as experimental/private keys.
|
||||
func (key SVCBKey) String() string {
|
||||
if x := svcbKeyToStringMap[key]; x != "" {
|
||||
return x
|
||||
}
|
||||
if key == svcb_RESERVED {
|
||||
return ""
|
||||
}
|
||||
return "key" + strconv.FormatUint(uint64(key), 10)
|
||||
}
|
||||
|
||||
// svcbStringToKey returns the numerical code of an SVCB key.
|
||||
// Returns svcb_RESERVED for reserved/invalid keys.
|
||||
// Accepts unassigned keys as well as experimental/private keys.
|
||||
func svcbStringToKey(s string) SVCBKey {
|
||||
if strings.HasPrefix(s, "key") {
|
||||
a, err := strconv.ParseUint(s[3:], 10, 16)
|
||||
// no leading zeros
|
||||
// key shouldn't be registered
|
||||
if err != nil || a == 65535 || s[3] == '0' || svcbKeyToStringMap[SVCBKey(a)] != "" {
|
||||
return svcb_RESERVED
|
||||
}
|
||||
return SVCBKey(a)
|
||||
}
|
||||
if key, ok := svcbStringToKeyMap[s]; ok {
|
||||
return key
|
||||
}
|
||||
return svcb_RESERVED
|
||||
}
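A small hedged illustration of the key-naming rules implemented above: assigned keys print their registered name, unassigned keys print in keyNNNN form, and the reserved key prints as an empty string.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.SVCB_ALPN.String())      // alpn
	fmt.Println(dns.SVCBKey(7).String())     // key7 (unassigned)
	fmt.Println(dns.SVCBKey(65535).String()) // empty string (reserved)
}
```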
|
||||
|
||||
func (rr *SVCB) parse(c *zlexer, o string) *ParseError {
|
||||
l, _ := c.Next()
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return &ParseError{l.token, "bad SVCB priority", l}
|
||||
}
|
||||
rr.Priority = uint16(i)
|
||||
|
||||
c.Next() // zBlank
|
||||
l, _ = c.Next() // zString
|
||||
rr.Target = l.token
|
||||
|
||||
name, nameOk := toAbsoluteName(l.token, o)
|
||||
if l.err || !nameOk {
|
||||
return &ParseError{l.token, "bad SVCB Target", l}
|
||||
}
|
||||
rr.Target = name
|
||||
|
||||
// Values (if any)
|
||||
l, _ = c.Next()
|
||||
var xs []SVCBKeyValue
|
||||
// Helps require whitespace between pairs.
|
||||
// Prevents key1000="a"key1001=...
|
||||
canHaveNextKey := true
|
||||
for l.value != zNewline && l.value != zEOF {
|
||||
switch l.value {
|
||||
case zString:
|
||||
if !canHaveNextKey {
|
||||
// The key we can now read was probably meant to be
|
||||
// a part of the last value.
|
||||
return &ParseError{l.token, "bad SVCB value quotation", l}
|
||||
}
|
||||
|
||||
// In key=value pairs, value does not have to be quoted unless value
|
||||
// contains whitespace. And keys don't need to have values.
|
||||
// Similarly, keys with an equality signs after them don't need values.
|
||||
// l.token includes at least up to the first equality sign.
|
||||
idx := strings.IndexByte(l.token, '=')
|
||||
var key, value string
|
||||
if idx < 0 {
|
||||
// Key with no value and no equality sign
|
||||
key = l.token
|
||||
} else if idx == 0 {
|
||||
return &ParseError{l.token, "bad SVCB key", l}
|
||||
} else {
|
||||
key, value = l.token[:idx], l.token[idx+1:]
|
||||
|
||||
if value == "" {
|
||||
// We have a key and an equality sign. Maybe we have nothing
|
||||
// after "=" or we have a double quote.
|
||||
l, _ = c.Next()
|
||||
if l.value == zQuote {
|
||||
// Only needed when value ends with double quotes.
|
||||
// Any value starting with zQuote ends with it.
|
||||
canHaveNextKey = false
|
||||
|
||||
l, _ = c.Next()
|
||||
switch l.value {
|
||||
case zString:
|
||||
// We have a value in double quotes.
|
||||
value = l.token
|
||||
l, _ = c.Next()
|
||||
if l.value != zQuote {
|
||||
return &ParseError{l.token, "SVCB unterminated value", l}
|
||||
}
|
||||
case zQuote:
|
||||
// There's nothing in double quotes.
|
||||
default:
|
||||
return &ParseError{l.token, "bad SVCB value", l}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
kv := makeSVCBKeyValue(svcbStringToKey(key))
|
||||
if kv == nil {
|
||||
return &ParseError{l.token, "bad SVCB key", l}
|
||||
}
|
||||
if err := kv.parse(value); err != nil {
|
||||
return &ParseError{l.token, err.Error(), l}
|
||||
}
|
||||
xs = append(xs, kv)
|
||||
case zQuote:
|
||||
return &ParseError{l.token, "SVCB key can't contain double quotes", l}
|
||||
case zBlank:
|
||||
canHaveNextKey = true
|
||||
default:
|
||||
return &ParseError{l.token, "bad SVCB values", l}
|
||||
}
|
||||
l, _ = c.Next()
|
||||
}
|
||||
rr.Value = xs
|
||||
if rr.Priority == 0 && len(xs) > 0 {
|
||||
return &ParseError{l.token, "SVCB aliasform can't have values", l}
|
||||
}
|
||||
return nil
|
||||
}
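A hedged sketch of parsing an HTTPS record through the public API, which exercises the key=value handling above; the name and parameter values are made up.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR(`svc.example.org. 3600 IN HTTPS 1 . alpn="h2,http/1.1" port=8443 ipv4hint=192.0.2.1`)
	if err != nil {
		panic(err)
	}
	https := rr.(*dns.HTTPS)
	for _, kv := range https.Value {
		fmt.Println(kv.Key(), "=", kv) // e.g. alpn = h2,http/1.1
	}
}
```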
|
||||
|
||||
// makeSVCBKeyValue returns an SVCBKeyValue struct with the key or nil for reserved keys.
|
||||
func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue {
|
||||
switch key {
|
||||
case SVCB_MANDATORY:
|
||||
return new(SVCBMandatory)
|
||||
case SVCB_ALPN:
|
||||
return new(SVCBAlpn)
|
||||
case SVCB_NO_DEFAULT_ALPN:
|
||||
return new(SVCBNoDefaultAlpn)
|
||||
case SVCB_PORT:
|
||||
return new(SVCBPort)
|
||||
case SVCB_IPV4HINT:
|
||||
return new(SVCBIPv4Hint)
|
||||
case SVCB_ECHCONFIG:
|
||||
return new(SVCBECHConfig)
|
||||
case SVCB_IPV6HINT:
|
||||
return new(SVCBIPv6Hint)
|
||||
case svcb_RESERVED:
|
||||
return nil
|
||||
default:
|
||||
e := new(SVCBLocal)
|
||||
e.KeyCode = key
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
// SVCB RR. See RFC xxxx (https://tools.ietf.org/html/draft-ietf-dnsop-svcb-https-01).
|
||||
type SVCB struct {
|
||||
Hdr RR_Header
|
||||
Priority uint16
|
||||
Target string `dns:"domain-name"`
|
||||
Value []SVCBKeyValue `dns:"pairs"` // Value must be empty if Priority is zero.
|
||||
}
|
||||
|
||||
// HTTPS RR. Everything valid for SVCB applies to HTTPS as well.
|
||||
// Except that the HTTPS record is intended for use with the HTTP and HTTPS protocols.
|
||||
type HTTPS struct {
|
||||
SVCB
|
||||
}
|
||||
|
||||
func (rr *HTTPS) String() string {
|
||||
return rr.SVCB.String()
|
||||
}
|
||||
|
||||
func (rr *HTTPS) parse(c *zlexer, o string) *ParseError {
|
||||
return rr.SVCB.parse(c, o)
|
||||
}
|
||||
|
||||
// SVCBKeyValue defines a key=value pair for the SVCB RR type.
// An SVCB RR can have multiple SVCBKeyValues appended to it.
type SVCBKeyValue interface {
	Key() SVCBKey          // Key returns the numerical key code.
	pack() ([]byte, error) // pack returns the encoded value.
	unpack([]byte) error   // unpack sets the value.
	String() string        // String returns the string representation of the value.
	parse(string) error    // parse sets the value to the given string representation of the value.
	copy() SVCBKeyValue    // copy returns a deep-copy of the pair.
	len() int              // len returns the length of value in the wire format.
}
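// Editor's note: the following sketch is not part of the upstream diff. It is a
// minimal example of how the exported pair types defined below combine into a
// ServiceForm SVCB record; the owner name and parameter values are illustrative only.
//
//	s := &dns.SVCB{
//		Hdr:      dns.RR_Header{Name: "example.com.", Rrtype: dns.TypeSVCB, Class: dns.ClassINET, Ttl: 300},
//		Priority: 1,   // non-zero: ServiceForm, so Value may carry pairs
//		Target:   ".", // "." means the owner name itself
//	}
//	s.Value = append(s.Value,
//		&dns.SVCBAlpn{Alpn: []string{"h2", "http/1.1"}},
//		&dns.SVCBPort{Port: 443},
//	)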
|
||||
|
||||
// SVCBMandatory pair names the keys that must be interpreted for the RR
// to be functional.
|
||||
// Basic use pattern for creating a mandatory option:
|
||||
//
|
||||
// s := &dns.SVCB{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET}}
|
||||
// e := new(dns.SVCBMandatory)
|
||||
// e.Code = []dns.SVCBKey{65403}
|
||||
// s.Value = append(s.Value, e)
|
||||
type SVCBMandatory struct {
|
||||
Code []SVCBKey // Must not include mandatory
|
||||
}
|
||||
|
||||
func (*SVCBMandatory) Key() SVCBKey { return SVCB_MANDATORY }
|
||||
|
||||
func (s *SVCBMandatory) String() string {
|
||||
str := make([]string, len(s.Code))
|
||||
for i, e := range s.Code {
|
||||
str[i] = e.String()
|
||||
}
|
||||
return strings.Join(str, ",")
|
||||
}
|
||||
|
||||
func (s *SVCBMandatory) pack() ([]byte, error) {
|
||||
codes := append([]SVCBKey(nil), s.Code...)
|
||||
sort.Slice(codes, func(i, j int) bool {
|
||||
return codes[i] < codes[j]
|
||||
})
|
||||
b := make([]byte, 2*len(codes))
|
||||
for i, e := range codes {
|
||||
binary.BigEndian.PutUint16(b[2*i:], uint16(e))
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (s *SVCBMandatory) unpack(b []byte) error {
|
||||
if len(b)%2 != 0 {
|
||||
return errors.New("dns: svcbmandatory: value length is not a multiple of 2")
|
||||
}
|
||||
codes := make([]SVCBKey, 0, len(b)/2)
|
||||
for i := 0; i < len(b); i += 2 {
|
||||
// We assume strictly increasing order.
|
||||
codes = append(codes, SVCBKey(binary.BigEndian.Uint16(b[i:])))
|
||||
}
|
||||
s.Code = codes
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBMandatory) parse(b string) error {
|
||||
str := strings.Split(b, ",")
|
||||
codes := make([]SVCBKey, 0, len(str))
|
||||
for _, e := range str {
|
||||
codes = append(codes, svcbStringToKey(e))
|
||||
}
|
||||
s.Code = codes
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBMandatory) len() int {
|
||||
return 2 * len(s.Code)
|
||||
}
|
||||
|
||||
func (s *SVCBMandatory) copy() SVCBKeyValue {
|
||||
return &SVCBMandatory{
|
||||
append([]SVCBKey(nil), s.Code...),
|
||||
}
|
||||
}
|
||||
|
||||
// SVCBAlpn pair is used to list supported connection protocols.
|
||||
// Protocol ids can be found at:
|
||||
// https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
|
||||
// Basic use pattern for creating an alpn option:
|
||||
//
|
||||
// h := new(dns.HTTPS)
|
||||
// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET}
|
||||
// e := new(dns.SVCBAlpn)
|
||||
// e.Alpn = []string{"h2", "http/1.1"}
|
||||
// h.Value = append(h.Value, e)
|
||||
type SVCBAlpn struct {
|
||||
Alpn []string
|
||||
}
|
||||
|
||||
func (*SVCBAlpn) Key() SVCBKey { return SVCB_ALPN }
|
||||
func (s *SVCBAlpn) String() string { return strings.Join(s.Alpn, ",") }
|
||||
|
||||
func (s *SVCBAlpn) pack() ([]byte, error) {
|
||||
// Liberally estimate the size of an alpn as 10 octets
|
||||
b := make([]byte, 0, 10*len(s.Alpn))
|
||||
for _, e := range s.Alpn {
|
||||
if len(e) == 0 {
|
||||
return nil, errors.New("dns: svcbalpn: empty alpn-id")
|
||||
}
|
||||
if len(e) > 255 {
|
||||
return nil, errors.New("dns: svcbalpn: alpn-id too long")
|
||||
}
|
||||
b = append(b, byte(len(e)))
|
||||
b = append(b, e...)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (s *SVCBAlpn) unpack(b []byte) error {
|
||||
// Estimate the size of the smallest alpn as 4 bytes
|
||||
alpn := make([]string, 0, len(b)/4)
|
||||
for i := 0; i < len(b); {
|
||||
length := int(b[i])
|
||||
i++
|
||||
if i+length > len(b) {
|
||||
return errors.New("dns: svcbalpn: alpn array overflowing")
|
||||
}
|
||||
alpn = append(alpn, string(b[i:i+length]))
|
||||
i += length
|
||||
}
|
||||
s.Alpn = alpn
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBAlpn) parse(b string) error {
|
||||
s.Alpn = strings.Split(b, ",")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBAlpn) len() int {
|
||||
var l int
|
||||
for _, e := range s.Alpn {
|
||||
l += 1 + len(e)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (s *SVCBAlpn) copy() SVCBKeyValue {
|
||||
return &SVCBAlpn{
|
||||
append([]string(nil), s.Alpn...),
|
||||
}
|
||||
}
|
||||
|
||||
// SVCBNoDefaultAlpn pair signifies no support for default connection protocols.
|
||||
// Basic use pattern for creating a no-default-alpn option:
|
||||
//
|
||||
// s := &dns.SVCB{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET}}
|
||||
// e := new(dns.SVCBNoDefaultAlpn)
|
||||
// s.Value = append(s.Value, e)
|
||||
type SVCBNoDefaultAlpn struct{}
|
||||
|
||||
func (*SVCBNoDefaultAlpn) Key() SVCBKey { return SVCB_NO_DEFAULT_ALPN }
|
||||
func (*SVCBNoDefaultAlpn) copy() SVCBKeyValue { return &SVCBNoDefaultAlpn{} }
|
||||
func (*SVCBNoDefaultAlpn) pack() ([]byte, error) { return []byte{}, nil }
|
||||
func (*SVCBNoDefaultAlpn) String() string { return "" }
|
||||
func (*SVCBNoDefaultAlpn) len() int { return 0 }
|
||||
|
||||
func (*SVCBNoDefaultAlpn) unpack(b []byte) error {
|
||||
if len(b) != 0 {
|
||||
return errors.New("dns: svcbnodefaultalpn: no_default_alpn must have no value")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SVCBNoDefaultAlpn) parse(b string) error {
|
||||
if len(b) != 0 {
|
||||
return errors.New("dns: svcbnodefaultalpn: no_default_alpn must have no value")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SVCBPort pair defines the port for connection.
|
||||
// Basic use pattern for creating a port option:
|
||||
//
|
||||
// s := &dns.SVCB{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET}}
|
||||
// e := new(dns.SVCBPort)
|
||||
// e.Port = 80
|
||||
// s.Value = append(s.Value, e)
|
||||
type SVCBPort struct {
|
||||
Port uint16
|
||||
}
|
||||
|
||||
func (*SVCBPort) Key() SVCBKey { return SVCB_PORT }
|
||||
func (*SVCBPort) len() int { return 2 }
|
||||
func (s *SVCBPort) String() string { return strconv.FormatUint(uint64(s.Port), 10) }
|
||||
func (s *SVCBPort) copy() SVCBKeyValue { return &SVCBPort{s.Port} }
|
||||
|
||||
func (s *SVCBPort) unpack(b []byte) error {
|
||||
if len(b) != 2 {
|
||||
return errors.New("dns: svcbport: port length is not exactly 2 octets")
|
||||
}
|
||||
s.Port = binary.BigEndian.Uint16(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBPort) pack() ([]byte, error) {
|
||||
b := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(b, s.Port)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (s *SVCBPort) parse(b string) error {
|
||||
port, err := strconv.ParseUint(b, 10, 16)
|
||||
if err != nil {
|
||||
return errors.New("dns: svcbport: port out of range")
|
||||
}
|
||||
s.Port = uint16(port)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SVCBIPv4Hint pair suggests an IPv4 address which may be used to open connections
|
||||
// if A and AAAA record responses for SVCB's Target domain haven't been received.
|
||||
// In that case, optionally, A and AAAA requests can be made, after which the connection
|
||||
// to the hinted IP address may be terminated and a new connection may be opened.
|
||||
// Basic use pattern for creating an ipv4hint option:
|
||||
//
|
||||
// h := new(dns.HTTPS)
|
||||
// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET}
|
||||
// e := new(dns.SVCBIPv4Hint)
|
||||
// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()}
|
||||
//
|
||||
// Or
|
||||
//
|
||||
// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()}
|
||||
// h.Value = append(h.Value, e)
|
||||
type SVCBIPv4Hint struct {
|
||||
Hint []net.IP
|
||||
}
|
||||
|
||||
func (*SVCBIPv4Hint) Key() SVCBKey { return SVCB_IPV4HINT }
|
||||
func (s *SVCBIPv4Hint) len() int { return 4 * len(s.Hint) }
|
||||
|
||||
func (s *SVCBIPv4Hint) pack() ([]byte, error) {
|
||||
b := make([]byte, 0, 4*len(s.Hint))
|
||||
for _, e := range s.Hint {
|
||||
x := e.To4()
|
||||
if x == nil {
|
||||
return nil, errors.New("dns: svcbipv4hint: expected ipv4, hint is ipv6")
|
||||
}
|
||||
b = append(b, x...)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (s *SVCBIPv4Hint) unpack(b []byte) error {
|
||||
if len(b) == 0 || len(b)%4 != 0 {
|
||||
return errors.New("dns: svcbipv4hint: ipv4 address byte array length is not a multiple of 4")
|
||||
}
|
||||
x := make([]net.IP, 0, len(b)/4)
|
||||
for i := 0; i < len(b); i += 4 {
|
||||
x = append(x, net.IP(b[i:i+4]))
|
||||
}
|
||||
s.Hint = x
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBIPv4Hint) String() string {
|
||||
str := make([]string, len(s.Hint))
|
||||
for i, e := range s.Hint {
|
||||
x := e.To4()
|
||||
if x == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
str[i] = x.String()
|
||||
}
|
||||
return strings.Join(str, ",")
|
||||
}
|
||||
|
||||
func (s *SVCBIPv4Hint) parse(b string) error {
|
||||
if strings.Contains(b, ":") {
|
||||
return errors.New("dns: svcbipv4hint: expected ipv4, got ipv6")
|
||||
}
|
||||
str := strings.Split(b, ",")
|
||||
dst := make([]net.IP, len(str))
|
||||
for i, e := range str {
|
||||
ip := net.ParseIP(e).To4()
|
||||
if ip == nil {
|
||||
return errors.New("dns: svcbipv4hint: bad ip")
|
||||
}
|
||||
dst[i] = ip
|
||||
}
|
||||
s.Hint = dst
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBIPv4Hint) copy() SVCBKeyValue {
|
||||
return &SVCBIPv4Hint{
|
||||
append([]net.IP(nil), s.Hint...),
|
||||
}
|
||||
}
|
||||
|
||||
// SVCBECHConfig pair contains the ECHConfig structure defined in draft-ietf-tls-esni [RFC xxxx].
|
||||
// Basic use pattern for creating an echconfig option:
|
||||
//
|
||||
// h := new(dns.HTTPS)
|
||||
// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET}
|
||||
// e := new(dns.SVCBECHConfig)
|
||||
// e.ECH = []byte{0xfe, 0x08, ...}
|
||||
// h.Value = append(h.Value, e)
|
||||
type SVCBECHConfig struct {
|
||||
ECH []byte
|
||||
}
|
||||
|
||||
func (*SVCBECHConfig) Key() SVCBKey { return SVCB_ECHCONFIG }
|
||||
func (s *SVCBECHConfig) String() string { return toBase64(s.ECH) }
|
||||
func (s *SVCBECHConfig) len() int { return len(s.ECH) }
|
||||
|
||||
func (s *SVCBECHConfig) pack() ([]byte, error) {
|
||||
return append([]byte(nil), s.ECH...), nil
|
||||
}
|
||||
|
||||
func (s *SVCBECHConfig) copy() SVCBKeyValue {
|
||||
return &SVCBECHConfig{
|
||||
append([]byte(nil), s.ECH...),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SVCBECHConfig) unpack(b []byte) error {
|
||||
s.ECH = append([]byte(nil), b...)
|
||||
return nil
|
||||
}
|
||||
func (s *SVCBECHConfig) parse(b string) error {
|
||||
x, err := fromBase64([]byte(b))
|
||||
if err != nil {
|
||||
return errors.New("dns: svcbechconfig: bad base64 echconfig")
|
||||
}
|
||||
s.ECH = x
|
||||
return nil
|
||||
}
|
||||
|
||||
// SVCBIPv6Hint pair suggests an IPv6 address which may be used to open connections
|
||||
// if A and AAAA record responses for SVCB's Target domain haven't been received.
|
||||
// In that case, optionally, A and AAAA requests can be made, after which the
|
||||
// connection to the hinted IP address may be terminated and a new connection may be opened.
|
||||
// Basic use pattern for creating an ipv6hint option:
|
||||
//
|
||||
// h := new(dns.HTTPS)
|
||||
// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET}
|
||||
// e := new(dns.SVCBIPv6Hint)
|
||||
// e.Hint = []net.IP{net.ParseIP("2001:db8::1")}
|
||||
// h.Value = append(h.Value, e)
|
||||
type SVCBIPv6Hint struct {
|
||||
Hint []net.IP
|
||||
}
|
||||
|
||||
func (*SVCBIPv6Hint) Key() SVCBKey { return SVCB_IPV6HINT }
|
||||
func (s *SVCBIPv6Hint) len() int { return 16 * len(s.Hint) }
|
||||
|
||||
func (s *SVCBIPv6Hint) pack() ([]byte, error) {
|
||||
b := make([]byte, 0, 16*len(s.Hint))
|
||||
for _, e := range s.Hint {
|
||||
if len(e) != net.IPv6len || e.To4() != nil {
|
||||
return nil, errors.New("dns: svcbipv6hint: expected ipv6, hint is ipv4")
|
||||
}
|
||||
b = append(b, e...)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (s *SVCBIPv6Hint) unpack(b []byte) error {
|
||||
if len(b) == 0 || len(b)%16 != 0 {
|
||||
return errors.New("dns: svcbipv6hint: ipv6 address byte array length not a multiple of 16")
|
||||
}
|
||||
x := make([]net.IP, 0, len(b)/16)
|
||||
for i := 0; i < len(b); i += 16 {
|
||||
ip := net.IP(b[i : i+16])
|
||||
if ip.To4() != nil {
|
||||
return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4")
|
||||
}
|
||||
x = append(x, ip)
|
||||
}
|
||||
s.Hint = x
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBIPv6Hint) String() string {
|
||||
str := make([]string, len(s.Hint))
|
||||
for i, e := range s.Hint {
|
||||
if x := e.To4(); x != nil {
|
||||
return "<nil>"
|
||||
}
|
||||
str[i] = e.String()
|
||||
}
|
||||
return strings.Join(str, ",")
|
||||
}
|
||||
|
||||
func (s *SVCBIPv6Hint) parse(b string) error {
|
||||
if strings.Contains(b, ".") {
|
||||
return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4")
|
||||
}
|
||||
str := strings.Split(b, ",")
|
||||
dst := make([]net.IP, len(str))
|
||||
for i, e := range str {
|
||||
ip := net.ParseIP(e)
|
||||
if ip == nil {
|
||||
return errors.New("dns: svcbipv6hint: bad ip")
|
||||
}
|
||||
dst[i] = ip
|
||||
}
|
||||
s.Hint = dst
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBIPv6Hint) copy() SVCBKeyValue {
|
||||
return &SVCBIPv6Hint{
|
||||
append([]net.IP(nil), s.Hint...),
|
||||
}
|
||||
}
|
||||
|
||||
// SVCBLocal pair is intended for experimental/private use. The key is recommended
|
||||
// to be in the range [SVCB_PRIVATE_LOWER, SVCB_PRIVATE_UPPER].
|
||||
// Basic use pattern for creating a keyNNNNN option:
|
||||
//
|
||||
// h := new(dns.HTTPS)
|
||||
// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET}
|
||||
// e := new(dns.SVCBLocal)
|
||||
// e.KeyCode = 65400
|
||||
// e.Data = []byte("abc")
|
||||
// h.Value = append(h.Value, e)
|
||||
type SVCBLocal struct {
|
||||
KeyCode SVCBKey // Never 65535 or any assigned keys.
|
||||
Data []byte // All byte sequences are allowed.
|
||||
}
|
||||
|
||||
func (s *SVCBLocal) Key() SVCBKey { return s.KeyCode }
|
||||
func (s *SVCBLocal) pack() ([]byte, error) { return append([]byte(nil), s.Data...), nil }
|
||||
func (s *SVCBLocal) len() int { return len(s.Data) }
|
||||
|
||||
func (s *SVCBLocal) unpack(b []byte) error {
|
||||
s.Data = append([]byte(nil), b...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBLocal) String() string {
|
||||
var str strings.Builder
|
||||
str.Grow(4 * len(s.Data))
|
||||
for _, e := range s.Data {
|
||||
if ' ' <= e && e <= '~' {
|
||||
switch e {
|
||||
case '"', ';', ' ', '\\':
|
||||
str.WriteByte('\\')
|
||||
str.WriteByte(e)
|
||||
default:
|
||||
str.WriteByte(e)
|
||||
}
|
||||
} else {
|
||||
str.WriteString(escapeByte(e))
|
||||
}
|
||||
}
|
||||
return str.String()
|
||||
}
|
||||
|
||||
func (s *SVCBLocal) parse(b string) error {
|
||||
data := make([]byte, 0, len(b))
|
||||
for i := 0; i < len(b); {
|
||||
if b[i] != '\\' {
|
||||
data = append(data, b[i])
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if i+1 == len(b) {
|
||||
return errors.New("dns: svcblocal: svcb private/experimental key escape unterminated")
|
||||
}
|
||||
if isDigit(b[i+1]) {
|
||||
if i+3 < len(b) && isDigit(b[i+2]) && isDigit(b[i+3]) {
|
||||
a, err := strconv.ParseUint(b[i+1:i+4], 10, 8)
|
||||
if err == nil {
|
||||
i += 4
|
||||
data = append(data, byte(a))
|
||||
continue
|
||||
}
|
||||
}
|
||||
return errors.New("dns: svcblocal: svcb private/experimental key bad escaped octet")
|
||||
} else {
|
||||
data = append(data, b[i+1])
|
||||
i += 2
|
||||
}
|
||||
}
|
||||
s.Data = data
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SVCBLocal) copy() SVCBKeyValue {
|
||||
return &SVCBLocal{s.KeyCode,
|
||||
append([]byte(nil), s.Data...),
|
||||
}
|
||||
}
|
||||
|
||||
func (rr *SVCB) String() string {
	s := rr.Hdr.String() +
		strconv.Itoa(int(rr.Priority)) + " " +
		sprintName(rr.Target)
	for _, e := range rr.Value {
		s += " " + e.Key().String() + "=\"" + e.String() + "\""
	}
	return s
}
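// Editor's note: illustrative sketch, not part of the upstream diff. The String
// method above emits the same zone-file syntax that the SVCB parser earlier in
// this file accepts, so records round-trip through text; the record below is
// hypothetical.
//
//	rr, err := dns.NewRR(`svc.example.com. 300 IN SVCB 1 backend.example.net. alpn="h2,http/1.1" port="8443"`)
//	if err != nil {
//		// handle the parse error
//	}
//	fmt.Println(rr) // re-serialized via (*SVCB).String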
|
||||
|
||||
// areSVCBPairArraysEqual checks if SVCBKeyValue arrays are equal after sorting their
|
||||
// copies. The two arrays are assumed to have equal lengths; otherwise zduplicate.go wouldn't call this function.
|
||||
func areSVCBPairArraysEqual(a []SVCBKeyValue, b []SVCBKeyValue) bool {
|
||||
a = append([]SVCBKeyValue(nil), a...)
|
||||
b = append([]SVCBKeyValue(nil), b...)
|
||||
sort.Slice(a, func(i, j int) bool { return a[i].Key() < a[j].Key() })
|
||||
sort.Slice(b, func(i, j int) bool { return b[i].Key() < b[j].Key() })
|
||||
for i, e := range a {
|
||||
if e.Key() != b[i].Key() {
|
||||
return false
|
||||
}
|
||||
b1, err1 := e.pack()
|
||||
b2, err2 := b[i].pack()
|
||||
if err1 != nil || err2 != nil || !bytes.Equal(b1, b2) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
|
@ -2,7 +2,6 @@ package dns
|
|||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
|
@ -16,12 +15,65 @@ import (
|
|||
|
||||
// HMAC hashing codes. These are transmitted as domain names.
|
||||
const (
|
||||
HmacMD5 = "hmac-md5.sig-alg.reg.int."
|
||||
HmacSHA1 = "hmac-sha1."
|
||||
HmacSHA224 = "hmac-sha224."
|
||||
HmacSHA256 = "hmac-sha256."
|
||||
HmacSHA384 = "hmac-sha384."
|
||||
HmacSHA512 = "hmac-sha512."
|
||||
|
||||
HmacMD5 = "hmac-md5.sig-alg.reg.int." // Deprecated: HmacMD5 is no longer supported.
|
||||
)
|
||||
|
||||
// TsigProvider provides the API to plug in a custom TSIG implementation.
type TsigProvider interface {
	// Generate is passed the DNS message to be signed and the partial TSIG RR. It returns the signature and a nil error on success, otherwise an error.
	Generate(msg []byte, t *TSIG) ([]byte, error)
	// Verify is passed the DNS message to be verified and the TSIG RR. If the signature is valid it returns nil, otherwise an error.
	Verify(msg []byte, t *TSIG) error
}
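// Editor's note: illustrative sketch, not part of the upstream diff. Any type
// with these two exported methods satisfies TsigProvider; the provider below
// delegates signing to a hypothetical external signer (kmsSign and kmsVerify
// are placeholders, not real APIs), while the package's own tsigHMACProvider
// below shows the built-in HMAC variant.
//
//	type kmsTsigProvider struct{ keyID string }
//
//	func (p kmsTsigProvider) Generate(msg []byte, t *dns.TSIG) ([]byte, error) {
//		return kmsSign(p.keyID, msg) // MAC over the prepared wire-format buffer
//	}
//
//	func (p kmsTsigProvider) Verify(msg []byte, t *dns.TSIG) error {
//		mac, err := hex.DecodeString(t.MAC) // t.MAC is hex-encoded
//		if err != nil {
//			return err
//		}
//		return kmsVerify(p.keyID, msg, mac)
//	}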
|
||||
|
||||
type tsigHMACProvider string
|
||||
|
||||
func (key tsigHMACProvider) Generate(msg []byte, t *TSIG) ([]byte, error) {
|
||||
// If we barf here, the caller is to blame
|
||||
rawsecret, err := fromBase64([]byte(key))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var h hash.Hash
|
||||
switch CanonicalName(t.Algorithm) {
|
||||
case HmacSHA1:
|
||||
h = hmac.New(sha1.New, rawsecret)
|
||||
case HmacSHA224:
|
||||
h = hmac.New(sha256.New224, rawsecret)
|
||||
case HmacSHA256:
|
||||
h = hmac.New(sha256.New, rawsecret)
|
||||
case HmacSHA384:
|
||||
h = hmac.New(sha512.New384, rawsecret)
|
||||
case HmacSHA512:
|
||||
h = hmac.New(sha512.New, rawsecret)
|
||||
default:
|
||||
return nil, ErrKeyAlg
|
||||
}
|
||||
h.Write(msg)
|
||||
return h.Sum(nil), nil
|
||||
}
|
||||
|
||||
func (key tsigHMACProvider) Verify(msg []byte, t *TSIG) error {
|
||||
b, err := key.Generate(msg, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mac, err := hex.DecodeString(t.MAC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !hmac.Equal(b, mac) {
|
||||
return ErrSig
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TSIG is the RR the holds the transaction signature of a message.
|
||||
// See RFC 2845 and RFC 4635.
|
||||
type TSIG struct {
|
||||
|
@ -54,8 +106,8 @@ func (rr *TSIG) String() string {
|
|||
return s
|
||||
}
|
||||
|
||||
func (rr *TSIG) parse(c *zlexer, origin string) *ParseError {
|
||||
panic("dns: internal error: parse should never be called on TSIG")
|
||||
func (*TSIG) parse(c *zlexer, origin string) *ParseError {
|
||||
return &ParseError{err: "TSIG records do not have a presentation format"}
|
||||
}
|
||||
|
||||
// The following values must be put in wireformat, so that the MAC can be calculated.
|
||||
|
@ -96,14 +148,13 @@ type timerWireFmt struct {
|
|||
// timersOnly is false.
|
||||
// If something goes wrong an error is returned, otherwise it is nil.
|
||||
func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) {
|
||||
return tsigGenerateProvider(m, tsigHMACProvider(secret), requestMAC, timersOnly)
|
||||
}
|
||||
|
||||
func tsigGenerateProvider(m *Msg, provider TsigProvider, requestMAC string, timersOnly bool) ([]byte, string, error) {
|
||||
if m.IsTsig() == nil {
|
||||
panic("dns: TSIG not last RR in additional")
|
||||
}
|
||||
// If we barf here, the caller is to blame
|
||||
rawsecret, err := fromBase64([]byte(secret))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
rr := m.Extra[len(m.Extra)-1].(*TSIG)
|
||||
m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg
|
||||
|
@ -111,32 +162,21 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
|
|||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
|
||||
buf, err := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
t := new(TSIG)
|
||||
var h hash.Hash
|
||||
switch strings.ToLower(rr.Algorithm) {
|
||||
case HmacMD5:
|
||||
h = hmac.New(md5.New, rawsecret)
|
||||
case HmacSHA1:
|
||||
h = hmac.New(sha1.New, rawsecret)
|
||||
case HmacSHA256:
|
||||
h = hmac.New(sha256.New, rawsecret)
|
||||
case HmacSHA512:
|
||||
h = hmac.New(sha512.New, rawsecret)
|
||||
default:
|
||||
return nil, "", ErrKeyAlg
|
||||
// Copy all TSIG fields except MAC and its size, which are filled using the computed digest.
|
||||
*t = *rr
|
||||
mac, err := provider.Generate(buf, rr)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
h.Write(buf)
|
||||
t.MAC = hex.EncodeToString(h.Sum(nil))
|
||||
t.MAC = hex.EncodeToString(mac)
|
||||
t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
|
||||
|
||||
t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0}
|
||||
t.Fudge = rr.Fudge
|
||||
t.TimeSigned = rr.TimeSigned
|
||||
t.Algorithm = rr.Algorithm
|
||||
t.OrigId = m.Id
|
||||
|
||||
tbuf := make([]byte, Len(t))
|
||||
off, err := PackRR(t, tbuf, 0, nil, false)
|
||||
if err != nil {
|
||||
|
@ -153,26 +193,34 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
|
|||
// If the signature does not validate err contains the
|
||||
// error, otherwise it is nil.
|
||||
func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
|
||||
rawsecret, err := fromBase64([]byte(secret))
|
||||
if err != nil {
|
||||
return err
|
||||
return tsigVerify(msg, tsigHMACProvider(secret), requestMAC, timersOnly, uint64(time.Now().Unix()))
|
||||
}
|
||||
|
||||
func tsigVerifyProvider(msg []byte, provider TsigProvider, requestMAC string, timersOnly bool) error {
|
||||
return tsigVerify(msg, provider, requestMAC, timersOnly, uint64(time.Now().Unix()))
|
||||
}
|
||||
|
||||
// actual implementation of TsigVerify, taking the current time ('now') as a parameter for the convenience of tests.
|
||||
func tsigVerify(msg []byte, provider TsigProvider, requestMAC string, timersOnly bool, now uint64) error {
|
||||
// Strip the TSIG from the incoming msg
|
||||
stripped, tsig, err := stripTsig(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgMAC, err := hex.DecodeString(tsig.MAC)
|
||||
buf, err := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
|
||||
if err := provider.Verify(buf, tsig); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Fudge factor works both ways. A message can arrive before it was signed because
|
||||
// of clock skew.
|
||||
now := uint64(time.Now().Unix())
|
||||
// We check this after verifying the signature, following draft-ietf-dnsop-rfc2845bis
|
||||
// instead of RFC2845, in order to prevent a security vulnerability as reported in CVE-2017-3142/3143.
|
||||
ti := now - tsig.TimeSigned
|
||||
if now < tsig.TimeSigned {
|
||||
ti = tsig.TimeSigned - now
|
||||
|
@ -181,28 +229,11 @@ func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
|
|||
return ErrTime
|
||||
}
|
||||
|
||||
var h hash.Hash
|
||||
switch strings.ToLower(tsig.Algorithm) {
|
||||
case HmacMD5:
|
||||
h = hmac.New(md5.New, rawsecret)
|
||||
case HmacSHA1:
|
||||
h = hmac.New(sha1.New, rawsecret)
|
||||
case HmacSHA256:
|
||||
h = hmac.New(sha256.New, rawsecret)
|
||||
case HmacSHA512:
|
||||
h = hmac.New(sha512.New, rawsecret)
|
||||
default:
|
||||
return ErrKeyAlg
|
||||
}
|
||||
h.Write(buf)
|
||||
if !hmac.Equal(h.Sum(nil), msgMAC) {
|
||||
return ErrSig
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create a wiredata buffer for the MAC calculation.
|
||||
func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte {
|
||||
func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) ([]byte, error) {
|
||||
var buf []byte
|
||||
if rr.TimeSigned == 0 {
|
||||
rr.TimeSigned = uint64(time.Now().Unix())
|
||||
|
@ -219,7 +250,10 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
|
|||
m.MACSize = uint16(len(requestMAC) / 2)
|
||||
m.MAC = requestMAC
|
||||
buf = make([]byte, len(requestMAC)) // long enough
|
||||
n, _ := packMacWire(m, buf)
|
||||
n, err := packMacWire(m, buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf = buf[:n]
|
||||
}
|
||||
|
||||
|
@ -228,20 +262,26 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
|
|||
tsig := new(timerWireFmt)
|
||||
tsig.TimeSigned = rr.TimeSigned
|
||||
tsig.Fudge = rr.Fudge
|
||||
n, _ := packTimerWire(tsig, tsigvar)
|
||||
n, err := packTimerWire(tsig, tsigvar)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tsigvar = tsigvar[:n]
|
||||
} else {
|
||||
tsig := new(tsigWireFmt)
|
||||
tsig.Name = strings.ToLower(rr.Hdr.Name)
|
||||
tsig.Name = CanonicalName(rr.Hdr.Name)
|
||||
tsig.Class = ClassANY
|
||||
tsig.Ttl = rr.Hdr.Ttl
|
||||
tsig.Algorithm = strings.ToLower(rr.Algorithm)
|
||||
tsig.Algorithm = CanonicalName(rr.Algorithm)
|
||||
tsig.TimeSigned = rr.TimeSigned
|
||||
tsig.Fudge = rr.Fudge
|
||||
tsig.Error = rr.Error
|
||||
tsig.OtherLen = rr.OtherLen
|
||||
tsig.OtherData = rr.OtherData
|
||||
n, _ := packTsigWire(tsig, tsigvar)
|
||||
n, err := packTsigWire(tsig, tsigvar)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tsigvar = tsigvar[:n]
|
||||
}
|
||||
|
||||
|
@ -251,7 +291,7 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
|
|||
} else {
|
||||
buf = append(msgbuf, tsigvar...)
|
||||
}
|
||||
return buf
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// Strip the TSIG from the raw message.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
|
@ -61,6 +62,7 @@ const (
|
|||
TypeCERT uint16 = 37
|
||||
TypeDNAME uint16 = 39
|
||||
TypeOPT uint16 = 41 // EDNS
|
||||
TypeAPL uint16 = 42
|
||||
TypeDS uint16 = 43
|
||||
TypeSSHFP uint16 = 44
|
||||
TypeRRSIG uint16 = 46
|
||||
|
@ -79,6 +81,8 @@ const (
|
|||
TypeCDNSKEY uint16 = 60
|
||||
TypeOPENPGPKEY uint16 = 61
|
||||
TypeCSYNC uint16 = 62
|
||||
TypeSVCB uint16 = 64
|
||||
TypeHTTPS uint16 = 65
|
||||
TypeSPF uint16 = 99
|
||||
TypeUINFO uint16 = 100
|
||||
TypeUID uint16 = 101
|
||||
|
@ -163,11 +167,11 @@ const (
|
|||
_RD = 1 << 8 // recursion desired
|
||||
_RA = 1 << 7 // recursion available
|
||||
_Z = 1 << 6 // Z
|
||||
_AD = 1 << 5 // authticated data
|
||||
_AD = 1 << 5 // authenticated data
|
||||
_CD = 1 << 4 // checking disabled
|
||||
)
|
||||
|
||||
// Various constants used in the LOC RR, See RFC 1887.
|
||||
// Various constants used in the LOC RR. See RFC 1887.
|
||||
const (
|
||||
LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2.
|
||||
LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
|
||||
|
@ -207,8 +211,11 @@ var CertTypeToString = map[uint16]string{
|
|||
|
||||
//go:generate go run types_generate.go
|
||||
|
||||
// Question holds a DNS question. There can be multiple questions in the
|
||||
// question section of a message. Usually there is just one.
|
||||
// Question holds a DNS question. Usually there is just one. While the
|
||||
// original DNS RFCs allow multiple questions in the question section of a
|
||||
// message, in practice it never works. Because most DNS servers see multiple
|
||||
// questions as an error, it is recommended to only have one question per
|
||||
// message.
|
||||
type Question struct {
|
||||
Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed)
|
||||
Qtype uint16
|
||||
|
@ -238,8 +245,8 @@ type ANY struct {
|
|||
|
||||
func (rr *ANY) String() string { return rr.Hdr.String() }
|
||||
|
||||
func (rr *ANY) parse(c *zlexer, origin string) *ParseError {
|
||||
panic("dns: internal error: parse should never be called on ANY")
|
||||
func (*ANY) parse(c *zlexer, origin string) *ParseError {
|
||||
return &ParseError{err: "ANY records do not have a presentation format"}
|
||||
}
|
||||
|
||||
// NULL RR. See RFC 1035.
|
||||
|
@ -253,8 +260,8 @@ func (rr *NULL) String() string {
|
|||
return ";" + rr.Hdr.String() + rr.Data
|
||||
}
|
||||
|
||||
func (rr *NULL) parse(c *zlexer, origin string) *ParseError {
|
||||
panic("dns: internal error: parse should never be called on NULL")
|
||||
func (*NULL) parse(c *zlexer, origin string) *ParseError {
|
||||
return &ParseError{err: "NULL records do not have a presentation format"}
|
||||
}
|
||||
|
||||
// CNAME RR. See RFC 1034.
|
||||
|
@ -440,45 +447,38 @@ func sprintName(s string) string {
|
|||
var dst strings.Builder
|
||||
|
||||
for i := 0; i < len(s); {
|
||||
if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' {
|
||||
if s[i] == '.' {
|
||||
if dst.Len() != 0 {
|
||||
dst.WriteString(s[i : i+2])
|
||||
dst.WriteByte('.')
|
||||
}
|
||||
i += 2
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
b, n := nextByte(s, i)
|
||||
if n == 0 {
|
||||
i++
|
||||
continue
|
||||
// Drop "dangling" incomplete escapes.
|
||||
if dst.Len() == 0 {
|
||||
return s[:i]
|
||||
}
|
||||
if b == '.' {
|
||||
if dst.Len() != 0 {
|
||||
dst.WriteByte('.')
|
||||
break
|
||||
}
|
||||
i += n
|
||||
continue
|
||||
}
|
||||
switch b {
|
||||
case ' ', '\'', '@', ';', '(', ')', '"', '\\': // additional chars to escape
|
||||
if isDomainNameLabelSpecial(b) {
|
||||
if dst.Len() == 0 {
|
||||
dst.Grow(len(s) * 2)
|
||||
dst.WriteString(s[:i])
|
||||
}
|
||||
dst.WriteByte('\\')
|
||||
dst.WriteByte(b)
|
||||
default:
|
||||
if ' ' <= b && b <= '~' {
|
||||
if dst.Len() != 0 {
|
||||
dst.WriteByte(b)
|
||||
}
|
||||
} else {
|
||||
} else if b < ' ' || b > '~' { // unprintable, use \DDD
|
||||
if dst.Len() == 0 {
|
||||
dst.Grow(len(s) * 2)
|
||||
dst.WriteString(s[:i])
|
||||
}
|
||||
dst.WriteString(escapeByte(b))
|
||||
} else {
|
||||
if dst.Len() != 0 {
|
||||
dst.WriteByte(b)
|
||||
}
|
||||
}
|
||||
i += n
|
||||
|
@ -501,15 +501,10 @@ func sprintTxtOctet(s string) string {
|
|||
}
|
||||
|
||||
b, n := nextByte(s, i)
|
||||
switch {
|
||||
case n == 0:
|
||||
if n == 0 {
|
||||
i++ // dangling back slash
|
||||
case b == '.':
|
||||
dst.WriteByte('.')
|
||||
case b < ' ' || b > '~':
|
||||
dst.WriteString(escapeByte(b))
|
||||
default:
|
||||
dst.WriteByte(b)
|
||||
} else {
|
||||
writeTXTStringByte(&dst, b)
|
||||
}
|
||||
i += n
|
||||
}
|
||||
|
@ -585,6 +580,17 @@ func escapeByte(b byte) string {
|
|||
return escapedByteLarge[int(b)*4 : int(b)*4+4]
|
||||
}
|
||||
|
||||
// isDomainNameLabelSpecial returns true if
|
||||
// a domain name label byte should be prefixed
|
||||
// with an escaping backslash.
|
||||
func isDomainNameLabelSpecial(b byte) bool {
|
||||
switch b {
|
||||
case '.', ' ', '\'', '@', ';', '(', ')', '"', '\\':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func nextByte(s string, offset int) (byte, int) {
|
||||
if offset >= len(s) {
|
||||
return 0, 0
|
||||
|
@ -758,7 +764,7 @@ type LOC struct {
|
|||
}
|
||||
|
||||
// cmToM takes a cm value expressed in RFC 1876 SIZE mantissa/exponent
|
||||
// format and returns a string in m (two decimals for the cm)
|
||||
// format and returns a string in m (two decimals for the cm).
|
||||
func cmToM(m, e uint8) string {
|
||||
if e < 2 {
|
||||
if e == 1 {
|
||||
|
@ -1116,6 +1122,7 @@ type URI struct {
|
|||
Target string `dns:"octet"`
|
||||
}
|
||||
|
||||
// rr.Target is to be parsed as a sequence of character-encoded octets according to RFC 3986.
|
||||
func (rr *URI) String() string {
|
||||
return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) +
|
||||
" " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target)
|
||||
|
@ -1277,6 +1284,7 @@ type CAA struct {
|
|||
Value string `dns:"octet"`
|
||||
}
|
||||
|
||||
// rr.Value is the character-string encoding of the value field as specified in RFC 1035, Section 5.1.
|
||||
func (rr *CAA) String() string {
|
||||
return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value)
|
||||
}
|
||||
|
@ -1353,6 +1361,88 @@ func (rr *CSYNC) len(off int, compression map[string]struct{}) int {
|
|||
return l
|
||||
}
|
||||
|
||||
// APL RR. See RFC 3123.
|
||||
type APL struct {
|
||||
Hdr RR_Header
|
||||
Prefixes []APLPrefix `dns:"apl"`
|
||||
}
|
||||
|
||||
// APLPrefix is an address prefix held by an APL record.
|
||||
type APLPrefix struct {
|
||||
Negation bool
|
||||
Network net.IPNet
|
||||
}
|
||||
|
||||
// String returns presentation form of the APL record.
|
||||
func (rr *APL) String() string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString(rr.Hdr.String())
|
||||
for i, p := range rr.Prefixes {
|
||||
if i > 0 {
|
||||
sb.WriteByte(' ')
|
||||
}
|
||||
sb.WriteString(p.str())
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// str returns presentation form of the APL prefix.
|
||||
func (p *APLPrefix) str() string {
|
||||
var sb strings.Builder
|
||||
if p.Negation {
|
||||
sb.WriteByte('!')
|
||||
}
|
||||
|
||||
switch len(p.Network.IP) {
|
||||
case net.IPv4len:
|
||||
sb.WriteByte('1')
|
||||
case net.IPv6len:
|
||||
sb.WriteByte('2')
|
||||
}
|
||||
|
||||
sb.WriteByte(':')
|
||||
|
||||
switch len(p.Network.IP) {
|
||||
case net.IPv4len:
|
||||
sb.WriteString(p.Network.IP.String())
|
||||
case net.IPv6len:
|
||||
// add prefix for IPv4-mapped IPv6
|
||||
if v4 := p.Network.IP.To4(); v4 != nil {
|
||||
sb.WriteString("::ffff:")
|
||||
}
|
||||
sb.WriteString(p.Network.IP.String())
|
||||
}
|
||||
|
||||
sb.WriteByte('/')
|
||||
|
||||
prefix, _ := p.Network.Mask.Size()
|
||||
sb.WriteString(strconv.Itoa(prefix))
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// equals reports whether two APL prefixes are identical.
|
||||
func (a *APLPrefix) equals(b *APLPrefix) bool {
|
||||
return a.Negation == b.Negation &&
|
||||
bytes.Equal(a.Network.IP, b.Network.IP) &&
|
||||
bytes.Equal(a.Network.Mask, b.Network.Mask)
|
||||
}
|
||||
|
||||
// copy returns a copy of the APL prefix.
|
||||
func (p *APLPrefix) copy() APLPrefix {
|
||||
return APLPrefix{
|
||||
Negation: p.Negation,
|
||||
Network: copyNet(p.Network),
|
||||
}
|
||||
}
|
||||
|
||||
// len returns size of the prefix in wire format.
|
||||
func (p *APLPrefix) len() int {
|
||||
// 4-byte header and the network address prefix (see Section 4 of RFC 3123)
|
||||
prefix, _ := p.Network.Mask.Size()
|
||||
return 4 + (prefix+7)/8
|
||||
}
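// Editor's note: illustrative sketch, not part of the upstream diff. The wire
// size is the 4-octet header plus only as many address octets as the prefix
// length requires; for a hypothetical IPv4 /20 that is 4 + ceil(20/8) = 7 octets.
//
//	_, ipnet, _ := net.ParseCIDR("192.0.0.0/20") // mask length 20
//	p := dns.APLPrefix{Network: *ipnet}
//	// wire length of p: 4 + (20+7)/8 = 7 octets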
|
||||
|
||||
// TimeToString translates the RRSIG's incep. and expir. times to the
|
||||
// string representation used when printing the record.
|
||||
// It takes serial arithmetic (RFC 1982) into account.
|
||||
|
@ -1409,6 +1499,17 @@ func copyIP(ip net.IP) net.IP {
|
|||
return p
|
||||
}
|
||||
|
||||
// copyNet returns a copy of a subnet.
|
||||
func copyNet(n net.IPNet) net.IPNet {
|
||||
m := make(net.IPMask, len(n.Mask))
|
||||
copy(m, n.Mask)
|
||||
|
||||
return net.IPNet{
|
||||
IP: copyIP(n.IP),
|
||||
Mask: m,
|
||||
}
|
||||
}
|
||||
|
||||
// SplitN splits a string into N sized string chunks.
|
||||
// This might become an exported function at some point.
|
||||
func splitN(s string, n int) []string {
|
||||
|
|
|
@ -3,13 +3,13 @@ package dns
|
|||
import "fmt"
|
||||
|
||||
// Version is current version of this library.
|
||||
var Version = V{1, 1, 26}
|
||||
var Version = v{1, 1, 40}
|
||||
|
||||
// V holds the version of this library.
|
||||
type V struct {
|
||||
// v holds the version of this library.
|
||||
type v struct {
|
||||
Major, Minor, Patch int
|
||||
}
|
||||
|
||||
func (v V) String() string {
|
||||
func (v v) String() string {
|
||||
return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
|
||||
}
|
||||
|
|
|
@ -52,6 +52,23 @@ func (r1 *ANY) isDuplicate(_r2 RR) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (r1 *APL) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*APL)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if len(r1.Prefixes) != len(r2.Prefixes) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(r1.Prefixes); i++ {
|
||||
if !r1.Prefixes[i].equals(&r2.Prefixes[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *AVC) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*AVC)
|
||||
if !ok {
|
||||
|
@ -87,6 +104,48 @@ func (r1 *CAA) isDuplicate(_r2 RR) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (r1 *CDNSKEY) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*CDNSKEY)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if r1.Flags != r2.Flags {
|
||||
return false
|
||||
}
|
||||
if r1.Protocol != r2.Protocol {
|
||||
return false
|
||||
}
|
||||
if r1.Algorithm != r2.Algorithm {
|
||||
return false
|
||||
}
|
||||
if r1.PublicKey != r2.PublicKey {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *CDS) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*CDS)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if r1.KeyTag != r2.KeyTag {
|
||||
return false
|
||||
}
|
||||
if r1.Algorithm != r2.Algorithm {
|
||||
return false
|
||||
}
|
||||
if r1.DigestType != r2.DigestType {
|
||||
return false
|
||||
}
|
||||
if r1.Digest != r2.Digest {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *CERT) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*CERT)
|
||||
if !ok {
|
||||
|
@ -155,6 +214,27 @@ func (r1 *DHCID) isDuplicate(_r2 RR) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (r1 *DLV) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*DLV)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if r1.KeyTag != r2.KeyTag {
|
||||
return false
|
||||
}
|
||||
if r1.Algorithm != r2.Algorithm {
|
||||
return false
|
||||
}
|
||||
if r1.DigestType != r2.DigestType {
|
||||
return false
|
||||
}
|
||||
if r1.Digest != r2.Digest {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *DNAME) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*DNAME)
|
||||
if !ok {
|
||||
|
@ -322,6 +402,48 @@ func (r1 *HIP) isDuplicate(_r2 RR) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (r1 *HTTPS) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*HTTPS)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if r1.Priority != r2.Priority {
|
||||
return false
|
||||
}
|
||||
if !isDuplicateName(r1.Target, r2.Target) {
|
||||
return false
|
||||
}
|
||||
if len(r1.Value) != len(r2.Value) {
|
||||
return false
|
||||
}
|
||||
if !areSVCBPairArraysEqual(r1.Value, r2.Value) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *KEY) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*KEY)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if r1.Flags != r2.Flags {
|
||||
return false
|
||||
}
|
||||
if r1.Protocol != r2.Protocol {
|
||||
return false
|
||||
}
|
||||
if r1.Algorithm != r2.Algorithm {
|
||||
return false
|
||||
}
|
||||
if r1.PublicKey != r2.PublicKey {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *KX) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*KX)
|
||||
if !ok {
|
||||
|
@ -832,6 +954,42 @@ func (r1 *RT) isDuplicate(_r2 RR) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (r1 *SIG) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*SIG)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if r1.TypeCovered != r2.TypeCovered {
|
||||
return false
|
||||
}
|
||||
if r1.Algorithm != r2.Algorithm {
|
||||
return false
|
||||
}
|
||||
if r1.Labels != r2.Labels {
|
||||
return false
|
||||
}
|
||||
if r1.OrigTtl != r2.OrigTtl {
|
||||
return false
|
||||
}
|
||||
if r1.Expiration != r2.Expiration {
|
||||
return false
|
||||
}
|
||||
if r1.Inception != r2.Inception {
|
||||
return false
|
||||
}
|
||||
if r1.KeyTag != r2.KeyTag {
|
||||
return false
|
||||
}
|
||||
if !isDuplicateName(r1.SignerName, r2.SignerName) {
|
||||
return false
|
||||
}
|
||||
if r1.Signature != r2.Signature {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *SMIMEA) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*SMIMEA)
|
||||
if !ok {
|
||||
|
@ -939,6 +1097,27 @@ func (r1 *SSHFP) isDuplicate(_r2 RR) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (r1 *SVCB) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*SVCB)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if r1.Priority != r2.Priority {
|
||||
return false
|
||||
}
|
||||
if !isDuplicateName(r1.Target, r2.Target) {
|
||||
return false
|
||||
}
|
||||
if len(r1.Value) != len(r2.Value) {
|
||||
return false
|
||||
}
|
||||
if !areSVCBPairArraysEqual(r1.Value, r2.Value) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *TA) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*TA)
|
||||
if !ok {
|
||||
|
|
|
@ -36,6 +36,14 @@ func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bo
|
|||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *APL) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
|
||||
off, err = packDataApl(rr.Prefixes, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *AVC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
|
||||
off, err = packStringTxt(rr.Txt, msg, off)
|
||||
if err != nil {
|
||||
|
@ -308,6 +316,22 @@ func (rr *HIP) pack(msg []byte, off int, compression compressionMap, compress bo
|
|||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *HTTPS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
|
||||
off, err = packUint16(rr.Priority, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packDomainName(rr.Target, msg, off, compression, false)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packDataSVCB(rr.Value, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
|
||||
off, err = packUint16(rr.Flags, msg, off)
|
||||
if err != nil {
|
||||
|
@ -898,6 +922,22 @@ func (rr *SSHFP) pack(msg []byte, off int, compression compressionMap, compress
|
|||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *SVCB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
|
||||
off, err = packUint16(rr.Priority, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packDomainName(rr.Target, msg, off, compression, false)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packDataSVCB(rr.Value, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *TA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
|
||||
off, err = packUint16(rr.KeyTag, msg, off)
|
||||
if err != nil {
|
||||
|
@ -1127,6 +1167,17 @@ func (rr *ANY) unpack(msg []byte, off int) (off1 int, err error) {
|
|||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *APL) unpack(msg []byte, off int) (off1 int, err error) {
|
||||
rdStart := off
|
||||
_ = rdStart
|
||||
|
||||
rr.Prefixes, off, err = unpackDataApl(msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *AVC) unpack(msg []byte, off int) (off1 int, err error) {
|
||||
rdStart := off
|
||||
_ = rdStart
|
||||
|
@ -1540,6 +1591,31 @@ func (rr *HIP) unpack(msg []byte, off int) (off1 int, err error) {
|
|||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *HTTPS) unpack(msg []byte, off int) (off1 int, err error) {
|
||||
rdStart := off
|
||||
_ = rdStart
|
||||
|
||||
rr.Priority, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
if off == len(msg) {
|
||||
return off, nil
|
||||
}
|
||||
rr.Target, off, err = UnpackDomainName(msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
if off == len(msg) {
|
||||
return off, nil
|
||||
}
|
||||
rr.Value, off, err = unpackDataSVCB(msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) {
|
||||
rdStart := off
|
||||
_ = rdStart
|
||||
|
@ -2442,6 +2518,31 @@ func (rr *SSHFP) unpack(msg []byte, off int) (off1 int, err error) {
|
|||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *SVCB) unpack(msg []byte, off int) (off1 int, err error) {
|
||||
rdStart := off
|
||||
_ = rdStart
|
||||
|
||||
rr.Priority, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
if off == len(msg) {
|
||||
return off, nil
|
||||
}
|
||||
rr.Target, off, err = UnpackDomainName(msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
if off == len(msg) {
|
||||
return off, nil
|
||||
}
|
||||
rr.Value, off, err = unpackDataSVCB(msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *TA) unpack(msg []byte, off int) (off1 int, err error) {
|
||||
rdStart := off
|
||||
_ = rdStart
|
||||
|
|
|
@ -13,6 +13,7 @@ var TypeToRR = map[uint16]func() RR{
|
|||
TypeAAAA: func() RR { return new(AAAA) },
|
||||
TypeAFSDB: func() RR { return new(AFSDB) },
|
||||
TypeANY: func() RR { return new(ANY) },
|
||||
TypeAPL: func() RR { return new(APL) },
|
||||
TypeAVC: func() RR { return new(AVC) },
|
||||
TypeCAA: func() RR { return new(CAA) },
|
||||
TypeCDNSKEY: func() RR { return new(CDNSKEY) },
|
||||
|
@ -32,6 +33,7 @@ var TypeToRR = map[uint16]func() RR{
|
|||
TypeGPOS: func() RR { return new(GPOS) },
|
||||
TypeHINFO: func() RR { return new(HINFO) },
|
||||
TypeHIP: func() RR { return new(HIP) },
|
||||
TypeHTTPS: func() RR { return new(HTTPS) },
|
||||
TypeKEY: func() RR { return new(KEY) },
|
||||
TypeKX: func() RR { return new(KX) },
|
||||
TypeL32: func() RR { return new(L32) },
|
||||
|
@ -69,6 +71,7 @@ var TypeToRR = map[uint16]func() RR{
|
|||
TypeSPF: func() RR { return new(SPF) },
|
||||
TypeSRV: func() RR { return new(SRV) },
|
||||
TypeSSHFP: func() RR { return new(SSHFP) },
|
||||
TypeSVCB: func() RR { return new(SVCB) },
|
||||
TypeTA: func() RR { return new(TA) },
|
||||
TypeTALINK: func() RR { return new(TALINK) },
|
||||
TypeTKEY: func() RR { return new(TKEY) },
|
||||
|
@ -87,6 +90,7 @@ var TypeToString = map[uint16]string{
|
|||
TypeAAAA: "AAAA",
|
||||
TypeAFSDB: "AFSDB",
|
||||
TypeANY: "ANY",
|
||||
TypeAPL: "APL",
|
||||
TypeATMA: "ATMA",
|
||||
TypeAVC: "AVC",
|
||||
TypeAXFR: "AXFR",
|
||||
|
@ -108,6 +112,7 @@ var TypeToString = map[uint16]string{
|
|||
TypeGPOS: "GPOS",
|
||||
TypeHINFO: "HINFO",
|
||||
TypeHIP: "HIP",
|
||||
TypeHTTPS: "HTTPS",
|
||||
TypeISDN: "ISDN",
|
||||
TypeIXFR: "IXFR",
|
||||
TypeKEY: "KEY",
|
||||
|
@ -151,6 +156,7 @@ var TypeToString = map[uint16]string{
|
|||
TypeSPF: "SPF",
|
||||
TypeSRV: "SRV",
|
||||
TypeSSHFP: "SSHFP",
|
||||
TypeSVCB: "SVCB",
|
||||
TypeTA: "TA",
|
||||
TypeTALINK: "TALINK",
|
||||
TypeTKEY: "TKEY",
|
||||
|
@ -169,6 +175,7 @@ func (rr *A) Header() *RR_Header { return &rr.Hdr }
|
|||
func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *APL) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *AVC) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr }
|
||||
|
@ -188,6 +195,7 @@ func (rr *GID) Header() *RR_Header { return &rr.Hdr }
|
|||
func (rr *GPOS) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *HINFO) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *HIP) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *HTTPS) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *KEY) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *KX) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *L32) Header() *RR_Header { return &rr.Hdr }
|
||||
|
@ -226,6 +234,7 @@ func (rr *SOA) Header() *RR_Header { return &rr.Hdr }
|
|||
func (rr *SPF) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *SRV) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *SVCB) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *TA) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *TALINK) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *TKEY) Header() *RR_Header { return &rr.Hdr }
|
||||
|
@ -262,6 +271,13 @@ func (rr *ANY) len(off int, compression map[string]struct{}) int {
|
|||
l := rr.Hdr.len(off, compression)
|
||||
return l
|
||||
}
|
||||
func (rr *APL) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
for _, x := range rr.Prefixes {
|
||||
l += x.len()
|
||||
}
|
||||
return l
|
||||
}
|
||||
func (rr *AVC) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
for _, x := range rr.Txt {
|
||||
|
@ -582,6 +598,15 @@ func (rr *SSHFP) len(off int, compression map[string]struct{}) int {
|
|||
l += len(rr.FingerPrint) / 2
|
||||
return l
|
||||
}
|
||||
func (rr *SVCB) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // Priority
|
||||
l += domainNameLen(rr.Target, off+l, compression, false)
|
||||
for _, x := range rr.Value {
|
||||
l += 4 + int(x.len())
|
||||
}
|
||||
return l
|
||||
}
|
||||
func (rr *TA) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += 2 // KeyTag
|
||||
|
@ -673,6 +698,13 @@ func (rr *AFSDB) copy() RR {
|
|||
func (rr *ANY) copy() RR {
|
||||
return &ANY{rr.Hdr}
|
||||
}
|
||||
func (rr *APL) copy() RR {
|
||||
Prefixes := make([]APLPrefix, len(rr.Prefixes))
|
||||
for i, e := range rr.Prefixes {
|
||||
Prefixes[i] = e.copy()
|
||||
}
|
||||
return &APL{rr.Hdr, Prefixes}
|
||||
}
|
||||
func (rr *AVC) copy() RR {
|
||||
Txt := make([]string, len(rr.Txt))
|
||||
copy(Txt, rr.Txt)
|
||||
|
@ -681,6 +713,12 @@ func (rr *AVC) copy() RR {
|
|||
func (rr *CAA) copy() RR {
|
||||
return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value}
|
||||
}
|
||||
func (rr *CDNSKEY) copy() RR {
|
||||
return &CDNSKEY{*rr.DNSKEY.copy().(*DNSKEY)}
|
||||
}
|
||||
func (rr *CDS) copy() RR {
|
||||
return &CDS{*rr.DS.copy().(*DS)}
|
||||
}
|
||||
func (rr *CERT) copy() RR {
|
||||
return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
|
||||
}
|
||||
|
@ -695,6 +733,9 @@ func (rr *CSYNC) copy() RR {
|
|||
func (rr *DHCID) copy() RR {
|
||||
return &DHCID{rr.Hdr, rr.Digest}
|
||||
}
|
||||
func (rr *DLV) copy() RR {
|
||||
return &DLV{*rr.DS.copy().(*DS)}
|
||||
}
|
||||
func (rr *DNAME) copy() RR {
|
||||
return &DNAME{rr.Hdr, rr.Target}
|
||||
}
|
||||
|
@ -727,6 +768,12 @@ func (rr *HIP) copy() RR {
|
|||
copy(RendezvousServers, rr.RendezvousServers)
|
||||
return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
|
||||
}
|
||||
func (rr *HTTPS) copy() RR {
|
||||
return &HTTPS{*rr.SVCB.copy().(*SVCB)}
|
||||
}
|
||||
func (rr *KEY) copy() RR {
|
||||
return &KEY{*rr.DNSKEY.copy().(*DNSKEY)}
|
||||
}
|
||||
func (rr *KX) copy() RR {
|
||||
return &KX{rr.Hdr, rr.Preference, rr.Exchanger}
|
||||
}
|
||||
|
@ -830,6 +877,9 @@ func (rr *RRSIG) copy() RR {
|
|||
func (rr *RT) copy() RR {
|
||||
return &RT{rr.Hdr, rr.Preference, rr.Host}
|
||||
}
|
||||
func (rr *SIG) copy() RR {
|
||||
return &SIG{*rr.RRSIG.copy().(*RRSIG)}
|
||||
}
|
||||
func (rr *SMIMEA) copy() RR {
|
||||
return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
|
||||
}
|
||||
|
@ -847,6 +897,13 @@ func (rr *SRV) copy() RR {
|
|||
func (rr *SSHFP) copy() RR {
|
||||
return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint}
|
||||
}
|
||||
func (rr *SVCB) copy() RR {
|
||||
Value := make([]SVCBKeyValue, len(rr.Value))
|
||||
for i, e := range rr.Value {
|
||||
Value[i] = e.copy()
|
||||
}
|
||||
return &SVCB{rr.Hdr, rr.Priority, rr.Target, Value}
|
||||
}
|
||||
func (rr *TA) copy() RR {
|
||||
return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
|
||||
}
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -66,6 +67,7 @@ type StructCodec struct {
|
|||
DecodeDeepZeroInline bool
|
||||
EncodeOmitDefaultStruct bool
|
||||
AllowUnexportedFields bool
|
||||
OverwriteDuplicatedInlinedFields bool
|
||||
}
|
||||
|
||||
var _ ValueEncoder = &StructCodec{}
|
||||
|
@ -93,6 +95,9 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions)
|
|||
if structOpt.EncodeOmitDefaultStruct != nil {
|
||||
codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct
|
||||
}
|
||||
if structOpt.OverwriteDuplicatedInlinedFields != nil {
|
||||
codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields
|
||||
}
|
||||
if structOpt.AllowUnexportedFields != nil {
|
||||
codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields
|
||||
}
|
||||
|
@ -408,6 +413,35 @@ type fieldDescription struct {
|
|||
decoder ValueDecoder
|
||||
}
|
||||
|
||||
type byIndex []fieldDescription
|
||||
|
||||
func (bi byIndex) Len() int { return len(bi) }
|
||||
|
||||
func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] }
|
||||
|
||||
func (bi byIndex) Less(i, j int) bool {
|
||||
// If a field is inlined, its index in the top level struct is stored at inline[0]
|
||||
iIdx, jIdx := bi[i].idx, bi[j].idx
|
||||
if len(bi[i].inline) > 0 {
|
||||
iIdx = bi[i].inline[0]
|
||||
}
|
||||
if len(bi[j].inline) > 0 {
|
||||
jIdx = bi[j].inline[0]
|
||||
}
|
||||
if iIdx != jIdx {
|
||||
return iIdx < jIdx
|
||||
}
|
||||
for k, biik := range bi[i].inline {
|
||||
if k >= len(bi[j].inline) {
|
||||
return false
|
||||
}
|
||||
if biik != bi[j].inline[k] {
|
||||
return biik < bi[j].inline[k]
|
||||
}
|
||||
}
|
||||
return len(bi[i].inline) < len(bi[j].inline)
|
||||
}
|
||||
|
||||
func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) {
|
||||
// We need to analyze the struct, including getting the tags, collecting
|
||||
// information about inlining, and create a map of the field name to the field.
|
||||
|
@ -425,6 +459,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
|
|||
inlineMap: -1,
|
||||
}
|
||||
|
||||
var fields []fieldDescription
|
||||
for i := 0; i < numFields; i++ {
|
||||
sf := t.Field(i)
|
||||
if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) {
|
||||
|
@@ -484,30 +519,61 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
                return nil, err
            }
            for _, fd := range inlinesf.fl {
                if _, exists := sd.fm[fd.name]; exists {
                    return nil, fmt.Errorf("(struct %s) duplicated key %s", t.String(), fd.name)
                }
                if fd.inline == nil {
                    fd.inline = []int{i, fd.idx}
                } else {
                    fd.inline = append([]int{i}, fd.inline...)
                }
                sd.fm[fd.name] = fd
                sd.fl = append(sd.fl, fd)
                fields = append(fields, fd)

            }
        default:
            return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
        }
        continue
    }

        if _, exists := sd.fm[description.name]; exists {
            return nil, fmt.Errorf("struct %s) duplicated key %s", t.String(), description.name)
        fields = append(fields, description)
        }

        sd.fm[description.name] = description
        sd.fl = append(sd.fl, description)
    // Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name
    sort.Slice(fields, func(i, j int) bool {
        x := fields
        // sort field by name, breaking ties with depth, then
        // breaking ties with index sequence.
        if x[i].name != x[j].name {
            return x[i].name < x[j].name
        }
        if len(x[i].inline) != len(x[j].inline) {
            return len(x[i].inline) < len(x[j].inline)
        }
        return byIndex(x).Less(i, j)
    })

    for advance, i := 0, 0; i < len(fields); i += advance {
        // One iteration per name.
        // Find the sequence of fields with the name of this first field.
        fi := fields[i]
        name := fi.name
        for advance = 1; i+advance < len(fields); advance++ {
            fj := fields[i+advance]
            if fj.name != name {
                break
            }
        }
        if advance == 1 { // Only one field with this name
            sd.fl = append(sd.fl, fi)
            sd.fm[name] = fi
            continue
        }
        dominant, ok := dominantField(fields[i : i+advance])
        if !ok || !sc.OverwriteDuplicatedInlinedFields {
            return nil, fmt.Errorf("struct %s) duplicated key %s", t.String(), name)
        }
        sd.fl = append(sd.fl, dominant)
        sd.fm[name] = dominant
    }

    sort.Sort(byIndex(sd.fl))

    sc.l.Lock()
    sc.cache[t] = sd

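The advance loop above is the standard group-by idiom over a sorted slice: each outer iteration handles one run of equal names. The same idiom on plain strings, as a standalone sketch:

```go
package main

import "fmt"

func main() {
	names := []string{"a", "b", "b", "c"} // already sorted by name
	for advance, i := 0, 0; i < len(names); i += advance {
		name := names[i]
		// Measure the run of entries sharing this name.
		for advance = 1; i+advance < len(names); advance++ {
			if names[i+advance] != name {
				break
			}
		}
		fmt.Printf("%s: %d field(s) share this key\n", name, advance)
	}
}
```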
@@ -516,6 +582,22 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
    return sd, nil
}

// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's inlining rules. If there are multiple top-level
// fields, the boolean will be false: This condition is an error in Go
// and we skip all the fields.
func dominantField(fields []fieldDescription) (fieldDescription, bool) {
    // The fields are sorted in increasing index-length order, then by presence of tag.
    // That means that the first field is the dominant one. We need only check
    // for error cases: two fields at top level.
    if len(fields) > 1 &&
        len(fields[0].inline) == len(fields[1].inline) {
        return fieldDescription{}, false
    }
    return fields[0], true
}

func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) {
    defer func() {
        if recovered := recover(); recovered != nil {
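The hunk is truncated inside fieldByIndexErr, which appears to wrap reflect field access in a recover so that walking through a nil embedded pointer surfaces as an error instead of a panic. A standalone sketch of that guard, using a hypothetical safeFieldByIndex helper rather than the driver's function:

```go
package main

import (
	"fmt"
	"reflect"
)

// safeFieldByIndex converts the panic raised when an index path crosses a nil
// embedded pointer into an ordinary error, mirroring the defer/recover shape above.
func safeFieldByIndex(v reflect.Value, index []int) (result reflect.Value, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("field lookup panicked: %v", r)
		}
	}()
	return v.FieldByIndex(index), nil
}

type Inner struct{ X int }

type Outer struct {
	*Inner // nil embedded pointer makes FieldByIndex panic
}

func main() {
	v := reflect.ValueOf(Outer{})
	if _, err := safeFieldByIndex(v, []int{0, 0}); err != nil {
		fmt.Println("recovered:", err)
	}
}
```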
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue