Merge pull request #8863 from hashicorp/update-hashicorp-deps

vendor: Update some hashicorp packages to latest versions
Kyle Havlovitz 2020-10-09 09:04:58 -07:00 committed by GitHub
commit 460cd27363
92 changed files with 3961 additions and 2501 deletions

go.mod

@ -34,13 +34,12 @@ require (
github.com/hashicorp/consul/sdk v0.6.0 github.com/hashicorp/consul/sdk v0.6.0
github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/go-bexpr v0.1.2 github.com/hashicorp/go-bexpr v0.1.2
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de github.com/hashicorp/go-checkpoint v0.5.0
github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-cleanhttp v0.5.1
github.com/hashicorp/go-connlimit v0.3.0 github.com/hashicorp/go-connlimit v0.3.0
github.com/hashicorp/go-discover v0.0.0-20200501174627-ad1e96bde088 github.com/hashicorp/go-discover v0.0.0-20200501174627-ad1e96bde088
github.com/hashicorp/go-hclog v0.12.0 github.com/hashicorp/go-hclog v0.12.0
github.com/hashicorp/go-immutable-radix v1.2.0 // indirect github.com/hashicorp/go-memdb v1.3.0
github.com/hashicorp/go-memdb v1.1.0
github.com/hashicorp/go-msgpack v0.5.5 github.com/hashicorp/go-msgpack v0.5.5
github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-multierror v1.1.0
github.com/hashicorp/go-raftchunking v0.6.1 github.com/hashicorp/go-raftchunking v0.6.1
@ -48,22 +47,24 @@ require (
github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-sockaddr v1.0.2
github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/go-syslog v1.0.0
github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/go-uuid v1.0.2
github.com/hashicorp/go-version v1.2.0 github.com/hashicorp/go-version v1.2.1
github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/golang-lru v0.5.4
github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl v1.0.0
github.com/hashicorp/hil v0.0.0-20160711231837-1e86c6b523c5 github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038
github.com/hashicorp/mdns v1.0.3 // indirect
github.com/hashicorp/memberlist v0.2.2 github.com/hashicorp/memberlist v0.2.2
github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69
github.com/hashicorp/raft v1.2.0 github.com/hashicorp/raft v1.2.0
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea
github.com/hashicorp/serf v0.9.5 github.com/hashicorp/serf v0.9.5
github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086 github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce
github.com/imdario/mergo v0.3.6 github.com/imdario/mergo v0.3.6
github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f // indirect github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/kr/text v0.1.0 github.com/kr/text v0.1.0
github.com/miekg/dns v1.1.26 github.com/mattn/go-colorable v0.1.7 // indirect
github.com/miekg/dns v1.1.31
github.com/mitchellh/cli v1.1.0 github.com/mitchellh/cli v1.1.0
github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/copystructure v1.0.0
github.com/mitchellh/go-testing-interface v1.14.0 github.com/mitchellh/go-testing-interface v1.14.0
@ -82,8 +83,8 @@ require (
github.com/stretchr/testify v1.5.1 github.com/stretchr/testify v1.5.1
go.opencensus.io v0.22.0 // indirect go.opencensus.io v0.22.0 // indirect
go.uber.org/goleak v1.0.0 go.uber.org/goleak v1.0.0
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a golang.org/x/crypto v0.0.0-20200930160638-afb6bcd081ae
golang.org/x/net v0.0.0-20200904194848-62affa334b73 golang.org/x/net v0.0.0-20200930145003-4acb6c075d10
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
golang.org/x/sys v0.0.0-20201007082116-8445cc04cbdf golang.org/x/sys v0.0.0-20201007082116-8445cc04cbdf

go.sum

@ -220,8 +220,8 @@ github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/U
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-bexpr v0.1.2 h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs= github.com/hashicorp/go-bexpr v0.1.2 h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs=
github.com/hashicorp/go-bexpr v0.1.2/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU= github.com/hashicorp/go-bexpr v0.1.2/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU=
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de h1:XDCSythtg8aWSRSO29uwhgh7b127fWr+m5SemqjSUL8= github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1:xIwEieBHERyEvaeKF/TcHh1Hu+lxPM+n2vT1+g9I4m4= github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@ -235,12 +235,11 @@ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj
github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM= github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE=
github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
github.com/hashicorp/go-memdb v1.1.0 h1:ClvpUXpBA6UDs5+vc1h3wqe4UJU+rwum7CU219SeCbk= github.com/hashicorp/go-memdb v1.3.0 h1:xdXq34gBOMEloa9rlGStLxmfX/dyIK8htOv36dQUwHU=
github.com/hashicorp/go-memdb v1.1.0/go.mod h1:LWQ8R70vPrS4OEY9k28D2z8/Zzyu34NVzeRibGAzHO0= github.com/hashicorp/go-memdb v1.3.0/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
@ -266,19 +265,21 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/hil v0.0.0-20160711231837-1e86c6b523c5 h1:uk280DXEbQiCOZgCOI3elFSeNxf8YIZiNsbr2pQLYD0= github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 h1:n9J0rwVWXDpNd5iZnwY7w4WZyq53/rROeI7OVvLW8Ok=
github.com/hashicorp/hil v0.0.0-20160711231837-1e86c6b523c5/go.mod h1:KHvg/R2/dPtaePb16oW4qIyzkMxXOL38xjRN64adsts= github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8= github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/mdns v1.0.3 h1:hPneYJlzSjxFBmUlnDGXRykxBZ++dQAJhU57gCO7TzI=
github.com/hashicorp/mdns v1.0.3/go.mod h1:P9sIDVQGUBr2GtS4qS2QCBdtgqP7TBt6d8looU5l5r4=
github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g=
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE= github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE=
@ -297,8 +298,8 @@ github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:W
github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw=
github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce h1:7UnVY3T/ZnHUrfviiAgIUjg2PXxsQfs5bphsG8F7Keo=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
@ -344,6 +345,8 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
@ -356,6 +359,10 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU= github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0 h1:tEElEatulEHDeedTxwckzyYMA5c86fbmNIUL1hBIiTg= github.com/mitchellh/cli v1.1.0 h1:tEElEatulEHDeedTxwckzyYMA5c86fbmNIUL1hBIiTg=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
@ -530,8 +537,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200930160638-afb6bcd081ae h1:duLSQW+DZ5MsXKX7kc4rXlq6/mmxz4G6ewJuBPlhRe0=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200930160638-afb6bcd081ae/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@ -540,6 +547,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -563,8 +571,8 @@ golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= golang.org/x/net v0.0.0-20200930145003-4acb6c075d10 h1:YfxMZzv3PjGonQYNUaeU2+DhAdqOxerQ30JFB6WgAXo=
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200930145003-4acb6c075d10/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@ -633,6 +641,7 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200513154647-78b527d18275 h1:e7nYe9s94RHunFJ7b+mmPxiQMOKMVSqYASToWb1EcHs= golang.org/x/tools v0.0.0-20200513154647-78b527d18275 h1:e7nYe9s94RHunFJ7b+mmPxiQMOKMVSqYASToWb1EcHs=
golang.org/x/tools v0.0.0-20200513154647-78b527d18275/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200513154647-78b527d18275/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=


@ -1,11 +1,7 @@
// checkpoint is a package for checking version information and alerts
// for a HashiCorp product.
package checkpoint package checkpoint
import ( import (
"bytes" crand "crypto/rand"
"context"
"crypto/rand"
"encoding/binary" "encoding/binary"
"encoding/json" "encoding/json"
"fmt" "fmt"
@ -23,112 +19,9 @@ import (
"time" "time"
"github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-cleanhttp"
uuid "github.com/hashicorp/go-uuid"
) )
var magicBytes [4]byte = [4]byte{0x35, 0x77, 0x69, 0xFB} var magicBytes = [4]byte{0x35, 0x77, 0x69, 0xFB}
// ReportParams are the parameters for configuring a telemetry report.
type ReportParams struct {
// Signature is some random signature that should be stored and used
// as a cookie-like value. This ensures that alerts aren't repeated.
// If the signature is changed, repeat alerts may be sent down. The
// signature should NOT be anything identifiable to a user (such as
// a MAC address). It should be random.
//
// If SignatureFile is given, then the signature will be read from this
// file. If the file doesn't exist, then a random signature will
// automatically be generated and stored here. SignatureFile will be
// ignored if Signature is given.
Signature string `json:"signature"`
SignatureFile string `json:"-"`
StartTime time.Time `json:"start_time"`
EndTime time.Time `json:"end_time"`
Arch string `json:"arch"`
OS string `json:"os"`
Payload interface{} `json:"payload,omitempty"`
Product string `json:"product"`
RunID string `json:"run_id"`
SchemaVersion string `json:"schema_version"`
Version string `json:"version"`
}
func (i *ReportParams) signature() string {
signature := i.Signature
if i.Signature == "" && i.SignatureFile != "" {
var err error
signature, err = checkSignature(i.SignatureFile)
if err != nil {
return ""
}
}
return signature
}
// Report sends telemetry information to checkpoint
func Report(ctx context.Context, r *ReportParams) error {
if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
return nil
}
req, err := ReportRequest(r)
if err != nil {
return err
}
client := cleanhttp.DefaultClient()
resp, err := client.Do(req.WithContext(ctx))
if err != nil {
return err
}
if resp.StatusCode != 201 {
return fmt.Errorf("Unknown status: %d", resp.StatusCode)
}
return nil
}
// ReportRequest creates a request object for making a report
func ReportRequest(r *ReportParams) (*http.Request, error) {
// Populate some fields automatically if we can
if r.RunID == "" {
uuid, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
r.RunID = uuid
}
if r.Arch == "" {
r.Arch = runtime.GOARCH
}
if r.OS == "" {
r.OS = runtime.GOOS
}
if r.Signature == "" {
r.Signature = r.signature()
}
b, err := json.Marshal(r)
if err != nil {
return nil, err
}
u := &url.URL{
Scheme: "https",
Host: "checkpoint-api.hashicorp.com",
Path: fmt.Sprintf("/v1/telemetry/%s", r.Product),
}
req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b))
if err != nil {
return nil, err
}
req.Header.Add("Accept", "application/json")
req.Header.Add("User-Agent", "HashiCorp/go-checkpoint")
return req, nil
}
// CheckParams are the parameters for configuring a check request. // CheckParams are the parameters for configuring a check request.
type CheckParams struct { type CheckParams struct {
@ -177,14 +70,14 @@ type CheckParams struct {
// CheckResponse is the response for a check request. // CheckResponse is the response for a check request.
type CheckResponse struct { type CheckResponse struct {
Product string Product string `json:"product"`
CurrentVersion string `json:"current_version"` CurrentVersion string `json:"current_version"`
CurrentReleaseDate int `json:"current_release_date"` CurrentReleaseDate int `json:"current_release_date"`
CurrentDownloadURL string `json:"current_download_url"` CurrentDownloadURL string `json:"current_download_url"`
CurrentChangelogURL string `json:"current_changelog_url"` CurrentChangelogURL string `json:"current_changelog_url"`
ProjectWebsite string `json:"project_website"` ProjectWebsite string `json:"project_website"`
Outdated bool `json:"outdated"` Outdated bool `json:"outdated"`
Alerts []*CheckAlert Alerts []*CheckAlert `json:"alerts"`
} }
// CheckAlert is a single alert message from a check request. // CheckAlert is a single alert message from a check request.
@ -192,11 +85,11 @@ type CheckResponse struct {
// These never have to be manually constructed, and are typically populated // These never have to be manually constructed, and are typically populated
// into a CheckResponse as a result of the Check request. // into a CheckResponse as a result of the Check request.
type CheckAlert struct { type CheckAlert struct {
ID int ID int `json:"id"`
Date int Date int `json:"date"`
Message string Message string `json:"message"`
URL string URL string `json:"url"`
Level string Level string `json:"level"`
} }
// Check checks for alerts and new version information. // Check checks for alerts and new version information.
@ -205,7 +98,7 @@ func Check(p *CheckParams) (*CheckResponse, error) {
return &CheckResponse{}, nil return &CheckResponse{}, nil
} }
// set a default timeout of 3 sec for the check request (in milliseconds) // Set a default timeout of 3 sec for the check request (in milliseconds)
timeout := 3000 timeout := 3000
if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")) timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT"))
@ -253,8 +146,8 @@ func Check(p *CheckParams) (*CheckResponse, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
req.Header.Add("Accept", "application/json") req.Header.Set("Accept", "application/json")
req.Header.Add("User-Agent", "HashiCorp/go-checkpoint") req.Header.Set("User-Agent", "HashiCorp/go-checkpoint")
client := cleanhttp.DefaultClient() client := cleanhttp.DefaultClient()
@ -266,6 +159,8 @@ func Check(p *CheckParams) (*CheckResponse, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer resp.Body.Close()
if resp.StatusCode != 200 { if resp.StatusCode != 200 {
return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode) return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode)
} }
@ -390,14 +285,11 @@ func checkCache(current string, path string, d time.Duration) (io.ReadCloser, er
return f, nil return f, nil
} }
func checkResult(r io.Reader) (*CheckResponse, error) { func checkResult(r io.Reader) (*CheckResponse, error) {
var result CheckResponse var result CheckResponse
dec := json.NewDecoder(r) if err := json.NewDecoder(r).Decode(&result); err != nil {
if err := dec.Decode(&result); err != nil {
return nil, err return nil, err
} }
return &result, nil return &result, nil
} }
@ -426,7 +318,7 @@ func checkSignature(path string) (string, error) {
var b [16]byte var b [16]byte
n := 0 n := 0
for n < 16 { for n < 16 {
n2, err := rand.Read(b[n:]) n2, err := crand.Read(b[n:])
if err != nil { if err != nil {
return "", err return "", err
} }
@ -456,7 +348,7 @@ func writeCacheHeader(f io.Writer, v string) error {
} }
// Write out our current version length // Write out our current version length
var length uint32 = uint32(len(v)) length := uint32(len(v))
if err := binary.Write(f, binary.LittleEndian, length); err != nil { if err := binary.Write(f, binary.LittleEndian, length); err != nil {
return err return err
} }
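For context, a minimal sketch of how a caller might drive the Check API touched above. The CheckParams fields Product and Version are not visible in this hunk and are assumed from the upstream go-checkpoint package; the response fields match the CheckResponse struct shown above. Illustrative only, not part of the diff.

package main

import (
	"fmt"
	"log"

	checkpoint "github.com/hashicorp/go-checkpoint"
)

func main() {
	// Product and Version are assumed CheckParams fields (not shown in the hunk).
	resp, err := checkpoint.Check(&checkpoint.CheckParams{
		Product: "consul",
		Version: "1.8.4",
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Outdated {
		fmt.Printf("newer version available: %s (%s)\n",
			resp.CurrentVersion, resp.CurrentDownloadURL)
	}
	for _, alert := range resp.Alerts {
		fmt.Printf("[%s] %s (%s)\n", alert.Level, alert.Message, alert.URL)
	}
}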

vendor/github.com/hashicorp/go-checkpoint/go.mod (new vendored file)

@ -0,0 +1,6 @@
module github.com/hashicorp/go-checkpoint
require (
github.com/hashicorp/go-cleanhttp v0.5.0
github.com/hashicorp/go-uuid v1.0.0
)

vendor/github.com/hashicorp/go-checkpoint/go.sum (new vendored file)

@ -0,0 +1,4 @@
github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=

vendor/github.com/hashicorp/go-checkpoint/telemetry.go (new vendored file)

@ -0,0 +1,118 @@
package checkpoint
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"runtime"
"time"
"github.com/hashicorp/go-cleanhttp"
uuid "github.com/hashicorp/go-uuid"
)
// ReportParams are the parameters for configuring a telemetry report.
type ReportParams struct {
// Signature is some random signature that should be stored and used
// as a cookie-like value. This ensures that alerts aren't repeated.
// If the signature is changed, repeat alerts may be sent down. The
// signature should NOT be anything identifiable to a user (such as
// a MAC address). It should be random.
//
// If SignatureFile is given, then the signature will be read from this
// file. If the file doesn't exist, then a random signature will
// automatically be generated and stored here. SignatureFile will be
// ignored if Signature is given.
Signature string `json:"signature"`
SignatureFile string `json:"-"`
StartTime time.Time `json:"start_time"`
EndTime time.Time `json:"end_time"`
Arch string `json:"arch"`
OS string `json:"os"`
Payload interface{} `json:"payload,omitempty"`
Product string `json:"product"`
RunID string `json:"run_id"`
SchemaVersion string `json:"schema_version"`
Version string `json:"version"`
}
func (i *ReportParams) signature() string {
signature := i.Signature
if i.Signature == "" && i.SignatureFile != "" {
var err error
signature, err = checkSignature(i.SignatureFile)
if err != nil {
return ""
}
}
return signature
}
// Report sends telemetry information to checkpoint
func Report(ctx context.Context, r *ReportParams) error {
if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
return nil
}
req, err := ReportRequest(r)
if err != nil {
return err
}
client := cleanhttp.DefaultClient()
resp, err := client.Do(req.WithContext(ctx))
if err != nil {
return err
}
if resp.StatusCode != 201 {
return fmt.Errorf("Unknown status: %d", resp.StatusCode)
}
return nil
}
// ReportRequest creates a request object for making a report
func ReportRequest(r *ReportParams) (*http.Request, error) {
// Populate some fields automatically if we can
if r.RunID == "" {
uuid, err := uuid.GenerateUUID()
if err != nil {
return nil, err
}
r.RunID = uuid
}
if r.Arch == "" {
r.Arch = runtime.GOARCH
}
if r.OS == "" {
r.OS = runtime.GOOS
}
if r.Signature == "" {
r.Signature = r.signature()
}
b, err := json.Marshal(r)
if err != nil {
return nil, err
}
u := &url.URL{
Scheme: "https",
Host: "checkpoint-api.hashicorp.com",
Path: fmt.Sprintf("/v1/telemetry/%s", r.Product),
}
req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b))
if err != nil {
return nil, err
}
req.Header.Set("Accept", "application/json")
req.Header.Set("User-Agent", "HashiCorp/go-checkpoint")
return req, nil
}
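A minimal sketch of how the Report entry point added in this file might be called, using only the ReportParams fields shown above; the product, version, and file path values are illustrative. As in the code above, CHECKPOINT_DISABLE short-circuits the call.

package main

import (
	"context"
	"log"
	"time"

	checkpoint "github.com/hashicorp/go-checkpoint"
)

func main() {
	start := time.Now()
	// ... do the work being reported on ...

	err := checkpoint.Report(context.Background(), &checkpoint.ReportParams{
		Product:       "consul", // illustrative values
		Version:       "1.8.4",
		SchemaVersion: "1",
		StartTime:     start,
		EndTime:       time.Now(),
		SignatureFile: "/tmp/checkpoint-signature", // generated if missing
		Payload:       map[string]interface{}{"example": true},
	})
	if err != nil {
		log.Printf("telemetry report failed: %v", err)
	}
}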

vendor/github.com/hashicorp/go-checkpoint/versions.go (new vendored file)

@ -0,0 +1,90 @@
package checkpoint
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"strconv"
"time"
"github.com/hashicorp/go-cleanhttp"
)
// VersionsParams are the parameters for a versions request.
type VersionsParams struct {
// Service is used to lookup the correct service.
Service string
// Product is used to filter the version constraints.
Product string
// Force, if true, will force the check even if CHECKPOINT_DISABLE
// is set. Within HashiCorp products, this is ONLY USED when the user
// specifically requests it. This is never automatically done without
// the user's consent.
Force bool
}
// VersionsResponse is the response for a versions request.
type VersionsResponse struct {
Service string `json:"service"`
Product string `json:"product"`
Minimum string `json:"minimum"`
Maximum string `json:"maximum"`
Excluding []string `json:"excluding"`
}
// Versions returns the version constraints for a given service and product.
func Versions(p *VersionsParams) (*VersionsResponse, error) {
if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force {
return &VersionsResponse{}, nil
}
// Set a default timeout of 1 sec for the versions request (in milliseconds)
timeout := 1000
if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT"))
}
v := url.Values{}
v.Set("product", p.Product)
u := &url.URL{
Scheme: "https",
Host: "checkpoint-api.hashicorp.com",
Path: fmt.Sprintf("/v1/versions/%s", p.Service),
RawQuery: v.Encode(),
}
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", "application/json")
req.Header.Set("User-Agent", "HashiCorp/go-checkpoint")
client := cleanhttp.DefaultClient()
// We use a short timeout since checking for new versions is not critical
// enough to block on if checkpoint is broken/slow.
client.Timeout = time.Duration(timeout) * time.Millisecond
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode)
}
result := &VersionsResponse{}
if err := json.NewDecoder(resp.Body).Decode(result); err != nil {
return nil, err
}
return result, nil
}
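Similarly, a small sketch of the versions endpoint added above, built only from the VersionsParams and VersionsResponse structs shown in this file; the Service and Product values are placeholders.

package main

import (
	"fmt"
	"log"

	checkpoint "github.com/hashicorp/go-checkpoint"
)

func main() {
	resp, err := checkpoint.Versions(&checkpoint.VersionsParams{
		Service: "consul.v1", // placeholder service name
		Product: "consul",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("minimum=%s maximum=%s excluding=%v\n",
		resp.Minimum, resp.Maximum, resp.Excluding)
}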


@ -1,4 +1,16 @@
# UNRELEASED # 1.3.0 (September 17th, 2020)
FEATURES
* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)]
# 1.2.0 (March 18th, 2020)
FEATURES
* Adds a `Clone` method to `Txn` allowing transactions to be split into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)]
# 1.1.0 (May 22nd, 2019)
FEATURES FEATURES
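The Clone entry in the changelog above is easiest to see with a small sketch. It assumes the existing iradix Tree/Txn API (New, Txn, Insert, Commit, Get), which is not part of this diff; only Clone itself is new.

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	txn := iradix.New().Txn()
	txn.Insert([]byte("shared"), 1)

	// Clone splits the transaction into two independently mutable ones.
	clone := txn.Clone()
	txn.Insert([]byte("only-in-original"), 2)
	clone.Insert([]byte("only-in-clone"), 3)

	t1, t2 := txn.Commit(), clone.Commit()
	_, inT1 := t1.Get([]byte("only-in-clone"))
	_, inT2 := t2.Get([]byte("only-in-original"))
	fmt.Println(inT1, inT2) // false false: the trees diverged after Clone
}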


@ -155,7 +155,7 @@ func (i *Iterator) Next() ([]byte, interface{}, bool) {
// Initialize our stack if needed // Initialize our stack if needed
if i.stack == nil && i.node != nil { if i.stack == nil && i.node != nil {
i.stack = []edges{ i.stack = []edges{
edges{ {
edge{node: i.node}, edge{node: i.node},
}, },
} }


@ -211,6 +211,12 @@ func (n *Node) Iterator() *Iterator {
return &Iterator{node: n} return &Iterator{node: n}
} }
// ReverseIterator is used to return an iterator at
// the given node to walk the tree backwards
func (n *Node) ReverseIterator() *ReverseIterator {
return NewReverseIterator(n)
}
// rawIterator is used to return a raw iterator at the given node to walk the // rawIterator is used to return a raw iterator at the given node to walk the
// tree. // tree.
func (n *Node) rawIterator() *rawIterator { func (n *Node) rawIterator() *rawIterator {
@ -224,6 +230,11 @@ func (n *Node) Walk(fn WalkFn) {
recursiveWalk(n, fn) recursiveWalk(n, fn)
} }
// WalkBackwards is used to walk the tree in reverse order
func (n *Node) WalkBackwards(fn WalkFn) {
reverseRecursiveWalk(n, fn)
}
// WalkPrefix is used to walk the tree under a prefix // WalkPrefix is used to walk the tree under a prefix
func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) { func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
search := prefix search := prefix
@ -302,3 +313,22 @@ func recursiveWalk(n *Node, fn WalkFn) bool {
} }
return false return false
} }
// reverseRecursiveWalk is used to do a reverse pre-order
// walk of a node recursively. Returns true if the walk
// should be aborted
func reverseRecursiveWalk(n *Node, fn WalkFn) bool {
// Visit the leaf values if any
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
return true
}
// Recurse on the children in reverse order
for i := len(n.edges) - 1; i >= 0; i-- {
e := n.edges[i]
if reverseRecursiveWalk(e.node, fn) {
return true
}
}
return false
}
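A short sketch of the reverse walk added above, assuming the pre-existing Tree.Insert/Root API and the WalkFn signature func([]byte, interface{}) bool used by recursiveWalk; neither is shown in this hunk.

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	for _, k := range []string{"a", "b", "c"} {
		r, _, _ = r.Insert([]byte(k), k)
	}

	// WalkBackwards visits keys in descending order: c, b, a.
	r.Root().WalkBackwards(func(k []byte, v interface{}) bool {
		fmt.Println(string(k))
		return false // keep walking
	})
}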


@ -41,7 +41,7 @@ func (i *rawIterator) Next() {
// Initialize our stack if needed. // Initialize our stack if needed.
if i.stack == nil && i.node != nil { if i.stack == nil && i.node != nil {
i.stack = []rawStackEntry{ i.stack = []rawStackEntry{
rawStackEntry{ {
edges: edges{ edges: edges{
edge{node: i.node}, edge{node: i.node},
}, },


@ -0,0 +1,177 @@
package iradix
import (
"bytes"
)
// ReverseIterator is used to iterate over a set of nodes
// in reverse in-order
type ReverseIterator struct {
i *Iterator
}
// NewReverseIterator returns a new ReverseIterator at a node
func NewReverseIterator(n *Node) *ReverseIterator {
return &ReverseIterator{
i: &Iterator{node: n},
}
}
// SeekPrefixWatch is used to seek the iterator to a given prefix
// and returns the watch channel of the finest granularity
func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
return ri.i.SeekPrefixWatch(prefix)
}
// SeekPrefix is used to seek the iterator to a given prefix
func (ri *ReverseIterator) SeekPrefix(prefix []byte) {
ri.i.SeekPrefixWatch(prefix)
}
func (ri *ReverseIterator) recurseMax(n *Node) *Node {
// Traverse to the maximum child
if n.leaf != nil {
return n
}
if len(n.edges) > 0 {
// Add all the other edges to the stack (the max node will be added as
// we recurse)
m := len(n.edges)
ri.i.stack = append(ri.i.stack, n.edges[:m-1])
return ri.recurseMax(n.edges[m-1].node)
}
// Shouldn't be possible
return nil
}
// SeekReverseLowerBound is used to seek the iterator to the largest key that is
// lower or equal to the given key. There is no watch variant as it's hard to
// predict based on the radix structure which node(s) changes might affect the
// result.
func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) {
// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
// go because we need only a subset of edges of many nodes in the path to the
// leaf with the lower bound.
ri.i.stack = []edges{}
n := ri.i.node
search := key
found := func(n *Node) {
ri.i.node = n
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
}
for {
// Compare current prefix with the search key's same-length prefix.
var prefixCmp int
if len(n.prefix) < len(search) {
prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
} else {
prefixCmp = bytes.Compare(n.prefix, search)
}
if prefixCmp < 0 {
// Prefix is smaller than search prefix, that means there is no lower bound.
// But we are looking in reverse, so the reverse lower bound will be the
// largest leaf under this subtree, since it is the value that would come
// right before the current search prefix if it were in the tree. So we need
// to follow the maximum path in this subtree to find it.
n = ri.recurseMax(n)
if n != nil {
found(n)
}
return
}
if prefixCmp > 0 {
// Prefix is larger than search prefix, that means there is no reverse lower
// bound since nothing comes before our current search prefix.
ri.i.node = nil
return
}
// Prefix is equal, we are still heading for an exact match. If this is a
// leaf we're done.
if n.leaf != nil {
if bytes.Compare(n.leaf.key, key) < 0 {
ri.i.node = nil
return
}
found(n)
return
}
// Consume the search prefix
if len(n.prefix) > len(search) {
search = []byte{}
} else {
search = search[len(n.prefix):]
}
// Otherwise, take the lower bound next edge.
idx, lbNode := n.getLowerBoundEdge(search[0])
// From here, we need to update the stack with all values lower than
// the lower bound edge. Since getLowerBoundEdge() returns -1 when the
// search prefix is larger than all edges, we need to place idx at the
// last edge index so they can all be placed in the stack, since they
// come before our search prefix.
if idx == -1 {
idx = len(n.edges)
}
// Create stack edges for all the strictly lower edges in this node.
if len(n.edges[:idx]) > 0 {
ri.i.stack = append(ri.i.stack, n.edges[:idx])
}
// Exit if there's no lower bound edge. The stack will have the
// previous nodes already.
if lbNode == nil {
ri.i.node = nil
return
}
ri.i.node = lbNode
// Recurse
n = lbNode
}
}
// Previous returns the previous node in reverse order
func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) {
// Initialize our stack if needed
if ri.i.stack == nil && ri.i.node != nil {
ri.i.stack = []edges{
{
edge{node: ri.i.node},
},
}
}
for len(ri.i.stack) > 0 {
// Inspect the last element of the stack
n := len(ri.i.stack)
last := ri.i.stack[n-1]
m := len(last)
elem := last[m-1].node
// Update the stack
if m > 1 {
ri.i.stack[n-1] = last[:m-1]
} else {
ri.i.stack = ri.i.stack[:n-1]
}
// Push the edges onto the frontier
if len(elem.edges) > 0 {
ri.i.stack = append(ri.i.stack, elem.edges)
}
// Return the leaf values if any
if elem.leaf != nil {
return elem.leaf.key, elem.leaf.val, true
}
}
return nil, nil, false
}
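Putting the new reverse iterator together: a sketch that seeks the largest key at or below a bound and then walks downwards. It uses the API shown in this file plus the ReverseIterator constructor added in node.go above; the Tree setup calls come from the existing package API.

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	for _, k := range []string{"001", "002", "005", "010"} {
		r, _, _ = r.Insert([]byte(k), k)
	}

	// Start at the largest key <= "007", then walk backwards.
	it := r.Root().ReverseIterator()
	it.SeekReverseLowerBound([]byte("007"))
	for k, _, ok := it.Previous(); ok; k, _, ok = it.Previous() {
		fmt.Println(string(k)) // 005, 002, 001
	}
}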


@ -32,7 +32,7 @@ For the underlying immutable radix trees, see [go-immutable-radix](https://githu
Documentation Documentation
============= =============
The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-memdb). The full documentation is available on [Godoc](https://pkg.go.dev/github.com/hashicorp/go-memdb).
Example Example
======= =======


@ -2,4 +2,7 @@ module github.com/hashicorp/go-memdb
go 1.12 go 1.12
require github.com/hashicorp/go-immutable-radix v1.1.0 require (
github.com/hashicorp/go-immutable-radix v1.3.0
github.com/hashicorp/golang-lru v0.5.4 // indirect
)


@ -1,6 +1,8 @@
github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc= github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE=
github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=


@ -428,6 +428,41 @@ func IsUintType(k reflect.Kind) (size int, okay bool) {
} }
} }
// BoolFieldIndex is used to extract a boolean field from an object using
// reflection and builds an index on that field.
type BoolFieldIndex struct {
Field string
}
func (i *BoolFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
v := reflect.ValueOf(obj)
v = reflect.Indirect(v) // Dereference the pointer if any
fv := v.FieldByName(i.Field)
if !fv.IsValid() {
return false, nil,
fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
}
// Check the type
k := fv.Kind()
if k != reflect.Bool {
return false, nil, fmt.Errorf("field %q is of type %v; want a bool", i.Field, k)
}
// Get the value and encode it
buf := make([]byte, 1)
if fv.Bool() {
buf[0] = 1
}
return true, buf, nil
}
func (i *BoolFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
return fromBoolArgs(args)
}
// UUIDFieldIndex is used to extract a field from an object // UUIDFieldIndex is used to extract a field from an object
// using reflection and builds an index on that field by treating // using reflection and builds an index on that field by treating
// it as a UUID. This is an optimization to using a StringFieldIndex // it as a UUID. This is an optimization to using a StringFieldIndex
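A sketch of how the new BoolFieldIndex might be wired into a go-memdb schema. DBSchema, TableSchema, IndexSchema, StringFieldIndex and the MemDB transaction calls are part of the existing memdb API and are not shown in this diff.

package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

type Person struct {
	ID     string
	Active bool
}

func main() {
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"person": {
				Name: "person",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {Name: "id", Unique: true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"}},
					// New in this change: index a bool field directly.
					"active": {Name: "active",
						Indexer: &memdb.BoolFieldIndex{Field: "Active"}},
				},
			},
		},
	}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	w := db.Txn(true)
	if err := w.Insert("person", &Person{ID: "a", Active: true}); err != nil {
		panic(err)
	}
	w.Commit()

	// Query by the boolean index value.
	it, err := db.Txn(false).Get("person", "active", true)
	if err != nil {
		panic(err)
	}
	for obj := it.Next(); obj != nil; obj = it.Next() {
		fmt.Println(obj.(*Person).ID)
	}
}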


@ -536,6 +536,34 @@ func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan str
return watch, value, nil return watch, value, nil
} }
// LastWatch is used to return the last matching object for
// the given constraints on the index along with the watch channel
func (txn *Txn) LastWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) {
// Get the index value
indexSchema, val, err := txn.getIndexValue(table, index, args...)
if err != nil {
return nil, nil, err
}
// Get the index itself
indexTxn := txn.readableIndex(table, indexSchema.Name)
// Do an exact lookup
if indexSchema.Unique && val != nil && indexSchema.Name == index {
watch, obj, ok := indexTxn.GetWatch(val)
if !ok {
return watch, nil, nil
}
return watch, obj, nil
}
// Handle non-unique index by using an iterator and getting the last value
iter := indexTxn.Root().ReverseIterator()
watch := iter.SeekPrefixWatch(val)
_, value, _ := iter.Previous()
return watch, value, nil
}
// First is used to return the first matching object for // First is used to return the first matching object for
// the given constraints on the index // the given constraints on the index
func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) { func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) {
@ -543,6 +571,13 @@ func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, er
return val, err return val, err
} }
// Last is used to return the last matching object for
// the given constraints on the index
func (txn *Txn) Last(table, index string, args ...interface{}) (interface{}, error) {
_, val, err := txn.LastWatch(table, index, args...)
return val, err
}
// LongestPrefix is used to fetch the longest prefix match for the given // LongestPrefix is used to fetch the longest prefix match for the given
// constraints on the index. Note that this will not work with the memdb // constraints on the index. Note that this will not work with the memdb
// StringFieldIndex because it adds null terminators which prevent the // StringFieldIndex because it adds null terminators which prevent the
@ -654,6 +689,26 @@ func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, e
return iter, nil return iter, nil
} }
// GetReverse is used to construct a Reverse ResultIterator over all the
// rows that match the given constraints of an index.
// The returned ResultIterator's Next() walks the index in reverse (descending) order.
func (txn *Txn) GetReverse(table, index string, args ...interface{}) (ResultIterator, error) {
indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
if err != nil {
return nil, err
}
// Seek the iterator to the appropriate sub-set
watchCh := indexIter.SeekPrefixWatch(val)
// Create an iterator
iter := &radixReverseIterator{
iter: indexIter,
watchCh: watchCh,
}
return iter, nil
}
// LowerBound is used to construct a ResultIterator over all the range of // rows that have an index value greater than or equal to the provided args. // Calling this then iterating until the rows are larger than required allows
// rows that have an index value greater than or equal to the provide args. // rows that have an index value greater than or equal to the provide args.
// Calling this then iterating until the rows are larger than required allows // Calling this then iterating until the rows are larger than required allows
@ -676,6 +731,29 @@ func (txn *Txn) LowerBound(table, index string, args ...interface{}) (ResultIter
return iter, nil return iter, nil
} }
// ReverseLowerBound is used to construct a Reverse ResultIterator over the
// range of rows that have an index value less than or equal to the
// provided args. Calling this then iterating until the rows are lower than
// required allows range scans within an index. It is not possible to watch the
// resulting iterator since the radix tree doesn't efficiently allow watching
// on lower bound changes. The WatchCh returned will be nil and so will block
// forever.
func (txn *Txn) ReverseLowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
if err != nil {
return nil, err
}
// Seek the iterator to the appropriate sub-set
indexIter.SeekReverseLowerBound(val)
// Create an iterator
iter := &radixReverseIterator{
iter: indexIter,
}
return iter, nil
}
// objectID is a tuple of table name and the raw internal id byte slice // objectID is a tuple of table name and the raw internal id byte slice
// converted to a string. It's only converted to a string to make it comparable // converted to a string. It's only converted to a string to make it comparable
// so this struct can be used as a map index. // so this struct can be used as a map index.
@ -744,6 +822,15 @@ func (txn *Txn) Changes() Changes {
// case it's different. Note that m is not a pointer so we are not // case it's different. Note that m is not a pointer so we are not
// modifying the txn.changeSet here - it's already a copy. // modifying the txn.changeSet here - it's already a copy.
m.Before = mi.firstBefore m.Before = mi.firstBefore
// Edge case - if the object was inserted and then eventually deleted in
// the same transaction, then the net effect on that key is a no-op. Don't
// emit a mutation with nil for before and after as it's meaningless and
// might violate expectations and cause a panic in code that assumes at
// least one must be set.
if m.Before == nil && m.After == nil {
continue
}
cs = append(cs, m) cs = append(cs, m)
} }
} }
@ -768,6 +855,22 @@ func (txn *Txn) getIndexIterator(table, index string, args ...interface{}) (*ira
return indexIter, val, nil return indexIter, val, nil
} }
func (txn *Txn) getIndexIteratorReverse(table, index string, args ...interface{}) (*iradix.ReverseIterator, []byte, error) {
// Get the index value to scan
indexSchema, val, err := txn.getIndexValue(table, index, args...)
if err != nil {
return nil, nil, err
}
// Get the index itself
indexTxn := txn.readableIndex(table, indexSchema.Name)
indexRoot := indexTxn.Root()
// Get an iterator over the index
indexIter := indexRoot.ReverseIterator()
return indexIter, val, nil
}
// Defer is used to push a new arbitrary function onto a stack which // Defer is used to push a new arbitrary function onto a stack which
// gets called when a transaction is committed and finished. Deferred // gets called when a transaction is committed and finished. Deferred
// functions are called in LIFO order, and only invoked at the end of // functions are called in LIFO order, and only invoked at the end of
@ -795,3 +898,43 @@ func (r *radixIterator) Next() interface{} {
} }
return value return value
} }
type radixReverseIterator struct {
iter *iradix.ReverseIterator
watchCh <-chan struct{}
}
func (r *radixReverseIterator) Next() interface{} {
_, value, ok := r.iter.Previous()
if !ok {
return nil
}
return value
}
func (r *radixReverseIterator) WatchCh() <-chan struct{} {
return r.watchCh
}
// Snapshot creates a snapshot of the current state of the transaction.
// Returns a new read-only transaction or nil if the transaction is already
// aborted or committed.
func (txn *Txn) Snapshot() *Txn {
if txn.rootTxn == nil {
return nil
}
snapshot := &Txn{
db: txn.db,
rootTxn: txn.rootTxn.Clone(),
}
// Commit sub-transactions into the snapshot
for key, subTxn := range txn.modified {
path := indexPath(key.Table, key.Index)
final := subTxn.CommitOnly()
snapshot.rootTxn.Insert(path, final)
}
return snapshot
}
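The new reverse read paths (GetReverse, ReverseLowerBound, Last) in context: a self-contained sketch that assumes only the existing memdb schema types plus the APIs added in this file.

package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

type person struct{ ID string }

func main() {
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"person": {
				Name: "person",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {Name: "id", Unique: true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"}},
				},
			},
		},
	}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}
	w := db.Txn(true)
	for _, id := range []string{"a", "b", "c", "z"} {
		if err := w.Insert("person", &person{ID: id}); err != nil {
			panic(err)
		}
	}
	w.Commit()

	txn := db.Txn(false)
	defer txn.Abort()

	// Whole index in descending order: z, c, b, a.
	it, _ := txn.GetReverse("person", "id")
	for obj := it.Next(); obj != nil; obj = it.Next() {
		fmt.Println(obj.(*person).ID)
	}

	// Largest id <= "m", then walking downwards: c, b, a.
	it, _ = txn.ReverseLowerBound("person", "id", "m")
	for obj := it.Next(); obj != nil; obj = it.Next() {
		fmt.Println(obj.(*person).ID)
	}

	// Highest entry in the index: z.
	last, _ := txn.Last("person", "id")
	fmt.Println(last.(*person).ID)
}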


@ -127,3 +127,18 @@ func (w WatchSet) watchMany(ctx context.Context) error {
return ctx.Err() return ctx.Err()
} }
} }
// WatchCh returns a channel that is used to wait for either the watch set to trigger
// or for the context to be cancelled. WatchCh creates a new goroutine each call, so
// callers may need to cache the returned channel to avoid creating extra goroutines.
func (w WatchSet) WatchCh(ctx context.Context) <-chan error {
// Create the outgoing channel
triggerCh := make(chan error, 1)
// Create a goroutine to collect the error from WatchCtx
go func() {
triggerCh <- w.WatchCtx(ctx)
}()
return triggerCh
}
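A sketch of the new WatchSet.WatchCh helper in a select loop. Building and populating the watch set (for example via FirstWatch and WatchSet.Add from the existing API) is assumed to happen elsewhere; the channel is cached because each WatchCh call starts a goroutine, as the comment above warns.

package main

import (
	"context"
	"fmt"
	"time"

	memdb "github.com/hashicorp/go-memdb"
)

// wait blocks until the watch set fires or the context ends, while the same
// select loop stays free to service other channels.
func wait(ctx context.Context, ws memdb.WatchSet) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	// Cache the channel: every WatchCh call spawns a new goroutine.
	watchCh := ws.WatchCh(ctx)

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	for {
		select {
		case err := <-watchCh:
			// nil: something under the watch set changed; non-nil: ctx ended.
			return err
		case <-ticker.C:
			fmt.Println("still waiting")
		}
	}
}

func main() {
	_ = wait(context.Background(), memdb.NewWatchSet())
}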


@ -1,13 +0,0 @@
language: go
go:
- 1.2
- 1.3
- 1.4
- 1.9
- "1.10"
- 1.11
- 1.12
script:
- go test


@ -1,5 +1,6 @@
# Versioning Library for Go # Versioning Library for Go
[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) [![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/master)
[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version)
go-version is a library for parsing versions and version constraints, go-version is a library for parsing versions and version constraints,
and verifying versions against a set of constraints. go-version and verifying versions against a set of constraints. go-version


@ -280,6 +280,10 @@ func comparePrereleases(v string, other string) int {
// Equal tests if two versions are equal. // Equal tests if two versions are equal.
func (v *Version) Equal(o *Version) bool { func (v *Version) Equal(o *Version) bool {
if v == nil || o == nil {
return v == o
}
return v.Compare(o) == 0 return v.Compare(o) == 0
} }
@ -288,7 +292,7 @@ func (v *Version) GreaterThan(o *Version) bool {
return v.Compare(o) > 0 return v.Compare(o) > 0
} }
// GreaterThanOrEqualTo tests if this version is greater than or equal to another version. // GreaterThanOrEqual tests if this version is greater than or equal to another version.
func (v *Version) GreaterThanOrEqual(o *Version) bool { func (v *Version) GreaterThanOrEqual(o *Version) bool {
return v.Compare(o) >= 0 return v.Compare(o) >= 0
} }
@ -298,7 +302,7 @@ func (v *Version) LessThan(o *Version) bool {
return v.Compare(o) < 0 return v.Compare(o) < 0
} }
// LessThanOrEqualTo tests if this version is less than or equal to another version. // LessThanOrEqual tests if this version is less than or equal to another version.
func (v *Version) LessThanOrEqual(o *Version) bool { func (v *Version) LessThanOrEqual(o *Version) bool {
return v.Compare(o) <= 0 return v.Compare(o) <= 0
} }
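
A small sketch of the nil handling added to Equal above, assuming this patched version of the package; previously, comparing against a nil *Version could end in a nil-pointer dereference.

    package main

    import (
        "fmt"

        version "github.com/hashicorp/go-version"
    )

    func main() {
        var unknown *version.Version // nil
        v, _ := version.NewVersion("1.2.3")

        fmt.Println(unknown.Equal(nil)) // true: both sides nil
        fmt.Println(v.Equal(unknown))   // false: one side nil, no panic
    }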

View File

@ -1,3 +0,0 @@
sudo: false
language: go
go: 1.5

View File

@ -1,6 +1,6 @@
# HIL # HIL
[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil) [![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://circleci.com/gh/hashicorp/hil/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/hil/tree/master)
HIL (HashiCorp Interpolation Language) is a lightweight embedded language used HIL (HashiCorp Interpolation Language) is a lightweight embedded language used
primarily for configuration interpolation. The goal of HIL is to make a simple primarily for configuration interpolation. The goal of HIL is to make a simple
@ -43,7 +43,7 @@ better tested for general purpose use.
## Syntax ## Syntax
For a complete grammar, please see the parser itself. A high-level overview For a complete grammar, please see the parser itself. A high-level overview
of the syntax and grammer is listed here. of the syntax and grammar is listed here.
Code begins within `${` and `}`. Outside of this, text is treated Code begins within `${` and `}`. Outside of this, text is treated
literally. For example, `foo` is a valid HIL program that is just the literally. For example, `foo` is a valid HIL program that is just the
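
A minimal end-to-end sketch of the interpolation flow described above, assuming the Parse/Eval API of this hil release; the variable name and value are made up for the example.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hil"
        "github.com/hashicorp/hil/ast"
    )

    func main() {
        tree, err := hil.Parse("${var.name} has ${2 + 3} items")
        if err != nil {
            panic(err)
        }

        config := &hil.EvalConfig{
            GlobalScope: &ast.BasicScope{
                VarMap: map[string]ast.Variable{
                    "var.name": {Type: ast.TypeString, Value: "cart"},
                },
            },
        }

        result, err := hil.Eval(tree, config)
        if err != nil {
            panic(err)
        }
        fmt.Println(result.Type, result.Value) // TypeString cart has 5 items
    }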

View File

@ -1,18 +0,0 @@
version: "build-{branch}-{build}"
image: Visual Studio 2015
clone_folder: c:\gopath\src\github.com\hashicorp\hil
environment:
GOPATH: c:\gopath
init:
- git config --global core.autocrlf true
install:
- cmd: >-
echo %Path%
go version
go env
go get -d -v -t ./...
build_script:
- cmd: go test -v ./...

View File

@ -5,9 +5,20 @@ type ArithmeticOp int
const ( const (
ArithmeticOpInvalid ArithmeticOp = 0 ArithmeticOpInvalid ArithmeticOp = 0
ArithmeticOpAdd ArithmeticOp = iota
ArithmeticOpAdd ArithmeticOp = iota
ArithmeticOpSub ArithmeticOpSub
ArithmeticOpMul ArithmeticOpMul
ArithmeticOpDiv ArithmeticOpDiv
ArithmeticOpMod ArithmeticOpMod
ArithmeticOpLogicalAnd
ArithmeticOpLogicalOr
ArithmeticOpEqual
ArithmeticOpNotEqual
ArithmeticOpLessThan
ArithmeticOpLessThanOrEqual
ArithmeticOpGreaterThan
ArithmeticOpGreaterThanOrEqual
) )

View File

@ -19,13 +19,22 @@ type Node interface {
// Pos is the starting position of an AST node // Pos is the starting position of an AST node
type Pos struct { type Pos struct {
Column, Line int // Column/Line number, starting at 1 Column, Line int // Column/Line number, starting at 1
Filename string // Optional source filename, if known
} }
func (p Pos) String() string { func (p Pos) String() string {
return fmt.Sprintf("%d:%d", p.Line, p.Column) if p.Filename == "" {
return fmt.Sprintf("%d:%d", p.Line, p.Column)
} else {
return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column)
}
} }
// InitPos is an initial position value. This should be used as
// the starting position (presets the column and line to 1).
var InitPos = Pos{Column: 1, Line: 1}
// Visitors are just implementations of this function. // Visitors are just implementations of this function.
// //
// The function must return the Node to replace this node with. "nil" is // The function must return the Node to replace this node with. "nil" is
@ -49,11 +58,19 @@ type Type uint32
const ( const (
TypeInvalid Type = 0 TypeInvalid Type = 0
TypeAny Type = 1 << iota TypeAny Type = 1 << iota
TypeBool
TypeString TypeString
TypeInt TypeInt
TypeFloat TypeFloat
TypeList TypeList
TypeMap TypeMap
// This is a special type used by Terraform to mark "unknown" values.
// It is impossible for this type to be introduced into your HIL programs
// unless you explicitly set a variable to this value. In that case,
// any operation including the variable will return "TypeUnknown" as the
// type.
TypeUnknown
) )
func (t Type) Printable() string { func (t Type) Printable() string {
@ -62,6 +79,8 @@ func (t Type) Printable() string {
return "invalid type" return "invalid type"
case TypeAny: case TypeAny:
return "any type" return "any type"
case TypeBool:
return "type bool"
case TypeString: case TypeString:
return "type string" return "type string"
case TypeInt: case TypeInt:
@ -72,6 +91,8 @@ func (t Type) Printable() string {
return "type list" return "type list"
case TypeMap: case TypeMap:
return "type map" return "type map"
case TypeUnknown:
return "type unknown"
default: default:
return "unknown type" return "unknown type"
} }

36
vendor/github.com/hashicorp/hil/ast/conditional.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
package ast
import (
"fmt"
)
type Conditional struct {
CondExpr Node
TrueExpr Node
FalseExpr Node
Posx Pos
}
// Accept passes the given visitor to the child nodes in this order:
// CondExpr, TrueExpr, FalseExpr. It then finally passes itself to the visitor.
func (n *Conditional) Accept(v Visitor) Node {
n.CondExpr = n.CondExpr.Accept(v)
n.TrueExpr = n.TrueExpr.Accept(v)
n.FalseExpr = n.FalseExpr.Accept(v)
return v(n)
}
func (n *Conditional) Pos() Pos {
return n.Posx
}
func (n *Conditional) Type(Scope) (Type, error) {
// This is not actually a useful value; the type checker ignores
// this function when analyzing conditionals, just as with Arithmetic.
return TypeInt, nil
}
func (n *Conditional) GoString() string {
return fmt.Sprintf("*%#v", *n)
}
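
The Conditional node above is what the ternary interpolation syntax parses into; a sketch, assuming that syntax is supported by this release's parser and using a made-up variable.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hil"
        "github.com/hashicorp/hil/ast"
    )

    func main() {
        tree, err := hil.Parse(`${var.env == "prod" ? "many" : "few"}`)
        if err != nil {
            panic(err)
        }

        config := &hil.EvalConfig{
            GlobalScope: &ast.BasicScope{
                VarMap: map[string]ast.Variable{
                    "var.env": {Type: ast.TypeString, Value: "dev"},
                },
            },
        }

        result, err := hil.Eval(tree, config)
        if err != nil {
            panic(err)
        }
        fmt.Println(result.Value) // few
    }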

View File

@ -13,6 +13,8 @@ type Index struct {
} }
func (n *Index) Accept(v Visitor) Node { func (n *Index) Accept(v Visitor) Node {
n.Target = n.Target.Accept(v)
n.Key = n.Key.Accept(v)
return v(n) return v(n)
} }

View File

@ -2,6 +2,7 @@ package ast
import ( import (
"fmt" "fmt"
"reflect"
) )
// LiteralNode represents a single literal value, such as "foo" or // LiteralNode represents a single literal value, such as "foo" or
@ -12,6 +13,51 @@ type LiteralNode struct {
Posx Pos Posx Pos
} }
// NewLiteralNode returns a new literal node representing the given
// literal Go value, which must correspond to one of the primitive types
// supported by HIL. Lists and maps cannot currently be constructed via
// this function.
//
// If an inappropriately-typed value is provided, this function will
// return an error. The main intended use of this function is to produce
// "synthetic" literals from constants in code, where the value type is
// well known at compile time. To easily store these in global variables,
// see also MustNewLiteralNode.
func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) {
goType := reflect.TypeOf(value)
var hilType Type
switch goType.Kind() {
case reflect.Bool:
hilType = TypeBool
case reflect.Int:
hilType = TypeInt
case reflect.Float64:
hilType = TypeFloat
case reflect.String:
hilType = TypeString
default:
return nil, fmt.Errorf("unsupported literal node type: %T", value)
}
return &LiteralNode{
Value: value,
Typex: hilType,
Posx: pos,
}, nil
}
// MustNewLiteralNode wraps NewLiteralNode and panics if an error is
// returned, thus allowing valid literal nodes to be easily assigned to
// global variables.
func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode {
node, err := NewLiteralNode(value, pos)
if err != nil {
panic(err)
}
return node
}
func (n *LiteralNode) Accept(v Visitor) Node { func (n *LiteralNode) Accept(v Visitor) Node {
return v(n) return v(n)
} }
@ -31,3 +77,12 @@ func (n *LiteralNode) String() string {
func (n *LiteralNode) Type(Scope) (Type, error) { func (n *LiteralNode) Type(Scope) (Type, error) {
return n.Typex, nil return n.Typex, nil
} }
// IsUnknown returns true either if the node's value is itself unknown
// or if it is a collection containing any unknown elements, deeply.
func (n *LiteralNode) IsUnknown() bool {
return IsUnknown(Variable{
Type: n.Typex,
Value: n.Value,
})
}
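
A short sketch of the new literal-node helpers and IsUnknown, using only identifiers introduced in this diff; expected output is noted in the comments.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hil/ast"
    )

    func main() {
        // MustNewLiteralNode is convenient for synthetic literals whose Go
        // type is known at compile time; it panics on unsupported types.
        n := ast.MustNewLiteralNode(42, ast.InitPos)
        fmt.Println(n.Typex, n.Value, n.IsUnknown()) // TypeInt 42 false

        // NewLiteralNode reports unsupported values as an error instead.
        _, err := ast.NewLiteralNode([]string{"nope"}, ast.InitPos)
        fmt.Println(err) // unsupported literal node type: []string
    }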

View File

@ -7,21 +7,25 @@ import "fmt"
const ( const (
_Type_name_0 = "TypeInvalid" _Type_name_0 = "TypeInvalid"
_Type_name_1 = "TypeAny" _Type_name_1 = "TypeAny"
_Type_name_2 = "TypeString" _Type_name_2 = "TypeBool"
_Type_name_3 = "TypeInt" _Type_name_3 = "TypeString"
_Type_name_4 = "TypeFloat" _Type_name_4 = "TypeInt"
_Type_name_5 = "TypeList" _Type_name_5 = "TypeFloat"
_Type_name_6 = "TypeMap" _Type_name_6 = "TypeList"
_Type_name_7 = "TypeMap"
_Type_name_8 = "TypeUnknown"
) )
var ( var (
_Type_index_0 = [...]uint8{0, 11} _Type_index_0 = [...]uint8{0, 11}
_Type_index_1 = [...]uint8{0, 7} _Type_index_1 = [...]uint8{0, 7}
_Type_index_2 = [...]uint8{0, 10} _Type_index_2 = [...]uint8{0, 8}
_Type_index_3 = [...]uint8{0, 7} _Type_index_3 = [...]uint8{0, 10}
_Type_index_4 = [...]uint8{0, 9} _Type_index_4 = [...]uint8{0, 7}
_Type_index_5 = [...]uint8{0, 8} _Type_index_5 = [...]uint8{0, 9}
_Type_index_6 = [...]uint8{0, 7} _Type_index_6 = [...]uint8{0, 8}
_Type_index_7 = [...]uint8{0, 7}
_Type_index_8 = [...]uint8{0, 11}
) )
func (i Type) String() string { func (i Type) String() string {
@ -40,6 +44,10 @@ func (i Type) String() string {
return _Type_name_5 return _Type_name_5
case i == 64: case i == 64:
return _Type_name_6 return _Type_name_6
case i == 128:
return _Type_name_7
case i == 256:
return _Type_name_8
default: default:
return fmt.Sprintf("Type(%d)", i) return fmt.Sprintf("Type(%d)", i)
} }

30
vendor/github.com/hashicorp/hil/ast/unknown.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
package ast
// IsUnknown reports whether a variable is unknown or contains any value
// that is unknown. This will recurse into lists and maps and so on.
func IsUnknown(v Variable) bool {
// If it is unknown itself, return true
if v.Type == TypeUnknown {
return true
}
// If it is a container type, check the values
switch v.Type {
case TypeList:
for _, el := range v.Value.([]Variable) {
if IsUnknown(el) {
return true
}
}
case TypeMap:
for _, el := range v.Value.(map[string]Variable) {
if IsUnknown(el) {
return true
}
}
default:
}
// Not a container type, or it survived the checks above
return false
}
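
A sketch of the recursive behavior described in the comment above: a single unknown element taints the whole collection. hil.UnknownValue is the sentinel defined elsewhere in this update.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hil"
        "github.com/hashicorp/hil/ast"
    )

    func main() {
        known := ast.Variable{Type: ast.TypeString, Value: "a"}
        unknown := ast.Variable{Type: ast.TypeUnknown, Value: hil.UnknownValue}

        list := ast.Variable{
            Type:  ast.TypeList,
            Value: []ast.Variable{known, unknown},
        }

        fmt.Println(ast.IsUnknown(known)) // false
        fmt.Println(ast.IsUnknown(list))  // true: one unknown element is enough
    }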

View File

@ -3,43 +3,61 @@ package ast
import "fmt" import "fmt"
func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) { func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) {
listTypes := make(map[Type]struct{}) if len(list) == 0 {
return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName)
}
elemType := TypeUnknown
for _, v := range list { for _, v := range list {
if _, ok := listTypes[v.Type]; ok { if v.Type == TypeUnknown {
continue continue
} }
listTypes[v.Type] = struct{}{}
if elemType == TypeUnknown {
elemType = v.Type
continue
}
if v.Type != elemType {
return TypeInvalid, fmt.Errorf(
"list %q does not have homogenous types. found %s and then %s",
variableName,
elemType, v.Type,
)
}
elemType = v.Type
} }
if len(listTypes) != 1 && len(list) != 0 { return elemType, nil
return TypeInvalid, fmt.Errorf("list %q does not have homogenous types. found %s", variableName, reportTypes(listTypes))
}
if len(list) > 0 {
return list[0].Type, nil
}
return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName)
} }
func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) { func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) {
valueTypes := make(map[Type]struct{}) if len(vmap) == 0 {
return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName)
}
elemType := TypeUnknown
for _, v := range vmap { for _, v := range vmap {
if _, ok := valueTypes[v.Type]; ok { if v.Type == TypeUnknown {
continue continue
} }
valueTypes[v.Type] = struct{}{}
if elemType == TypeUnknown {
elemType = v.Type
continue
}
if v.Type != elemType {
return TypeInvalid, fmt.Errorf(
"map %q does not have homogenous types. found %s and then %s",
variableName,
elemType, v.Type,
)
}
elemType = v.Type
} }
if len(valueTypes) != 1 && len(vmap) != 0 { return elemType, nil
return TypeInvalid, fmt.Errorf("map %q does not have homogenous value types. found %s", variableName, reportTypes(valueTypes))
}
// For loop here is an easy way to get a single key, we return immediately.
for _, v := range vmap {
return v.Type, nil
}
// This means the map is empty
return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName)
} }

View File

@ -1,6 +1,7 @@
package hil package hil
import ( import (
"errors"
"strconv" "strconv"
"github.com/hashicorp/hil/ast" "github.com/hashicorp/hil/ast"
@ -17,16 +18,23 @@ func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope {
} }
// Implicit conversions // Implicit conversions
scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString()
scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt() scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt()
scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString() scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString()
scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat() scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat()
scope.FuncMap["__builtin_IntToString"] = builtinIntToString() scope.FuncMap["__builtin_IntToString"] = builtinIntToString()
scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt() scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt()
scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat() scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat()
scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool()
// Math operations // Math operations
scope.FuncMap["__builtin_IntMath"] = builtinIntMath() scope.FuncMap["__builtin_IntMath"] = builtinIntMath()
scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath() scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath()
scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare()
scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare()
scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare()
scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare()
scope.FuncMap["__builtin_Logical"] = builtinLogical()
return scope return scope
} }
@ -77,8 +85,16 @@ func builtinIntMath() ast.Function {
case ast.ArithmeticOpMul: case ast.ArithmeticOpMul:
result *= arg result *= arg
case ast.ArithmeticOpDiv: case ast.ArithmeticOpDiv:
if arg == 0 {
return nil, errors.New("divide by zero")
}
result /= arg result /= arg
case ast.ArithmeticOpMod: case ast.ArithmeticOpMod:
if arg == 0 {
return nil, errors.New("divide by zero")
}
result = result % arg result = result % arg
} }
} }
@ -88,6 +104,136 @@ func builtinIntMath() ast.Function {
} }
} }
func builtinBoolCompare() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool},
Variadic: false,
ReturnType: ast.TypeBool,
Callback: func(args []interface{}) (interface{}, error) {
op := args[0].(ast.ArithmeticOp)
lhs := args[1].(bool)
rhs := args[2].(bool)
switch op {
case ast.ArithmeticOpEqual:
return lhs == rhs, nil
case ast.ArithmeticOpNotEqual:
return lhs != rhs, nil
default:
return nil, errors.New("invalid comparison operation")
}
},
}
}
func builtinFloatCompare() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat},
Variadic: false,
ReturnType: ast.TypeBool,
Callback: func(args []interface{}) (interface{}, error) {
op := args[0].(ast.ArithmeticOp)
lhs := args[1].(float64)
rhs := args[2].(float64)
switch op {
case ast.ArithmeticOpEqual:
return lhs == rhs, nil
case ast.ArithmeticOpNotEqual:
return lhs != rhs, nil
case ast.ArithmeticOpLessThan:
return lhs < rhs, nil
case ast.ArithmeticOpLessThanOrEqual:
return lhs <= rhs, nil
case ast.ArithmeticOpGreaterThan:
return lhs > rhs, nil
case ast.ArithmeticOpGreaterThanOrEqual:
return lhs >= rhs, nil
default:
return nil, errors.New("invalid comparison operation")
}
},
}
}
func builtinIntCompare() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt},
Variadic: false,
ReturnType: ast.TypeBool,
Callback: func(args []interface{}) (interface{}, error) {
op := args[0].(ast.ArithmeticOp)
lhs := args[1].(int)
rhs := args[2].(int)
switch op {
case ast.ArithmeticOpEqual:
return lhs == rhs, nil
case ast.ArithmeticOpNotEqual:
return lhs != rhs, nil
case ast.ArithmeticOpLessThan:
return lhs < rhs, nil
case ast.ArithmeticOpLessThanOrEqual:
return lhs <= rhs, nil
case ast.ArithmeticOpGreaterThan:
return lhs > rhs, nil
case ast.ArithmeticOpGreaterThanOrEqual:
return lhs >= rhs, nil
default:
return nil, errors.New("invalid comparison operation")
}
},
}
}
func builtinStringCompare() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString},
Variadic: false,
ReturnType: ast.TypeBool,
Callback: func(args []interface{}) (interface{}, error) {
op := args[0].(ast.ArithmeticOp)
lhs := args[1].(string)
rhs := args[2].(string)
switch op {
case ast.ArithmeticOpEqual:
return lhs == rhs, nil
case ast.ArithmeticOpNotEqual:
return lhs != rhs, nil
default:
return nil, errors.New("invalid comparison operation")
}
},
}
}
func builtinLogical() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeInt},
Variadic: true,
VariadicType: ast.TypeBool,
ReturnType: ast.TypeBool,
Callback: func(args []interface{}) (interface{}, error) {
op := args[0].(ast.ArithmeticOp)
result := args[1].(bool)
for _, raw := range args[2:] {
arg := raw.(bool)
switch op {
case ast.ArithmeticOpLogicalOr:
result = result || arg
case ast.ArithmeticOpLogicalAnd:
result = result && arg
default:
return nil, errors.New("invalid logical operator")
}
}
return result, nil
},
}
}
func builtinFloatToInt() ast.Function { func builtinFloatToInt() ast.Function {
return ast.Function{ return ast.Function{
ArgTypes: []ast.Type{ast.TypeFloat}, ArgTypes: []ast.Type{ast.TypeFloat},
@ -158,3 +304,28 @@ func builtinStringToFloat() ast.Function {
}, },
} }
} }
func builtinBoolToString() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeBool},
ReturnType: ast.TypeString,
Callback: func(args []interface{}) (interface{}, error) {
return strconv.FormatBool(args[0].(bool)), nil
},
}
}
func builtinStringToBool() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeString},
ReturnType: ast.TypeBool,
Callback: func(args []interface{}) (interface{}, error) {
v, err := strconv.ParseBool(args[0].(string))
if err != nil {
return nil, err
}
return v, nil
},
}
}
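
A sketch of the new comparison, logical, and divide-by-zero behavior end to end, assuming the operator syntax supported by this release's parser; the exact error wording may differ.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hil"
    )

    func main() {
        config := &hil.EvalConfig{}

        // Comparison and logical operators yield bools, which the new
        // bool-to-string conversion renders when interpolated into a string.
        tree, _ := hil.Parse("${2 > 1 && 1 == 1}")
        result, err := hil.Eval(tree, config)
        fmt.Println(result.Value, err) // true <nil>

        // Integer division by zero is now reported as an error rather than
        // panicking inside the math builtin.
        tree, _ = hil.Parse("${1 / 0}")
        _, err = hil.Eval(tree, config)
        fmt.Println(err) // error mentioning "divide by zero"
    }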

View File

@ -44,6 +44,12 @@ func (v *TypeCheck) Visit(root ast.Node) error {
defer v.lock.Unlock() defer v.lock.Unlock()
defer v.reset() defer v.reset()
root.Accept(v.visit) root.Accept(v.visit)
// If the resulting type is unknown, then just let the whole thing go.
if v.err == errExitUnknown {
v.err = nil
}
return v.err return v.err
} }
@ -61,6 +67,9 @@ func (v *TypeCheck) visit(raw ast.Node) ast.Node {
case *ast.Call: case *ast.Call:
tc := &typeCheckCall{n} tc := &typeCheckCall{n}
result, err = tc.TypeCheck(v) result, err = tc.TypeCheck(v)
case *ast.Conditional:
tc := &typeCheckConditional{n}
result, err = tc.TypeCheck(v)
case *ast.Index: case *ast.Index:
tc := &typeCheckIndex{n} tc := &typeCheckIndex{n}
result, err = tc.TypeCheck(v) result, err = tc.TypeCheck(v)
@ -103,6 +112,28 @@ func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) {
exprs[len(tc.n.Exprs)-1-i] = v.StackPop() exprs[len(tc.n.Exprs)-1-i] = v.StackPop()
} }
// If any operand is unknown then our result is automatically unknown
for _, ty := range exprs {
if ty == ast.TypeUnknown {
v.StackPush(ast.TypeUnknown)
return tc.n, nil
}
}
switch tc.n.Op {
case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr:
return tc.checkLogical(v, exprs)
case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual,
ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan,
ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual:
return tc.checkComparison(v, exprs)
default:
return tc.checkNumeric(v, exprs)
}
}
func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
// Determine the resulting type we want. We do this by going over // Determine the resulting type we want. We do this by going over
// every expression until we find one with a type we recognize. // every expression until we find one with a type we recognize.
// We do this because the first expr might be a string ("var.foo") // We do this because the first expr might be a string ("var.foo")
@ -110,20 +141,11 @@ func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) {
mathFunc := "__builtin_IntMath" mathFunc := "__builtin_IntMath"
mathType := ast.TypeInt mathType := ast.TypeInt
for _, v := range exprs { for _, v := range exprs {
exit := true // We assume int math but if we find ANY float, the entire
switch v { // expression turns into floating point math.
case ast.TypeInt: if v == ast.TypeFloat {
mathFunc = "__builtin_IntMath"
mathType = v
case ast.TypeFloat:
mathFunc = "__builtin_FloatMath" mathFunc = "__builtin_FloatMath"
mathType = v mathType = v
default:
exit = false
}
// We found the type, so leave
if exit {
break break
} }
} }
@ -167,6 +189,131 @@ func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) {
}, nil }, nil
} }
func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
if len(exprs) != 2 {
// This should never happen, because the parser never produces
// nodes that violate this.
return nil, fmt.Errorf(
"comparison operators must have exactly two operands",
)
}
// The first operand always dictates the type for a comparison.
compareFunc := ""
compareType := exprs[0]
switch compareType {
case ast.TypeBool:
compareFunc = "__builtin_BoolCompare"
case ast.TypeFloat:
compareFunc = "__builtin_FloatCompare"
case ast.TypeInt:
compareFunc = "__builtin_IntCompare"
case ast.TypeString:
compareFunc = "__builtin_StringCompare"
default:
return nil, fmt.Errorf(
"comparison operators apply only to bool, float, int, and string",
)
}
// For non-equality comparisons, we will do implicit conversions to
// integer types if possible. In this case, we need to go through and
// determine the type of comparison we're doing to enable the implicit
// conversion.
if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual {
compareFunc = "__builtin_IntCompare"
compareType = ast.TypeInt
for _, expr := range exprs {
if expr == ast.TypeFloat {
compareFunc = "__builtin_FloatCompare"
compareType = ast.TypeFloat
break
}
}
}
// Verify (and possibly, convert) the args
for i, arg := range exprs {
if arg != compareType {
cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i])
if cn != nil {
tc.n.Exprs[i] = cn
continue
}
return nil, fmt.Errorf(
"operand %d should be %s, got %s",
i+1, compareType, arg,
)
}
}
// Only ints and floats can have the <, >, <= and >= operators applied
switch tc.n.Op {
case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual:
// anything goes
default:
switch compareType {
case ast.TypeFloat, ast.TypeInt:
// fine
default:
return nil, fmt.Errorf(
"<, >, <= and >= may apply only to int and float values",
)
}
}
// Comparison operators always return bool
v.StackPush(ast.TypeBool)
// Replace our node with a call to the proper function. This isn't
// type checked but we already verified types.
args := make([]ast.Node, len(tc.n.Exprs)+1)
args[0] = &ast.LiteralNode{
Value: tc.n.Op,
Typex: ast.TypeInt,
Posx: tc.n.Pos(),
}
copy(args[1:], tc.n.Exprs)
return &ast.Call{
Func: compareFunc,
Args: args,
Posx: tc.n.Pos(),
}, nil
}
func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
for i, t := range exprs {
if t != ast.TypeBool {
cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i])
if cn == nil {
return nil, fmt.Errorf(
"logical operators require boolean operands, not %s",
t,
)
}
tc.n.Exprs[i] = cn
}
}
// Return type is always boolean
v.StackPush(ast.TypeBool)
// Arithmetic nodes are replaced with a call to a built-in function
args := make([]ast.Node, len(tc.n.Exprs)+1)
args[0] = &ast.LiteralNode{
Value: tc.n.Op,
Typex: ast.TypeInt,
Posx: tc.n.Pos(),
}
copy(args[1:], tc.n.Exprs)
return &ast.Call{
Func: "__builtin_Logical",
Args: args,
Posx: tc.n.Pos(),
}, nil
}
type typeCheckCall struct { type typeCheckCall struct {
n *ast.Call n *ast.Call
} }
@ -190,6 +337,11 @@ func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) {
continue continue
} }
if args[i] == ast.TypeUnknown {
v.StackPush(ast.TypeUnknown)
return tc.n, nil
}
if args[i] != expected { if args[i] != expected {
cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i]) cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i])
if cn != nil { if cn != nil {
@ -207,6 +359,11 @@ func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) {
if function.Variadic && function.VariadicType != ast.TypeAny { if function.Variadic && function.VariadicType != ast.TypeAny {
args = args[len(function.ArgTypes):] args = args[len(function.ArgTypes):]
for i, t := range args { for i, t := range args {
if t == ast.TypeUnknown {
v.StackPush(ast.TypeUnknown)
return tc.n, nil
}
if t != function.VariadicType { if t != function.VariadicType {
realI := i + len(function.ArgTypes) realI := i + len(function.ArgTypes)
cn := v.ImplicitConversion( cn := v.ImplicitConversion(
@ -230,6 +387,90 @@ func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) {
return tc.n, nil return tc.n, nil
} }
type typeCheckConditional struct {
n *ast.Conditional
}
func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) {
// On the stack we have the types of the condition, true and false
// expressions, but they are in reverse order.
falseType := v.StackPop()
trueType := v.StackPop()
condType := v.StackPop()
if condType == ast.TypeUnknown {
v.StackPush(ast.TypeUnknown)
return tc.n, nil
}
if condType != ast.TypeBool {
cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr)
if cn == nil {
return nil, fmt.Errorf(
"condition must be type bool, not %s", condType.Printable(),
)
}
tc.n.CondExpr = cn
}
// The types of the true and false expression must match
if trueType != falseType && trueType != ast.TypeUnknown && falseType != ast.TypeUnknown {
// Since passing around stringified versions of other types is
// common, we pragmatically allow the false expression to dictate
// the result type when the true expression is a string.
if trueType == ast.TypeString {
cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr)
if cn == nil {
return nil, fmt.Errorf(
"true and false expression types must match; have %s and %s",
trueType.Printable(), falseType.Printable(),
)
}
tc.n.TrueExpr = cn
trueType = falseType
} else {
cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr)
if cn == nil {
return nil, fmt.Errorf(
"true and false expression types must match; have %s and %s",
trueType.Printable(), falseType.Printable(),
)
}
tc.n.FalseExpr = cn
falseType = trueType
}
}
// Currently list and map types cannot be used, because we cannot
// generally assert that their element types are consistent.
// Such support might be added later, either by improving the type
// system or restricting usage to only variable and literal expressions,
// but for now this is simply prohibited because it doesn't seem to
// be a common enough case to be worth the complexity.
switch trueType {
case ast.TypeList:
return nil, fmt.Errorf(
"conditional operator cannot be used with list values",
)
case ast.TypeMap:
return nil, fmt.Errorf(
"conditional operator cannot be used with map values",
)
}
// Result type (guaranteed to also match falseType due to the above)
if trueType == ast.TypeUnknown {
// falseType may also be unknown, but that's okay because two
// unknowns mean our result is unknown anyway.
v.StackPush(falseType)
} else {
v.StackPush(trueType)
}
return tc.n, nil
}
type typeCheckOutput struct { type typeCheckOutput struct {
n *ast.Output n *ast.Output
} }
@ -241,20 +482,33 @@ func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) {
types[len(n.Exprs)-1-i] = v.StackPop() types[len(n.Exprs)-1-i] = v.StackPop()
} }
// If there is only one argument and it is a list, we evaluate to a list for _, ty := range types {
if len(types) == 1 && types[0] == ast.TypeList { if ty == ast.TypeUnknown {
v.StackPush(ast.TypeList) v.StackPush(ast.TypeUnknown)
return n, nil return tc.n, nil
}
} }
// If there is only one argument and it is a map, we evaluate to a map // If there is only one argument and it is a list, we evaluate to a list
if len(types) == 1 && types[0] == ast.TypeMap { if len(types) == 1 {
v.StackPush(ast.TypeMap) switch t := types[0]; t {
return n, nil case ast.TypeList:
fallthrough
case ast.TypeMap:
v.StackPush(t)
return n, nil
}
} }
// Otherwise, all concat args must be strings, so validate that // Otherwise, all concat args must be strings, so validate that
resultType := ast.TypeString
for i, t := range types { for i, t := range types {
if t == ast.TypeUnknown {
resultType = ast.TypeUnknown
continue
}
if t != ast.TypeString { if t != ast.TypeString {
cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i]) cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i])
if cn != nil { if cn != nil {
@ -267,8 +521,8 @@ func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) {
} }
} }
// This always results in type string // This always results in type string, unless there are unknowns
v.StackPush(ast.TypeString) v.StackPush(resultType)
return n, nil return n, nil
} }
@ -305,30 +559,40 @@ type typeCheckIndex struct {
} }
func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) { func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) {
keyType := v.StackPop()
targetType := v.StackPop()
if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown {
v.StackPush(ast.TypeUnknown)
return tc.n, nil
}
// Ensure we have a VariableAccess as the target // Ensure we have a VariableAccess as the target
varAccessNode, ok := tc.n.Target.(*ast.VariableAccess) varAccessNode, ok := tc.n.Target.(*ast.VariableAccess)
if !ok { if !ok {
return nil, fmt.Errorf("target of an index must be a VariableAccess node, was %T", tc.n.Target) return nil, fmt.Errorf(
"target of an index must be a VariableAccess node, was %T", tc.n.Target)
} }
// Get the variable // Get the variable
variable, ok := v.Scope.LookupVar(varAccessNode.Name) variable, ok := v.Scope.LookupVar(varAccessNode.Name)
if !ok { if !ok {
return nil, fmt.Errorf("unknown variable accessed: %s", varAccessNode.Name) return nil, fmt.Errorf(
"unknown variable accessed: %s", varAccessNode.Name)
} }
keyType, err := tc.n.Key.Type(v.Scope) switch targetType {
if err != nil {
return nil, err
}
switch variable.Type {
case ast.TypeList: case ast.TypeList:
if keyType != ast.TypeInt { if keyType != ast.TypeInt {
return nil, fmt.Errorf("key of an index must be an int, was %s", keyType) tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key)
if tc.n.Key == nil {
return nil, fmt.Errorf(
"key of an index must be an int, was %s", keyType)
}
} }
valType, err := ast.VariableListElementTypesAreHomogenous(varAccessNode.Name, variable.Value.([]ast.Variable)) valType, err := ast.VariableListElementTypesAreHomogenous(
varAccessNode.Name, variable.Value.([]ast.Variable))
if err != nil { if err != nil {
return tc.n, err return tc.n, err
} }
@ -337,10 +601,15 @@ func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) {
return tc.n, nil return tc.n, nil
case ast.TypeMap: case ast.TypeMap:
if keyType != ast.TypeString { if keyType != ast.TypeString {
return nil, fmt.Errorf("key of an index must be a string, was %s", keyType) tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key)
if tc.n.Key == nil {
return nil, fmt.Errorf(
"key of an index must be a string, was %s", keyType)
}
} }
valType, err := ast.VariableMapValueTypesAreHomogenous(varAccessNode.Name, variable.Value.(map[string]ast.Variable)) valType, err := ast.VariableMapValueTypesAreHomogenous(
varAccessNode.Name, variable.Value.(map[string]ast.Variable))
if err != nil { if err != nil {
return tc.n, err return tc.n, err
} }
@ -389,3 +658,11 @@ func (v *TypeCheck) StackPop() ast.Type {
x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1] x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1]
return x return x
} }
func (v *TypeCheck) StackPeek() ast.Type {
if len(v.Stack) == 0 {
return ast.TypeInvalid
}
return v.Stack[len(v.Stack)-1]
}

View File

@ -8,6 +8,11 @@ import (
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
) )
// UnknownValue is a sentinel value that can be used to denote
// that a value of a variable (or map element, list element, etc.)
// is unknown. This will always have the type ast.TypeUnknown.
const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
var hilMapstructureDecodeHookSlice []interface{} var hilMapstructureDecodeHookSlice []interface{}
var hilMapstructureDecodeHookStringSlice []string var hilMapstructureDecodeHookStringSlice []string
var hilMapstructureDecodeHookMap map[string]interface{} var hilMapstructureDecodeHookMap map[string]interface{}
@ -42,12 +47,33 @@ func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error {
} }
func InterfaceToVariable(input interface{}) (ast.Variable, error) { func InterfaceToVariable(input interface{}) (ast.Variable, error) {
if inputVariable, ok := input.(ast.Variable); ok { if iv, ok := input.(ast.Variable); ok {
return inputVariable, nil return iv, nil
}
// This is just to maintain backward compatibility
// after https://github.com/mitchellh/mapstructure/pull/98
if v, ok := input.([]ast.Variable); ok {
return ast.Variable{
Type: ast.TypeList,
Value: v,
}, nil
}
if v, ok := input.(map[string]ast.Variable); ok {
return ast.Variable{
Type: ast.TypeMap,
Value: v,
}, nil
} }
var stringVal string var stringVal string
if err := hilMapstructureWeakDecode(input, &stringVal); err == nil { if err := hilMapstructureWeakDecode(input, &stringVal); err == nil {
// Special case the unknown value to turn into "unknown"
if stringVal == UnknownValue {
return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil
}
// Otherwise return the string value
return ast.Variable{ return ast.Variable{
Type: ast.TypeString, Type: ast.TypeString,
Value: stringVal, Value: stringVal,
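
A sketch of how the sentinel above flows through decoding and evaluation, assuming the Parse/Eval API shown elsewhere in this update; the variable name is made up.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hil"
        "github.com/hashicorp/hil/ast"
    )

    func main() {
        // The sentinel string decodes to a variable of TypeUnknown.
        v, _ := hil.InterfaceToVariable(hil.UnknownValue)
        fmt.Println(v.Type) // TypeUnknown

        // Unknowns propagate: any interpolation touching one yields an
        // unknown result rather than an error.
        tree, _ := hil.Parse("prefix-${var.pending}")
        result, _ := hil.Eval(tree, &hil.EvalConfig{
            GlobalScope: &ast.BasicScope{
                VarMap: map[string]ast.Variable{
                    "var.pending": {Type: ast.TypeUnknown, Value: hil.UnknownValue},
                },
            },
        })
        fmt.Println(result.Type == hil.TypeUnknown, result.Value == hil.UnknownValue) // true true
    }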

View File

@ -2,6 +2,7 @@ package hil
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"sync" "sync"
@ -23,19 +24,6 @@ type EvalConfig struct {
// semantic check on an AST tree. This will be called with the root node. // semantic check on an AST tree. This will be called with the root node.
type SemanticChecker func(ast.Node) error type SemanticChecker func(ast.Node) error
// EvalType represents the type of the output returned from a HIL
// evaluation.
type EvalType uint32
const (
TypeInvalid EvalType = 0
TypeString EvalType = 1 << iota
TypeList
TypeMap
)
//go:generate stringer -type=EvalType
// EvaluationResult is a struct returned from the hil.Eval function, // EvaluationResult is a struct returned from the hil.Eval function,
// representing the result of an interpolation. Results are returned in their // representing the result of an interpolation. Results are returned in their
// "natural" Go structure rather than in terms of the HIL AST. For the types // "natural" Go structure rather than in terms of the HIL AST. For the types
@ -45,6 +33,7 @@ const (
// TypeString: string // TypeString: string
// TypeList: []interface{} // TypeList: []interface{}
// TypeMap: map[string]interface{} // TypeMap: map[string]interface{}
// TypeBool: bool // TypeBool: bool
type EvaluationResult struct { type EvaluationResult struct {
Type EvalType Type EvalType
Value interface{} Value interface{}
@ -55,12 +44,24 @@ type EvaluationResult struct {
// The error is described out of band in the accompanying error return value. // The error is described out of band in the accompanying error return value.
var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil} var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil}
// errExitUnknown is an internal error that, when returned, means the result
// is an unknown value. We use this for early exit.
var errExitUnknown = errors.New("unknown value")
func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) { func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
output, outputType, err := internalEval(root, config) output, outputType, err := internalEval(root, config)
if err != nil { if err != nil {
return InvalidResult, err return InvalidResult, err
} }
// If the result contains any nested unknowns then the result as a whole
// is unknown, so that callers only have to deal with "entirely known"
// or "entirely unknown" as outcomes.
if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) {
outputType = ast.TypeUnknown
output = UnknownValue
}
switch outputType { switch outputType {
case ast.TypeList: case ast.TypeList:
val, err := VariableToInterface(ast.Variable{ val, err := VariableToInterface(ast.Variable{
@ -77,7 +78,7 @@ func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
Value: output, Value: output,
}) })
return EvaluationResult{ return EvaluationResult{
Type: TypeMap, Type: TypeMap,
Value: val, Value: val,
}, err }, err
case ast.TypeString: case ast.TypeString:
@ -85,6 +86,16 @@ func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
Type: TypeString, Type: TypeString,
Value: output, Value: output,
}, nil }, nil
case ast.TypeBool:
return EvaluationResult{
Type: TypeBool,
Value: output,
}, nil
case ast.TypeUnknown:
return EvaluationResult{
Type: TypeUnknown,
Value: UnknownValue,
}, nil
default: default:
return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType) return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType)
} }
@ -110,6 +121,10 @@ func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, err
ast.TypeString: { ast.TypeString: {
ast.TypeInt: "__builtin_StringToInt", ast.TypeInt: "__builtin_StringToInt",
ast.TypeFloat: "__builtin_StringToFloat", ast.TypeFloat: "__builtin_StringToFloat",
ast.TypeBool: "__builtin_StringToBool",
},
ast.TypeBool: {
ast.TypeString: "__builtin_BoolToString",
}, },
} }
@ -167,6 +182,12 @@ func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) {
result = new(ast.LiteralNode) result = new(ast.LiteralNode)
} }
resultErr := v.err resultErr := v.err
if resultErr == errExitUnknown {
// This means the return value is unknown and we used the error
// as an early exit mechanism. Reset since the value on the stack
// should be the unknown value.
resultErr = nil
}
// Clear everything else so we aren't just dangling // Clear everything else so we aren't just dangling
v.Stack.Reset() v.Stack.Reset()
@ -201,6 +222,13 @@ func (v *evalVisitor) visit(raw ast.Node) ast.Node {
Value: out, Value: out,
Typex: outType, Typex: outType,
}) })
if outType == ast.TypeUnknown {
// Halt immediately
v.err = errExitUnknown
return raw
}
return raw return raw
} }
@ -212,6 +240,8 @@ func evalNode(raw ast.Node) (EvalNode, error) {
return &evalIndex{n}, nil return &evalIndex{n}, nil
case *ast.Call: case *ast.Call:
return &evalCall{n}, nil return &evalCall{n}, nil
case *ast.Conditional:
return &evalConditional{n}, nil
case *ast.Output: case *ast.Output:
return &evalOutput{n}, nil return &evalOutput{n}, nil
case *ast.LiteralNode: case *ast.LiteralNode:
@ -242,6 +272,10 @@ func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, e
args := make([]interface{}, len(v.Args)) args := make([]interface{}, len(v.Args))
for i, _ := range v.Args { for i, _ := range v.Args {
node := stack.Pop().(*ast.LiteralNode) node := stack.Pop().(*ast.LiteralNode)
if node.IsUnknown() {
// If any arguments are unknown then the result is automatically unknown
return UnknownValue, ast.TypeUnknown, nil
}
args[len(v.Args)-1-i] = node.Value args[len(v.Args)-1-i] = node.Value
} }
@ -254,42 +288,56 @@ func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, e
return result, function.ReturnType, nil return result, function.ReturnType, nil
} }
type evalConditional struct{ *ast.Conditional }
func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
// On the stack we have literal nodes representing the resulting values
// of the condition, true and false expressions, but they are in reverse
// order.
falseLit := stack.Pop().(*ast.LiteralNode)
trueLit := stack.Pop().(*ast.LiteralNode)
condLit := stack.Pop().(*ast.LiteralNode)
if condLit.IsUnknown() {
// If our conditional is unknown then our result is also unknown
return UnknownValue, ast.TypeUnknown, nil
}
if condLit.Value.(bool) {
return trueLit.Value, trueLit.Typex, nil
} else {
return falseLit.Value, trueLit.Typex, nil
}
}
type evalIndex struct{ *ast.Index } type evalIndex struct{ *ast.Index }
func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
evalVarAccess, err := evalNode(v.Target) key := stack.Pop().(*ast.LiteralNode)
if err != nil { target := stack.Pop().(*ast.LiteralNode)
return nil, ast.TypeInvalid, err
}
target, targetType, err := evalVarAccess.Eval(scope, stack)
evalKey, err := evalNode(v.Key)
if err != nil {
return nil, ast.TypeInvalid, err
}
key, keyType, err := evalKey.Eval(scope, stack)
if err != nil {
return nil, ast.TypeInvalid, err
}
variableName := v.Index.Target.(*ast.VariableAccess).Name variableName := v.Index.Target.(*ast.VariableAccess).Name
switch targetType { if key.IsUnknown() {
// If our key is unknown then our result is also unknown
return UnknownValue, ast.TypeUnknown, nil
}
// For target, we'll accept collections containing unknown values but
// we still need to catch when the collection itself is unknown, shallowly.
if target.Typex == ast.TypeUnknown {
return UnknownValue, ast.TypeUnknown, nil
}
switch target.Typex {
case ast.TypeList: case ast.TypeList:
if keyType != ast.TypeInt { return v.evalListIndex(variableName, target.Value, key.Value)
return nil, ast.TypeInvalid, fmt.Errorf("key for indexing list %q must be an int, is %s", variableName, keyType)
}
return v.evalListIndex(variableName, target, key)
case ast.TypeMap: case ast.TypeMap:
if keyType != ast.TypeString { return v.evalMapIndex(variableName, target.Value, key.Value)
return nil, ast.TypeInvalid, fmt.Errorf("key for indexing map %q must be a string, is %s", variableName, keyType)
}
return v.evalMapIndex(variableName, target, key)
default: default:
return nil, ast.TypeInvalid, fmt.Errorf("target %q for indexing must be ast.TypeList or ast.TypeMap, is %s", variableName, targetType) return nil, ast.TypeInvalid, fmt.Errorf(
"target %q for indexing must be ast.TypeList or ast.TypeMap, is %s",
variableName, target.Typex)
} }
} }
@ -298,12 +346,14 @@ func (v *evalIndex) evalListIndex(variableName string, target interface{}, key i
// is a list and key is an int // is a list and key is an int
list, ok := target.([]ast.Variable) list, ok := target.([]ast.Variable)
if !ok { if !ok {
return nil, ast.TypeInvalid, fmt.Errorf("cannot cast target to []Variable") return nil, ast.TypeInvalid, fmt.Errorf(
"cannot cast target to []Variable, is: %T", target)
} }
keyInt, ok := key.(int) keyInt, ok := key.(int)
if !ok { if !ok {
return nil, ast.TypeInvalid, fmt.Errorf("cannot cast key to int") return nil, ast.TypeInvalid, fmt.Errorf(
"cannot cast key to int, is: %T", key)
} }
if len(list) == 0 { if len(list) == 0 {
@ -311,12 +361,13 @@ func (v *evalIndex) evalListIndex(variableName string, target interface{}, key i
} }
if keyInt < 0 || len(list) < keyInt+1 { if keyInt < 0 || len(list) < keyInt+1 {
return nil, ast.TypeInvalid, fmt.Errorf("index %d out of range for list %s (max %d)", keyInt, variableName, len(list)) return nil, ast.TypeInvalid, fmt.Errorf(
"index %d out of range for list %s (max %d)",
keyInt, variableName, len(list))
} }
returnVal := list[keyInt].Value returnVal := list[keyInt].Value
returnType := list[keyInt].Type returnType := list[keyInt].Type
return returnVal, returnType, nil return returnVal, returnType, nil
} }
@ -325,12 +376,14 @@ func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key in
// is a map and key is a string // is a map and key is a string
vmap, ok := target.(map[string]ast.Variable) vmap, ok := target.(map[string]ast.Variable)
if !ok { if !ok {
return nil, ast.TypeInvalid, fmt.Errorf("cannot cast target to map[string]Variable") return nil, ast.TypeInvalid, fmt.Errorf(
"cannot cast target to map[string]Variable, is: %T", target)
} }
keyString, ok := key.(string) keyString, ok := key.(string)
if !ok { if !ok {
return nil, ast.TypeInvalid, fmt.Errorf("cannot cast key to string") return nil, ast.TypeInvalid, fmt.Errorf(
"cannot cast key to string, is: %T", key)
} }
if len(vmap) == 0 { if len(vmap) == 0 {
@ -339,7 +392,8 @@ func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key in
value, ok := vmap[keyString] value, ok := vmap[keyString]
if !ok { if !ok {
return nil, ast.TypeInvalid, fmt.Errorf("key %q does not exist in map %s", keyString, variableName) return nil, ast.TypeInvalid, fmt.Errorf(
"key %q does not exist in map %s", keyString, variableName)
} }
return value.Value, value.Type, nil return value.Value, value.Type, nil
@ -351,21 +405,47 @@ func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type,
// The expressions should all be on the stack in reverse // The expressions should all be on the stack in reverse
// order. So pop them off, reverse their order, and concatenate. // order. So pop them off, reverse their order, and concatenate.
nodes := make([]*ast.LiteralNode, 0, len(v.Exprs)) nodes := make([]*ast.LiteralNode, 0, len(v.Exprs))
haveUnknown := false
for range v.Exprs { for range v.Exprs {
nodes = append(nodes, stack.Pop().(*ast.LiteralNode)) n := stack.Pop().(*ast.LiteralNode)
nodes = append(nodes, n)
// If we have any unknowns then the whole result is unknown
// (we must deal with this first, because the type checker can
// skip type conversions in the presence of unknowns, and thus
// any of our other nodes may be incorrectly typed.)
if n.IsUnknown() {
haveUnknown = true
}
}
if haveUnknown {
return UnknownValue, ast.TypeUnknown, nil
} }
// Special case the single list and map // Special case the single list and map
if len(nodes) == 1 && nodes[0].Typex == ast.TypeList { if len(nodes) == 1 {
return nodes[0].Value, ast.TypeList, nil switch t := nodes[0].Typex; t {
} case ast.TypeList:
if len(nodes) == 1 && nodes[0].Typex == ast.TypeMap { fallthrough
return nodes[0].Value, ast.TypeMap, nil case ast.TypeMap:
fallthrough
case ast.TypeUnknown:
return nodes[0].Value, t, nil
}
} }
// Otherwise concatenate the strings // Otherwise concatenate the strings
var buf bytes.Buffer var buf bytes.Buffer
for i := len(nodes) - 1; i >= 0; i-- { for i := len(nodes) - 1; i >= 0; i-- {
if nodes[i].Typex != ast.TypeString {
return nil, ast.TypeInvalid, fmt.Errorf(
"invalid output with %s value at index %d: %#v",
nodes[i].Typex,
i,
nodes[i].Value,
)
}
buf.WriteString(nodes[i].Value.(string)) buf.WriteString(nodes[i].Value.(string))
} }

16
vendor/github.com/hashicorp/hil/eval_type.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
package hil
//go:generate stringer -type=EvalType eval_type.go
// EvalType represents the type of the output returned from a HIL
// evaluation.
type EvalType uint32
const (
TypeInvalid EvalType = 0
TypeString EvalType = 1 << iota
TypeBool
TypeList
TypeMap
TypeUnknown
)

View File

@ -1,4 +1,4 @@
// Code generated by "stringer -type=EvalType"; DO NOT EDIT // Code generated by "stringer -type=EvalType eval_type.go"; DO NOT EDIT
package hil package hil
@ -7,15 +7,19 @@ import "fmt"
const ( const (
_EvalType_name_0 = "TypeInvalid" _EvalType_name_0 = "TypeInvalid"
_EvalType_name_1 = "TypeString" _EvalType_name_1 = "TypeString"
_EvalType_name_2 = "TypeList" _EvalType_name_2 = "TypeBool"
_EvalType_name_3 = "TypeMap" _EvalType_name_3 = "TypeList"
_EvalType_name_4 = "TypeMap"
_EvalType_name_5 = "TypeUnknown"
) )
var ( var (
_EvalType_index_0 = [...]uint8{0, 11} _EvalType_index_0 = [...]uint8{0, 11}
_EvalType_index_1 = [...]uint8{0, 10} _EvalType_index_1 = [...]uint8{0, 10}
_EvalType_index_2 = [...]uint8{0, 8} _EvalType_index_2 = [...]uint8{0, 8}
_EvalType_index_3 = [...]uint8{0, 7} _EvalType_index_3 = [...]uint8{0, 8}
_EvalType_index_4 = [...]uint8{0, 7}
_EvalType_index_5 = [...]uint8{0, 11}
) )
func (i EvalType) String() string { func (i EvalType) String() string {
@ -28,6 +32,10 @@ func (i EvalType) String() string {
return _EvalType_name_2 return _EvalType_name_2
case i == 8: case i == 8:
return _EvalType_name_3 return _EvalType_name_3
case i == 16:
return _EvalType_name_4
case i == 32:
return _EvalType_name_5
default: default:
return fmt.Sprintf("EvalType(%d)", i) return fmt.Sprintf("EvalType(%d)", i)
} }

6
vendor/github.com/hashicorp/hil/go.mod generated vendored Normal file
View File

@ -0,0 +1,6 @@
module github.com/hashicorp/hil
require (
github.com/mitchellh/mapstructure v1.1.2
github.com/mitchellh/reflectwalk v1.0.0
)

4
vendor/github.com/hashicorp/hil/go.sum generated vendored Normal file
View File

@ -0,0 +1,4 @@
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=

View File

@ -1,196 +0,0 @@
// This is the yacc input for creating the parser for interpolation
// expressions in Go. To build it, just run `go generate` on this
// package, as the lexer has the go generate pragma within it.
%{
package hil
import (
"github.com/hashicorp/hil/ast"
)
%}
%union {
node ast.Node
nodeList []ast.Node
str string
token *parserToken
}
%token <str> PROGRAM_BRACKET_LEFT PROGRAM_BRACKET_RIGHT
%token <str> PROGRAM_STRING_START PROGRAM_STRING_END
%token <str> PAREN_LEFT PAREN_RIGHT COMMA
%token <str> SQUARE_BRACKET_LEFT SQUARE_BRACKET_RIGHT
%token <token> ARITH_OP IDENTIFIER INTEGER FLOAT STRING
%type <node> expr interpolation literal literalModeTop literalModeValue
%type <nodeList> args
%left ARITH_OP
%%
top:
{
parserResult = &ast.LiteralNode{
Value: "",
Typex: ast.TypeString,
Posx: ast.Pos{Column: 1, Line: 1},
}
}
| literalModeTop
{
parserResult = $1
// We want to make sure that the top value is always an Output
// so that the return value is always a string, list or map from an
// interpolation.
//
// The logic for checking for a LiteralNode is a little annoying
// because functionally the AST is the same, but we do that because
// it makes for an easy literal check later (to check if a string
// has any interpolations).
if _, ok := $1.(*ast.Output); !ok {
if n, ok := $1.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString {
parserResult = &ast.Output{
Exprs: []ast.Node{$1},
Posx: $1.Pos(),
}
}
}
}
literalModeTop:
literalModeValue
{
$$ = $1
}
| literalModeTop literalModeValue
{
var result []ast.Node
if c, ok := $1.(*ast.Output); ok {
result = append(c.Exprs, $2)
} else {
result = []ast.Node{$1, $2}
}
$$ = &ast.Output{
Exprs: result,
Posx: result[0].Pos(),
}
}
literalModeValue:
literal
{
$$ = $1
}
| interpolation
{
$$ = $1
}
interpolation:
PROGRAM_BRACKET_LEFT expr PROGRAM_BRACKET_RIGHT
{
$$ = $2
}
expr:
PAREN_LEFT expr PAREN_RIGHT
{
$$ = $2
}
| literalModeTop
{
$$ = $1
}
| INTEGER
{
$$ = &ast.LiteralNode{
Value: $1.Value.(int),
Typex: ast.TypeInt,
Posx: $1.Pos,
}
}
| FLOAT
{
$$ = &ast.LiteralNode{
Value: $1.Value.(float64),
Typex: ast.TypeFloat,
Posx: $1.Pos,
}
}
| ARITH_OP expr
{
// This is REALLY jank. We assume that a singular ARITH_OP
// means 0 ARITH_OP expr, which... is weird. We don't want to
// support *, /, etc., only -. We should fix this later with a pure
// Go scanner/parser.
if $1.Value.(ast.ArithmeticOp) != ast.ArithmeticOpSub {
panic("Unary - is only allowed")
}
$$ = &ast.Arithmetic{
Op: $1.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{
&ast.LiteralNode{Value: 0, Typex: ast.TypeInt},
$2,
},
Posx: $2.Pos(),
}
}
| expr ARITH_OP expr
{
$$ = &ast.Arithmetic{
Op: $2.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{$1, $3},
Posx: $1.Pos(),
}
}
| IDENTIFIER
{
$$ = &ast.VariableAccess{Name: $1.Value.(string), Posx: $1.Pos}
}
| IDENTIFIER PAREN_LEFT args PAREN_RIGHT
{
$$ = &ast.Call{Func: $1.Value.(string), Args: $3, Posx: $1.Pos}
}
| IDENTIFIER SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT
{
$$ = &ast.Index{
Target: &ast.VariableAccess{
Name: $1.Value.(string),
Posx: $1.Pos,
},
Key: $3,
Posx: $1.Pos,
}
}
args:
{
$$ = nil
}
| args COMMA expr
{
$$ = append($1, $3)
}
| expr
{
$$ = append($$, $1)
}
literal:
STRING
{
$$ = &ast.LiteralNode{
Value: $1.Value.(string),
Typex: ast.TypeString,
Posx: $1.Pos,
}
}
%%

View File

@ -1,407 +0,0 @@
package hil
import (
"bytes"
"fmt"
"strconv"
"unicode"
"unicode/utf8"
"github.com/hashicorp/hil/ast"
)
//go:generate go tool yacc -p parser lang.y
// The parser expects the lexer to return 0 on EOF.
const lexEOF = 0
// The parser uses the type <prefix>Lex as a lexer. It must provide
// the methods Lex(*<prefix>SymType) int and Error(string).
type parserLex struct {
Err error
Input string
mode parserMode
interpolationDepth int
pos int
width int
col, line int
lastLine int
astPos *ast.Pos
}
// parserToken is the token yielded to the parser. The value can be
// determined within the parser type based on the enum value returned
// from Lex.
type parserToken struct {
Value interface{}
Pos ast.Pos
}
// parserMode keeps track of what mode we're in for the parser. We have
// two modes: literal and interpolation. Literal mode is when strings
// don't have to be quoted, and interpolations are defined as ${foo}.
// Interpolation mode means that strings have to be quoted and unquoted
// things are identifiers, such as foo("bar").
type parserMode uint8
const (
parserModeInvalid parserMode = 0
parserModeLiteral = 1 << iota
parserModeInterpolation
)
// The parser calls this method to get each new token.
func (x *parserLex) Lex(yylval *parserSymType) int {
// We always start in literal mode, since programs don't start
// in an interpolation. ex. "foo ${bar}" vs "bar" (and assuming interp.)
if x.mode == parserModeInvalid {
x.mode = parserModeLiteral
}
// Defer an update to set the proper column/line we read the next token.
defer func() {
if yylval.token != nil && yylval.token.Pos.Column == 0 {
yylval.token.Pos = *x.astPos
}
}()
x.astPos = nil
return x.lex(yylval)
}
func (x *parserLex) lex(yylval *parserSymType) int {
switch x.mode {
case parserModeLiteral:
return x.lexModeLiteral(yylval)
case parserModeInterpolation:
return x.lexModeInterpolation(yylval)
default:
x.Error(fmt.Sprintf("Unknown parse mode: %d", x.mode))
return lexEOF
}
}
func (x *parserLex) lexModeLiteral(yylval *parserSymType) int {
for {
c := x.next()
if c == lexEOF {
return lexEOF
}
// Are we starting an interpolation?
if c == '$' && x.peek() == '{' {
x.next()
x.interpolationDepth++
x.mode = parserModeInterpolation
return PROGRAM_BRACKET_LEFT
}
// We're just a normal string that isn't part of any interpolation yet.
x.backup()
result, terminated := x.lexString(yylval, x.interpolationDepth > 0)
// If the string terminated and we're within an interpolation already
// then that means that we finished a nested string, so pop
// back out to interpolation mode.
if terminated && x.interpolationDepth > 0 {
x.mode = parserModeInterpolation
// If the string is empty, just skip it. We're still in
// an interpolation so we do this to avoid empty nodes.
if yylval.token.Value.(string) == "" {
return x.lex(yylval)
}
}
return result
}
}
func (x *parserLex) lexModeInterpolation(yylval *parserSymType) int {
for {
c := x.next()
if c == lexEOF {
return lexEOF
}
// Ignore all whitespace
if unicode.IsSpace(c) {
continue
}
// If we see a double quote then we're lexing a string since
// we're in interpolation mode.
if c == '"' {
result, terminated := x.lexString(yylval, true)
if !terminated {
// The string didn't end, which means that we're in the
// middle of starting another interpolation.
x.mode = parserModeLiteral
// If the string is empty and we're starting an interpolation,
// then just skip it to avoid empty string AST nodes
if yylval.token.Value.(string) == "" {
return x.lex(yylval)
}
}
return result
}
// If we are seeing a number, it is the start of a number. Lex it.
if c >= '0' && c <= '9' {
x.backup()
return x.lexNumber(yylval)
}
switch c {
case '}':
// '}' means we ended the interpolation. Pop back into
// literal mode and reduce our interpolation depth.
x.interpolationDepth--
x.mode = parserModeLiteral
return PROGRAM_BRACKET_RIGHT
case '(':
return PAREN_LEFT
case ')':
return PAREN_RIGHT
case '[':
return SQUARE_BRACKET_LEFT
case ']':
return SQUARE_BRACKET_RIGHT
case ',':
return COMMA
case '+':
yylval.token = &parserToken{Value: ast.ArithmeticOpAdd}
return ARITH_OP
case '-':
yylval.token = &parserToken{Value: ast.ArithmeticOpSub}
return ARITH_OP
case '*':
yylval.token = &parserToken{Value: ast.ArithmeticOpMul}
return ARITH_OP
case '/':
yylval.token = &parserToken{Value: ast.ArithmeticOpDiv}
return ARITH_OP
case '%':
yylval.token = &parserToken{Value: ast.ArithmeticOpMod}
return ARITH_OP
default:
x.backup()
return x.lexId(yylval)
}
}
}
func (x *parserLex) lexId(yylval *parserSymType) int {
var b bytes.Buffer
var last rune
for {
c := x.next()
if c == lexEOF {
break
}
// We only allow * after a '.' for resource splats: type.name.*.id
// Otherwise, it's probably multiplication.
if c == '*' && last != '.' {
x.backup()
break
}
// If this isn't a character we want in an ID, return out.
// One day we should make this a regexp.
if c != '_' &&
c != '-' &&
c != '.' &&
c != '*' &&
!unicode.IsLetter(c) &&
!unicode.IsNumber(c) {
x.backup()
break
}
if _, err := b.WriteRune(c); err != nil {
x.Error(err.Error())
return lexEOF
}
last = c
}
yylval.token = &parserToken{Value: b.String()}
return IDENTIFIER
}
// lexNumber lexes out a number: an integer or a float.
func (x *parserLex) lexNumber(yylval *parserSymType) int {
var b bytes.Buffer
gotPeriod := false
for {
c := x.next()
if c == lexEOF {
break
}
// If we see a period, we might be getting a float.
if c == '.' {
// If we've already seen a period, then ignore it, and
// exit. This will probably result in a syntax error later.
if gotPeriod {
x.backup()
break
}
gotPeriod = true
} else if c < '0' || c > '9' {
// If we're not seeing a number, then also exit.
x.backup()
break
}
if _, err := b.WriteRune(c); err != nil {
x.Error(fmt.Sprintf("internal error: %s", err))
return lexEOF
}
}
// If we didn't see a period, it is an int
if !gotPeriod {
v, err := strconv.ParseInt(b.String(), 0, 0)
if err != nil {
x.Error(fmt.Sprintf("expected number: %s", err))
return lexEOF
}
yylval.token = &parserToken{Value: int(v)}
return INTEGER
}
// If we did see a period, it is a float
f, err := strconv.ParseFloat(b.String(), 64)
if err != nil {
x.Error(fmt.Sprintf("expected float: %s", err))
return lexEOF
}
yylval.token = &parserToken{Value: f}
return FLOAT
}
func (x *parserLex) lexString(yylval *parserSymType, quoted bool) (int, bool) {
var b bytes.Buffer
terminated := false
for {
c := x.next()
if c == lexEOF {
if quoted {
x.Error("unterminated string")
}
break
}
// Behavior is a bit different if we're lexing within a quoted string.
if quoted {
// If it's a double quote, we've reached the end of the string
if c == '"' {
terminated = true
break
}
// Let's check to see if we're escaping anything.
if c == '\\' {
switch n := x.next(); n {
case '\\', '"':
c = n
case 'n':
c = '\n'
default:
x.backup()
}
}
}
// If we hit a dollar sign, then check if we're starting
// another interpolation. If so, then we're done.
if c == '$' {
n := x.peek()
// If it is '{', then we're starting another interpolation
if n == '{' {
x.backup()
break
}
// If it is '$', then we're escaping a dollar sign
if n == '$' {
x.next()
}
}
if _, err := b.WriteRune(c); err != nil {
x.Error(err.Error())
return lexEOF, false
}
}
yylval.token = &parserToken{Value: b.String()}
return STRING, terminated
}
// Return the next rune for the lexer.
func (x *parserLex) next() rune {
if int(x.pos) >= len(x.Input) {
x.width = 0
return lexEOF
}
r, w := utf8.DecodeRuneInString(x.Input[x.pos:])
x.width = w
x.pos += x.width
if x.line == 0 {
x.line = 1
x.col = 1
} else {
x.col += 1
}
if r == '\n' {
x.lastLine = x.col
x.line += 1
x.col = 1
}
if x.astPos == nil {
x.astPos = &ast.Pos{Column: x.col, Line: x.line}
}
return r
}
// peek returns but does not consume the next rune in the input
func (x *parserLex) peek() rune {
r := x.next()
x.backup()
return r
}
// backup steps back one rune. Can only be called once per next.
func (x *parserLex) backup() {
x.pos -= x.width
x.col -= 1
// If we are at column 0, we're backing up across a line boundary
// so we need to be careful to get the proper value.
if x.col == 0 {
x.col = x.lastLine
x.line -= 1
}
}
// The parser calls this method on a parse error.
func (x *parserLex) Error(s string) {
x.Err = fmt.Errorf("parse error: %s", s)
}

@ -1,30 +1,29 @@
package hil package hil
import ( import (
"sync"
"github.com/hashicorp/hil/ast" "github.com/hashicorp/hil/ast"
"github.com/hashicorp/hil/parser"
"github.com/hashicorp/hil/scanner"
) )
var parserLock sync.Mutex
var parserResult ast.Node
// Parse parses the given program and returns an executable AST tree. // Parse parses the given program and returns an executable AST tree.
//
// Syntax errors are returned with error having the dynamic type
// *parser.ParseError, which gives the caller access to the source position
// where the error was found, which allows (for example) combining it with
// a known source filename to add context to the error message.
func Parse(v string) (ast.Node, error) { func Parse(v string) (ast.Node, error) {
// Unfortunately due to the way that goyacc generated parsers are return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1})
// formatted, we can only do a single parse at a time without a lot }
// of extra work. In the future we can remove this limitation.
parserLock.Lock() // ParseWithPosition is like Parse except that it overrides the source
defer parserLock.Unlock() // row and column position of the first character in the string, which should
// be 1-based.
// Reset our globals //
parserResult = nil // This can be used when HIL is embedded in another language and the outer
// parser knows the row and column where the HIL expression started within
// Create the lexer // the overall source file.
lex := &parserLex{Input: v} func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) {
ch := scanner.Scan(v, pos)
// Parse! return parser.Parse(ch)
parserParse(lex)
return parserResult, lex.Err
} }
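
A minimal usage sketch of the two entry points above, assuming only the import paths already shown in this diff (github.com/hashicorp/hil and github.com/hashicorp/hil/ast); it is illustrative rather than part of the vendored code:

package main

import (
    "fmt"

    "github.com/hashicorp/hil"
    "github.com/hashicorp/hil/ast"
)

func main() {
    // Parse starts counting positions at line 1, column 1.
    node, err := hil.Parse("hello ${name}")
    if err != nil {
        panic(err)
    }
    pos := node.Pos()
    fmt.Println(pos.Line, pos.Column) // 1 1

    // ParseWithPosition reports positions relative to an enclosing file,
    // here pretending the expression starts at line 10, column 5.
    node, err = hil.ParseWithPosition("${name}", ast.Pos{Line: 10, Column: 5})
    if err != nil {
        panic(err)
    }
    pos = node.Pos()
    fmt.Println(pos.Line, pos.Column) // 10 5
}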

45
vendor/github.com/hashicorp/hil/parser/binary_op.go generated vendored Normal file
@ -0,0 +1,45 @@
package parser
import (
"github.com/hashicorp/hil/ast"
"github.com/hashicorp/hil/scanner"
)
var binaryOps []map[scanner.TokenType]ast.ArithmeticOp
func init() {
// This operation table maps from the operator's scanner token type
// to the AST arithmetic operation. All expressions produced from
// binary operators are *ast.Arithmetic nodes.
//
// Binary operator groups are listed in order of precedence, with
// the *lowest* precedence first. Operators within the same group
// have left-to-right associativity.
binaryOps = []map[scanner.TokenType]ast.ArithmeticOp{
{
scanner.OR: ast.ArithmeticOpLogicalOr,
},
{
scanner.AND: ast.ArithmeticOpLogicalAnd,
},
{
scanner.EQUAL: ast.ArithmeticOpEqual,
scanner.NOTEQUAL: ast.ArithmeticOpNotEqual,
},
{
scanner.GT: ast.ArithmeticOpGreaterThan,
scanner.GTE: ast.ArithmeticOpGreaterThanOrEqual,
scanner.LT: ast.ArithmeticOpLessThan,
scanner.LTE: ast.ArithmeticOpLessThanOrEqual,
},
{
scanner.PLUS: ast.ArithmeticOpAdd,
scanner.MINUS: ast.ArithmeticOpSub,
},
{
scanner.STAR: ast.ArithmeticOpMul,
scanner.SLASH: ast.ArithmeticOpDiv,
scanner.PERCENT: ast.ArithmeticOpMod,
},
}
}
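
To illustrate the effect of the precedence table above, a small sketch assuming the top-level hil.Parse wrapper shown earlier in this diff: multiplication sits in a later, tighter-binding group than addition, so the root node of "1 + 2 * 3" is the addition.

package main

import (
    "fmt"

    "github.com/hashicorp/hil"
    "github.com/hashicorp/hil/ast"
)

func main() {
    node, err := hil.Parse("${1 + 2 * 3}")
    if err != nil {
        panic(err)
    }
    // The single expression is wrapped in an *ast.Output by the parser.
    root := node.(*ast.Output).Exprs[0].(*ast.Arithmetic)
    fmt.Println(root.Op == ast.ArithmeticOpAdd) // true: (1 + (2 * 3))
}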

38
vendor/github.com/hashicorp/hil/parser/error.go generated vendored Normal file
@ -0,0 +1,38 @@
package parser
import (
"fmt"
"github.com/hashicorp/hil/ast"
"github.com/hashicorp/hil/scanner"
)
type ParseError struct {
Message string
Pos ast.Pos
}
func Errorf(pos ast.Pos, format string, args ...interface{}) error {
return &ParseError{
Message: fmt.Sprintf(format, args...),
Pos: pos,
}
}
// TokenErrorf is a convenient wrapper around Errorf that uses the
// position of the given token.
func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error {
return Errorf(token.Pos, format, args...)
}
func ExpectationError(wanted string, got *scanner.Token) error {
return TokenErrorf(got, "expected %s but found %s", wanted, got)
}
func (e *ParseError) Error() string {
return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message)
}
func (e *ParseError) String() string {
return e.Error()
}
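
A brief sketch of how a caller might surface the position carried by these errors, assuming the scanner and parser packages from this diff; the input string is just an arbitrary example of a syntax error:

package main

import (
    "fmt"

    "github.com/hashicorp/hil/ast"
    "github.com/hashicorp/hil/parser"
    "github.com/hashicorp/hil/scanner"
)

func main() {
    // "${1 +}" is missing the right-hand operand of the addition.
    ch := scanner.Scan("${1 +}", ast.Pos{Line: 1, Column: 1})
    if _, err := parser.Parse(ch); err != nil {
        if perr, ok := err.(*parser.ParseError); ok {
            fmt.Printf("line %d, column %d: %s\n",
                perr.Pos.Line, perr.Pos.Column, perr.Message)
        }
    }
}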

28
vendor/github.com/hashicorp/hil/parser/fuzz.go generated vendored Normal file
@ -0,0 +1,28 @@
// +build gofuzz
package parser
import (
"github.com/hashicorp/hil/ast"
"github.com/hashicorp/hil/scanner"
)
// This is a fuzz testing function designed to be used with go-fuzz:
// https://github.com/dvyukov/go-fuzz
//
// It's not included in a normal build due to the gofuzz build tag above.
//
// There are some input files that you can use as a seed corpus for go-fuzz
// in the directory ./fuzz-corpus .
func Fuzz(data []byte) int {
str := string(data)
ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1})
_, err := Parse(ch)
if err != nil {
return 0
}
return 1
}

522
vendor/github.com/hashicorp/hil/parser/parser.go generated vendored Normal file
@ -0,0 +1,522 @@
package parser
import (
"strconv"
"unicode/utf8"
"github.com/hashicorp/hil/ast"
"github.com/hashicorp/hil/scanner"
)
func Parse(ch <-chan *scanner.Token) (ast.Node, error) {
peeker := scanner.NewPeeker(ch)
parser := &parser{peeker}
output, err := parser.ParseTopLevel()
peeker.Close()
return output, err
}
type parser struct {
peeker *scanner.Peeker
}
func (p *parser) ParseTopLevel() (ast.Node, error) {
return p.parseInterpolationSeq(false)
}
func (p *parser) ParseQuoted() (ast.Node, error) {
return p.parseInterpolationSeq(true)
}
// parseInterpolationSeq parses either the top-level sequence of literals
// and interpolation expressions or a similar sequence within a quoted
// string inside an interpolation expression. The latter case is requested
// by setting 'quoted' to true.
func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) {
literalType := scanner.LITERAL
endType := scanner.EOF
if quoted {
// exceptions for quoted sequences
literalType = scanner.STRING
endType = scanner.CQUOTE
}
startPos := p.peeker.Peek().Pos
if quoted {
tok := p.peeker.Read()
if tok.Type != scanner.OQUOTE {
return nil, ExpectationError("open quote", tok)
}
}
var exprs []ast.Node
for {
tok := p.peeker.Read()
if tok.Type == endType {
break
}
switch tok.Type {
case literalType:
val, err := p.parseStringToken(tok)
if err != nil {
return nil, err
}
exprs = append(exprs, &ast.LiteralNode{
Value: val,
Typex: ast.TypeString,
Posx: tok.Pos,
})
case scanner.BEGIN:
expr, err := p.ParseInterpolation()
if err != nil {
return nil, err
}
exprs = append(exprs, expr)
default:
return nil, ExpectationError(`"${"`, tok)
}
}
if len(exprs) == 0 {
// If we have no parts at all then the input must've
// been an empty string.
exprs = append(exprs, &ast.LiteralNode{
Value: "",
Typex: ast.TypeString,
Posx: startPos,
})
}
// As a special case, if our "Output" contains only one expression
// and it's a literal string then we'll hoist it up to be our
// direct return value, so callers can easily recognize a string
// that has no interpolations at all.
if len(exprs) == 1 {
if lit, ok := exprs[0].(*ast.LiteralNode); ok {
if lit.Typex == ast.TypeString {
return lit, nil
}
}
}
return &ast.Output{
Exprs: exprs,
Posx: startPos,
}, nil
}
// parseStringToken takes a token of either LITERAL or STRING type and
// returns the interpreted string, after processing any relevant
// escape sequences.
func (p *parser) parseStringToken(tok *scanner.Token) (string, error) {
var backslashes bool
switch tok.Type {
case scanner.LITERAL:
backslashes = false
case scanner.STRING:
backslashes = true
default:
panic("unsupported string token type")
}
raw := []byte(tok.Content)
buf := make([]byte, 0, len(raw))
for i := 0; i < len(raw); i++ {
b := raw[i]
more := len(raw) > (i + 1)
if b == '$' {
if more && raw[i+1] == '$' {
// skip over the second dollar sign
i++
}
} else if backslashes && b == '\\' {
if !more {
return "", Errorf(
ast.Pos{
Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
Line: tok.Pos.Line,
},
`unfinished backslash escape sequence`,
)
}
escapeType := raw[i+1]
switch escapeType {
case '\\':
// skip over the second slash
i++
case 'n':
b = '\n'
i++
case '"':
b = '"'
i++
default:
return "", Errorf(
ast.Pos{
Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
Line: tok.Pos.Line,
},
`invalid backslash escape sequence`,
)
}
}
buf = append(buf, b)
}
return string(buf), nil
}
func (p *parser) ParseInterpolation() (ast.Node, error) {
// By the time we're called, we're already "inside" the ${ sequence
// because the caller consumed the ${ token.
expr, err := p.ParseExpression()
if err != nil {
return nil, err
}
err = p.requireTokenType(scanner.END, `"}"`)
if err != nil {
return nil, err
}
return expr, nil
}
func (p *parser) ParseExpression() (ast.Node, error) {
return p.parseTernaryCond()
}
func (p *parser) parseTernaryCond() (ast.Node, error) {
// The ternary condition operator (.. ? .. : ..) behaves somewhat
// like a binary operator except that the "operator" is itself
// an expression enclosed in two punctuation characters.
// The middle expression is parsed as if the ? and : symbols
// were parentheses. The "rhs" (the "false expression") is then
// treated right-associatively so it behaves similarly to the
// middle in terms of precedence.
startPos := p.peeker.Peek().Pos
var cond, trueExpr, falseExpr ast.Node
var err error
cond, err = p.parseBinaryOps(binaryOps)
if err != nil {
return nil, err
}
next := p.peeker.Peek()
if next.Type != scanner.QUESTION {
return cond, nil
}
p.peeker.Read() // eat question mark
trueExpr, err = p.ParseExpression()
if err != nil {
return nil, err
}
colon := p.peeker.Read()
if colon.Type != scanner.COLON {
return nil, ExpectationError(":", colon)
}
falseExpr, err = p.ParseExpression()
if err != nil {
return nil, err
}
return &ast.Conditional{
CondExpr: cond,
TrueExpr: trueExpr,
FalseExpr: falseExpr,
Posx: startPos,
}, nil
}
// parseBinaryOps calls itself recursively to work through all of the
// operator precedence groups, and then eventually calls ParseExpressionTerm
// for each operand.
func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) {
if len(ops) == 0 {
// We've run out of operators, so now we'll just try to parse a term.
return p.ParseExpressionTerm()
}
thisLevel := ops[0]
remaining := ops[1:]
startPos := p.peeker.Peek().Pos
var lhs, rhs ast.Node
operator := ast.ArithmeticOpInvalid
var err error
// parse a term that might be the first operand of a binary
// expression or it might just be a standalone term, but
// we won't know until we've parsed it and can look ahead
// to see if there's an operator token.
lhs, err = p.parseBinaryOps(remaining)
if err != nil {
return nil, err
}
// We'll keep eating up arithmetic operators until we run
// out, so that operators with the same precedence will combine in a
// left-associative manner:
// a+b+c => (a+b)+c, not a+(b+c)
//
// Should we later want to have right-associative operators, a way
// to achieve that would be to call back up to ParseExpression here
// instead of iteratively parsing only the remaining operators.
for {
next := p.peeker.Peek()
var newOperator ast.ArithmeticOp
var ok bool
if newOperator, ok = thisLevel[next.Type]; !ok {
break
}
// Are we extending an expression started on
// the previous iteration?
if operator != ast.ArithmeticOpInvalid {
lhs = &ast.Arithmetic{
Op: operator,
Exprs: []ast.Node{lhs, rhs},
Posx: startPos,
}
}
operator = newOperator
p.peeker.Read() // eat operator token
rhs, err = p.parseBinaryOps(remaining)
if err != nil {
return nil, err
}
}
if operator != ast.ArithmeticOpInvalid {
return &ast.Arithmetic{
Op: operator,
Exprs: []ast.Node{lhs, rhs},
Posx: startPos,
}, nil
} else {
return lhs, nil
}
}
func (p *parser) ParseExpressionTerm() (ast.Node, error) {
next := p.peeker.Peek()
switch next.Type {
case scanner.OPAREN:
p.peeker.Read()
expr, err := p.ParseExpression()
if err != nil {
return nil, err
}
err = p.requireTokenType(scanner.CPAREN, `")"`)
return expr, err
case scanner.OQUOTE:
return p.ParseQuoted()
case scanner.INTEGER:
tok := p.peeker.Read()
val, err := strconv.Atoi(tok.Content)
if err != nil {
return nil, TokenErrorf(tok, "invalid integer: %s", err)
}
return &ast.LiteralNode{
Value: val,
Typex: ast.TypeInt,
Posx: tok.Pos,
}, nil
case scanner.FLOAT:
tok := p.peeker.Read()
val, err := strconv.ParseFloat(tok.Content, 64)
if err != nil {
return nil, TokenErrorf(tok, "invalid float: %s", err)
}
return &ast.LiteralNode{
Value: val,
Typex: ast.TypeFloat,
Posx: tok.Pos,
}, nil
case scanner.BOOL:
tok := p.peeker.Read()
// the scanner guarantees that tok.Content is either "true" or "false"
var val bool
if tok.Content[0] == 't' {
val = true
} else {
val = false
}
return &ast.LiteralNode{
Value: val,
Typex: ast.TypeBool,
Posx: tok.Pos,
}, nil
case scanner.MINUS:
opTok := p.peeker.Read()
// important to use ParseExpressionTerm rather than ParseExpression
// here, otherwise we can capture a following binary expression into
// our negation.
// e.g. -46+5 should parse as (0-46)+5, not 0-(46+5)
operand, err := p.ParseExpressionTerm()
if err != nil {
return nil, err
}
// The AST currently represents negative numbers as
// a binary subtraction of the number from zero.
return &ast.Arithmetic{
Op: ast.ArithmeticOpSub,
Exprs: []ast.Node{
&ast.LiteralNode{
Value: 0,
Typex: ast.TypeInt,
Posx: opTok.Pos,
},
operand,
},
Posx: opTok.Pos,
}, nil
case scanner.BANG:
opTok := p.peeker.Read()
// important to use ParseExpressionTerm rather than ParseExpression
// here, otherwise we can capture a following binary expression into
// our negation.
operand, err := p.ParseExpressionTerm()
if err != nil {
return nil, err
}
// The AST currently represents binary negation as an equality
// test with "false".
return &ast.Arithmetic{
Op: ast.ArithmeticOpEqual,
Exprs: []ast.Node{
&ast.LiteralNode{
Value: false,
Typex: ast.TypeBool,
Posx: opTok.Pos,
},
operand,
},
Posx: opTok.Pos,
}, nil
case scanner.IDENTIFIER:
return p.ParseScopeInteraction()
default:
return nil, ExpectationError("expression", next)
}
}
// ParseScopeInteraction parses the expression types that interact
// with the evaluation scope: variable access, function calls, and
// indexing.
//
// Indexing should actually be a distinct operator in its own right,
// so that e.g. it can be applied to the result of a function call,
// but for now we're preserving the behavior of the older yacc-based
// parser.
func (p *parser) ParseScopeInteraction() (ast.Node, error) {
first := p.peeker.Read()
startPos := first.Pos
if first.Type != scanner.IDENTIFIER {
return nil, ExpectationError("identifier", first)
}
next := p.peeker.Peek()
if next.Type == scanner.OPAREN {
// function call
funcName := first.Content
p.peeker.Read() // eat paren
var args []ast.Node
for {
if p.peeker.Peek().Type == scanner.CPAREN {
break
}
arg, err := p.ParseExpression()
if err != nil {
return nil, err
}
args = append(args, arg)
if p.peeker.Peek().Type == scanner.COMMA {
p.peeker.Read() // eat comma
continue
} else {
break
}
}
err := p.requireTokenType(scanner.CPAREN, `")"`)
if err != nil {
return nil, err
}
return &ast.Call{
Func: funcName,
Args: args,
Posx: startPos,
}, nil
}
varNode := &ast.VariableAccess{
Name: first.Content,
Posx: startPos,
}
if p.peeker.Peek().Type == scanner.OBRACKET {
// index operator
startPos := p.peeker.Read().Pos // eat bracket
indexExpr, err := p.ParseExpression()
if err != nil {
return nil, err
}
err = p.requireTokenType(scanner.CBRACKET, `"]"`)
if err != nil {
return nil, err
}
return &ast.Index{
Target: varNode,
Key: indexExpr,
Posx: startPos,
}, nil
}
return varNode, nil
}
// requireTokenType consumes the next token and returns an error if its
// type does not match the given type. nil is returned if the type matches.
//
// This is a helper around peeker.Read() for situations where the parser just
// wants to assert that a particular token type must be present.
func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error {
token := p.peeker.Read()
if token.Type != wantType {
return ExpectationError(wantName, token)
}
return nil
}
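
The single-literal hoisting in parseInterpolationSeq above means a caller can distinguish plain strings from templates by node type alone. A small sketch, assuming the hil.Parse wrapper from this diff:

package main

import (
    "fmt"

    "github.com/hashicorp/hil"
    "github.com/hashicorp/hil/ast"
)

func classify(input string) {
    node, err := hil.Parse(input)
    if err != nil {
        panic(err)
    }
    switch node.(type) {
    case *ast.LiteralNode:
        // A lone string literal is hoisted out of the Output wrapper.
        fmt.Printf("%q has no interpolations\n", input)
    case *ast.Output:
        fmt.Printf("%q contains interpolations\n", input)
    }
}

func main() {
    classify("just a plain string") // no interpolations
    classify("hello ${name}")       // contains interpolations
}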

55
vendor/github.com/hashicorp/hil/scanner/peeker.go generated vendored Normal file
@ -0,0 +1,55 @@
package scanner
// Peeker is a utility that wraps a token channel returned by Scan and
// provides an interface that allows a caller (e.g. the parser) to
// work with the token stream in a mode that allows one token of lookahead,
// and provides utilities for more convenient processing of the stream.
type Peeker struct {
ch <-chan *Token
peeked *Token
}
func NewPeeker(ch <-chan *Token) *Peeker {
return &Peeker{
ch: ch,
}
}
// Peek returns the next token in the stream without consuming it. A
// subsequent call to Read will return the same token.
func (p *Peeker) Peek() *Token {
if p.peeked == nil {
p.peeked = <-p.ch
}
return p.peeked
}
// Read consumes the next token in the stream and returns it.
func (p *Peeker) Read() *Token {
token := p.Peek()
// As a special case, we will produce the EOF token forever once
// it is reached.
if token.Type != EOF {
p.peeked = nil
}
return token
}
// Close ensures that the token stream has been exhausted, to prevent
// the goroutine in the underlying scanner from leaking.
//
// It's not necessary to call this if the caller reads the token stream
// to EOF, since that implicitly closes the scanner.
func (p *Peeker) Close() {
for _ = range p.ch {
// discard
}
// Install a synthetic EOF token in 'peeked' in case someone
// erroneously calls Peek() or Read() after we've closed.
p.peeked = &Token{
Type: EOF,
Content: "",
}
}
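
A short sketch of the Peek/Read/Close contract, assuming the Scan function added later in this diff:

package main

import (
    "fmt"

    "github.com/hashicorp/hil/ast"
    "github.com/hashicorp/hil/scanner"
)

func main() {
    ch := scanner.Scan("${foo + 1}", ast.Pos{Line: 1, Column: 1})
    peeker := scanner.NewPeeker(ch)
    defer peeker.Close()

    // Peek looks at the next token without consuming it; Read consumes it.
    fmt.Println(peeker.Peek().Type) // BEGIN
    fmt.Println(peeker.Read().Type) // BEGIN (same token)

    // Once EOF is reached, Read keeps returning the EOF token.
    for peeker.Read().Type != scanner.EOF {
    }
    fmt.Println(peeker.Read().Type) // EOF
}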

556
vendor/github.com/hashicorp/hil/scanner/scanner.go generated vendored Normal file
@ -0,0 +1,556 @@
package scanner
import (
"unicode"
"unicode/utf8"
"github.com/hashicorp/hil/ast"
)
// Scan returns a channel that receives Tokens from the given input string.
//
// The scanner's job is just to partition the string into meaningful parts.
// It doesn't do any transformation of the raw input string, so the caller
// must deal with any further interpretation required, such as parsing INTEGER
// tokens into real ints, or dealing with escape sequences in LITERAL or
// STRING tokens.
//
// Strings in the returned tokens are slices from the original string.
//
// startPos should be set to ast.InitPos unless the caller knows that
// this interpolation string is part of a larger file and knows the position
// of the first character in that larger file.
func Scan(s string, startPos ast.Pos) <-chan *Token {
ch := make(chan *Token)
go scan(s, ch, startPos)
return ch
}
func scan(s string, ch chan<- *Token, pos ast.Pos) {
// 'remain' starts off as the whole string but we gradually
// slice off the front of it as we work our way through.
remain := s
// nesting keeps track of how many ${ .. } sequences we are
// inside, so we can recognize the minor differences in syntax
// between outer string literals (LITERAL tokens) and quoted
// string literals (STRING tokens).
nesting := 0
// We're going to flip back and forth between parsing literals/strings
// and parsing interpolation sequences ${ .. } until we reach EOF or
// some INVALID token.
All:
for {
startPos := pos
// Literal string processing first, since the beginning of
// a string is always outside of an interpolation sequence.
literalVal, terminator := scanLiteral(remain, pos, nesting > 0)
if len(literalVal) > 0 {
litType := LITERAL
if nesting > 0 {
litType = STRING
}
ch <- &Token{
Type: litType,
Content: literalVal,
Pos: startPos,
}
remain = remain[len(literalVal):]
}
ch <- terminator
remain = remain[len(terminator.Content):]
pos = terminator.Pos
// Safe to use len() here because none of the terminator tokens
// can contain UTF-8 sequences.
pos.Column = pos.Column + len(terminator.Content)
switch terminator.Type {
case INVALID:
// Synthetic EOF after invalid token, since further scanning
// is likely to just produce more garbage.
ch <- &Token{
Type: EOF,
Content: "",
Pos: pos,
}
break All
case EOF:
// All done!
break All
case BEGIN:
nesting++
case CQUOTE:
// nothing special to do
default:
// Should never happen
panic("invalid string/literal terminator")
}
// Now we do the processing of the insides of ${ .. } sequences.
// This loop terminates when we encounter either a closing } or
// an opening ", which will cause us to return to literal processing.
Interpolation:
for {
token, size, newPos := scanInterpolationToken(remain, pos)
ch <- token
remain = remain[size:]
pos = newPos
switch token.Type {
case INVALID:
// Synthetic EOF after invalid token, since further scanning
// is likely to just produce more garbage.
ch <- &Token{
Type: EOF,
Content: "",
Pos: pos,
}
break All
case EOF:
// All done
// (though a syntax error that we'll catch in the parser)
break All
case END:
nesting--
if nesting < 0 {
// Can happen if there are unbalanced ${ and } sequences
// in the input, which we'll catch in the parser.
nesting = 0
}
break Interpolation
case OQUOTE:
// Beginning of nested quoted string
break Interpolation
}
}
}
close(ch)
}
// Returns the token found at the start of the given string, followed by
// the number of bytes that were consumed from the string and the adjusted
// source position.
//
// Note that the number of bytes consumed can be more than the length of
// the returned token contents if the string begins with whitespace, since
// it will be silently consumed before reading the token.
func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) {
pos := startPos
size := 0
// Consume whitespace, if any
for len(s) > 0 && byteIsSpace(s[0]) {
if s[0] == '\n' {
pos.Column = 1
pos.Line++
} else {
pos.Column++
}
size++
s = s[1:]
}
// Unexpected EOF during sequence
if len(s) == 0 {
return &Token{
Type: EOF,
Content: "",
Pos: pos,
}, size, pos
}
next := s[0]
var token *Token
switch next {
case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%', '?', ':':
// Easy punctuation symbols that don't have any special meaning
// during scanning, and that stand for themselves in the
// TokenType enumeration.
token = &Token{
Type: TokenType(next),
Content: s[:1],
Pos: pos,
}
case '}':
token = &Token{
Type: END,
Content: s[:1],
Pos: pos,
}
case '"':
token = &Token{
Type: OQUOTE,
Content: s[:1],
Pos: pos,
}
case '!':
if len(s) >= 2 && s[:2] == "!=" {
token = &Token{
Type: NOTEQUAL,
Content: s[:2],
Pos: pos,
}
} else {
token = &Token{
Type: BANG,
Content: s[:1],
Pos: pos,
}
}
case '<':
if len(s) >= 2 && s[:2] == "<=" {
token = &Token{
Type: LTE,
Content: s[:2],
Pos: pos,
}
} else {
token = &Token{
Type: LT,
Content: s[:1],
Pos: pos,
}
}
case '>':
if len(s) >= 2 && s[:2] == ">=" {
token = &Token{
Type: GTE,
Content: s[:2],
Pos: pos,
}
} else {
token = &Token{
Type: GT,
Content: s[:1],
Pos: pos,
}
}
case '=':
if len(s) >= 2 && s[:2] == "==" {
token = &Token{
Type: EQUAL,
Content: s[:2],
Pos: pos,
}
} else {
// A single equals is not a valid operator
token = &Token{
Type: INVALID,
Content: s[:1],
Pos: pos,
}
}
case '&':
if len(s) >= 2 && s[:2] == "&&" {
token = &Token{
Type: AND,
Content: s[:2],
Pos: pos,
}
} else {
token = &Token{
Type: INVALID,
Content: s[:1],
Pos: pos,
}
}
case '|':
if len(s) >= 2 && s[:2] == "||" {
token = &Token{
Type: OR,
Content: s[:2],
Pos: pos,
}
} else {
token = &Token{
Type: INVALID,
Content: s[:1],
Pos: pos,
}
}
default:
if next >= '0' && next <= '9' {
num, numType := scanNumber(s)
token = &Token{
Type: numType,
Content: num,
Pos: pos,
}
} else if stringStartsWithIdentifier(s) {
ident, runeLen := scanIdentifier(s)
tokenType := IDENTIFIER
if ident == "true" || ident == "false" {
tokenType = BOOL
}
token = &Token{
Type: tokenType,
Content: ident,
Pos: pos,
}
// Skip usual token handling because it doesn't
// know how to deal with UTF-8 sequences.
pos.Column = pos.Column + runeLen
return token, size + len(ident), pos
} else {
_, byteLen := utf8.DecodeRuneInString(s)
token = &Token{
Type: INVALID,
Content: s[:byteLen],
Pos: pos,
}
// Skip usual token handling because it doesn't
// know how to deal with UTF-8 sequences.
pos.Column = pos.Column + 1
return token, size + byteLen, pos
}
}
// Here we assume that the token content contains no UTF-8 sequences,
// because we dealt with UTF-8 characters as a special case where
// necessary above.
size = size + len(token.Content)
pos.Column = pos.Column + len(token.Content)
return token, size, pos
}
// Returns the (possibly-empty) prefix of the given string that represents
// a literal, followed by the token that marks the end of the literal.
func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) {
litLen := 0
pos := startPos
var terminator *Token
for {
if litLen >= len(s) {
if nested {
// We've ended in the middle of a quoted string,
// which means this token is actually invalid.
return "", &Token{
Type: INVALID,
Content: s,
Pos: startPos,
}
}
terminator = &Token{
Type: EOF,
Content: "",
Pos: pos,
}
break
}
next := s[litLen]
if next == '$' && len(s) > litLen+1 {
follow := s[litLen+1]
if follow == '{' {
terminator = &Token{
Type: BEGIN,
Content: s[litLen : litLen+2],
Pos: pos,
}
pos.Column = pos.Column + 2
break
} else if follow == '$' {
// Double-$ escapes the special processing of $,
// so we will consume both characters here.
pos.Column = pos.Column + 2
litLen = litLen + 2
continue
}
}
// special handling that applies only to quoted strings
if nested {
if next == '"' {
terminator = &Token{
Type: CQUOTE,
Content: s[litLen : litLen+1],
Pos: pos,
}
pos.Column = pos.Column + 1
break
}
// Escaped quote marks do not terminate the string.
//
// All we do here in the scanner is avoid terminating a string
// due to an escaped quote. The parser is responsible for the
// full handling of escape sequences, since it's able to produce
// better error messages than we can produce in here.
if next == '\\' && len(s) > litLen+1 {
follow := s[litLen+1]
if follow == '"' {
// \" escapes the special processing of ",
// so we will consume both characters here.
pos.Column = pos.Column + 2
litLen = litLen + 2
continue
} else if follow == '\\' {
// \\ escapes \
// so we will consume both characters here.
pos.Column = pos.Column + 2
litLen = litLen + 2
continue
}
}
}
if next == '\n' {
pos.Column = 1
pos.Line++
litLen++
} else {
pos.Column++
// "Column" measures runes, so we need to actually consume
// a valid UTF-8 character here.
_, size := utf8.DecodeRuneInString(s[litLen:])
litLen = litLen + size
}
}
return s[:litLen], terminator
}
// scanNumber returns the extent of the prefix of the string that represents
// a valid number, along with what type of number it represents: INT or FLOAT.
//
// scanNumber does only basic character analysis: numbers consist of digits
// and periods, with at least one period signalling a FLOAT. It's the parser's
// responsibility to validate the form and range of the number, such as ensuring
// that a FLOAT actually contains only one period, etc.
func scanNumber(s string) (string, TokenType) {
period := -1
byteLen := 0
numType := INTEGER
for {
if byteLen >= len(s) {
break
}
next := s[byteLen]
if next != '.' && (next < '0' || next > '9') {
// If our last value was a period, then we're not a float,
// we're just an integer that ends in a period.
if period == byteLen-1 {
byteLen--
numType = INTEGER
}
break
}
if next == '.' {
// If we've already seen a period, break out
if period >= 0 {
break
}
period = byteLen
numType = FLOAT
}
byteLen++
}
return s[:byteLen], numType
}
// scanIdentifier returns the extent of the prefix of the string that
// represents a valid identifier, along with the length of that prefix
// in runes.
//
// Identifiers may contain utf8-encoded non-Latin letters, which will
// cause the returned "rune length" to be shorter than the byte length
// of the returned string.
func scanIdentifier(s string) (string, int) {
byteLen := 0
runeLen := 0
for {
if byteLen >= len(s) {
break
}
nextRune, size := utf8.DecodeRuneInString(s[byteLen:])
if !(nextRune == '_' ||
nextRune == '-' ||
nextRune == '.' ||
nextRune == '*' ||
unicode.IsNumber(nextRune) ||
unicode.IsLetter(nextRune) ||
unicode.IsMark(nextRune)) {
break
}
// If we reach a star, it must be between periods to be part
// of the same identifier.
if nextRune == '*' && s[byteLen-1] != '.' {
break
}
// If our previous character was a star, then the current must
// be a period. Otherwise, undo that and exit.
if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' {
byteLen--
if s[byteLen-1] == '.' {
byteLen--
}
break
}
byteLen = byteLen + size
runeLen = runeLen + 1
}
return s[:byteLen], runeLen
}
// byteIsSpace implements a restrictive interpretation of spaces that includes
// only what's valid inside interpolation sequences: spaces, tabs, newlines.
func byteIsSpace(b byte) bool {
switch b {
case ' ', '\t', '\r', '\n':
return true
default:
return false
}
}
// stringStartsWithIdentifier returns true if the given string begins with
// a character that is a legal start of an identifier: an underscore or
// any character that Unicode considers to be a letter.
func stringStartsWithIdentifier(s string) bool {
if len(s) == 0 {
return false
}
first := s[0]
// Easy ASCII cases first
if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' {
return true
}
// If our first byte begins a UTF-8 sequence then the sequence might
// be a unicode letter.
if utf8.RuneStart(first) {
firstRune, _ := utf8.DecodeRuneInString(s)
if unicode.IsLetter(firstRune) {
return true
}
}
return false
}
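
A minimal sketch of consuming the raw token channel directly, assuming only the Scan function above; the identifier upper is arbitrary, since the scanner does not resolve names:

package main

import (
    "fmt"

    "github.com/hashicorp/hil/ast"
    "github.com/hashicorp/hil/scanner"
)

func main() {
    ch := scanner.Scan(`hi ${upper("x")}`, ast.Pos{Line: 1, Column: 1})
    // The scanner closes the channel after sending the final EOF token,
    // so ranging over it terminates on its own.
    for tok := range ch {
        fmt.Printf("%d:%d %s %q\n", tok.Pos.Line, tok.Pos.Column, tok.Type, tok.Content)
    }
}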

105
vendor/github.com/hashicorp/hil/scanner/token.go generated vendored Normal file
@ -0,0 +1,105 @@
package scanner
import (
"fmt"
"github.com/hashicorp/hil/ast"
)
type Token struct {
Type TokenType
Content string
Pos ast.Pos
}
//go:generate stringer -type=TokenType
type TokenType rune
const (
// Raw string data outside of ${ .. } sequences
LITERAL TokenType = 'o'
// STRING is like a LITERAL but it's inside a quoted string
// within a ${ ... } sequence, and so it can contain backslash
// escaping.
STRING TokenType = 'S'
// Other Literals
INTEGER TokenType = 'I'
FLOAT TokenType = 'F'
BOOL TokenType = 'B'
BEGIN TokenType = '$' // actually "${"
END TokenType = '}'
OQUOTE TokenType = '“' // Opening quote of a nested quoted sequence
CQUOTE TokenType = '”' // Closing quote of a nested quoted sequence
OPAREN TokenType = '('
CPAREN TokenType = ')'
OBRACKET TokenType = '['
CBRACKET TokenType = ']'
COMMA TokenType = ','
IDENTIFIER TokenType = 'i'
PERIOD TokenType = '.'
PLUS TokenType = '+'
MINUS TokenType = '-'
STAR TokenType = '*'
SLASH TokenType = '/'
PERCENT TokenType = '%'
AND TokenType = '∧'
OR TokenType = '∨'
BANG TokenType = '!'
EQUAL TokenType = '='
NOTEQUAL TokenType = '≠'
GT TokenType = '>'
LT TokenType = '<'
GTE TokenType = '≥'
LTE TokenType = '≤'
QUESTION TokenType = '?'
COLON TokenType = ':'
EOF TokenType = '␄'
// Produced for sequences that cannot be understood as valid tokens
// e.g. due to use of unrecognized punctuation.
INVALID TokenType = '�'
)
func (t *Token) String() string {
switch t.Type {
case EOF:
return "end of string"
case INVALID:
return fmt.Sprintf("invalid sequence %q", t.Content)
case INTEGER:
return fmt.Sprintf("integer %s", t.Content)
case FLOAT:
return fmt.Sprintf("float %s", t.Content)
case STRING:
return fmt.Sprintf("string %q", t.Content)
case LITERAL:
return fmt.Sprintf("literal %q", t.Content)
case OQUOTE:
return fmt.Sprintf("opening quote")
case CQUOTE:
return fmt.Sprintf("closing quote")
case AND:
return "&&"
case OR:
return "||"
case NOTEQUAL:
return "!="
case GTE:
return ">="
case LTE:
return "<="
default:
// The remaining token types have content that
// speaks for itself.
return fmt.Sprintf("%q", t.Content)
}
}

@ -0,0 +1,51 @@
// Code generated by "stringer -type=TokenType"; DO NOT EDIT
package scanner
import "fmt"
const _TokenType_name = "BANGBEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHCOLONLTEQUALGTQUESTIONBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEANDORNOTEQUALLTEGTEEOFINVALID"
var _TokenType_map = map[TokenType]string{
33: _TokenType_name[0:4],
36: _TokenType_name[4:9],
37: _TokenType_name[9:16],
40: _TokenType_name[16:22],
41: _TokenType_name[22:28],
42: _TokenType_name[28:32],
43: _TokenType_name[32:36],
44: _TokenType_name[36:41],
45: _TokenType_name[41:46],
46: _TokenType_name[46:52],
47: _TokenType_name[52:57],
58: _TokenType_name[57:62],
60: _TokenType_name[62:64],
61: _TokenType_name[64:69],
62: _TokenType_name[69:71],
63: _TokenType_name[71:79],
66: _TokenType_name[79:83],
70: _TokenType_name[83:88],
73: _TokenType_name[88:95],
83: _TokenType_name[95:101],
91: _TokenType_name[101:109],
93: _TokenType_name[109:117],
105: _TokenType_name[117:127],
111: _TokenType_name[127:134],
125: _TokenType_name[134:137],
8220: _TokenType_name[137:143],
8221: _TokenType_name[143:149],
8743: _TokenType_name[149:152],
8744: _TokenType_name[152:154],
8800: _TokenType_name[154:162],
8804: _TokenType_name[162:165],
8805: _TokenType_name[165:168],
9220: _TokenType_name[168:171],
65533: _TokenType_name[171:178],
}
func (i TokenType) String() string {
if str, ok := _TokenType_map[i]; ok {
return str
}
return fmt.Sprintf("TokenType(%d)", i)
}
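
A tiny sketch of the generated stringer in use, assuming the scanner package from this diff:

package main

import (
    "fmt"

    "github.com/hashicorp/hil/scanner"
)

func main() {
    fmt.Println(scanner.BEGIN)          // BEGIN
    fmt.Println(scanner.NOTEQUAL)       // NOTEQUAL
    fmt.Println(scanner.TokenType('@')) // TokenType(64): not in the map
}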

662
vendor/github.com/hashicorp/hil/y.go generated vendored
@ -1,662 +0,0 @@
//line lang.y:6
package hil
import __yyfmt__ "fmt"
//line lang.y:6
import (
"github.com/hashicorp/hil/ast"
)
//line lang.y:14
type parserSymType struct {
yys int
node ast.Node
nodeList []ast.Node
str string
token *parserToken
}
const PROGRAM_BRACKET_LEFT = 57346
const PROGRAM_BRACKET_RIGHT = 57347
const PROGRAM_STRING_START = 57348
const PROGRAM_STRING_END = 57349
const PAREN_LEFT = 57350
const PAREN_RIGHT = 57351
const COMMA = 57352
const SQUARE_BRACKET_LEFT = 57353
const SQUARE_BRACKET_RIGHT = 57354
const ARITH_OP = 57355
const IDENTIFIER = 57356
const INTEGER = 57357
const FLOAT = 57358
const STRING = 57359
var parserToknames = [...]string{
"$end",
"error",
"$unk",
"PROGRAM_BRACKET_LEFT",
"PROGRAM_BRACKET_RIGHT",
"PROGRAM_STRING_START",
"PROGRAM_STRING_END",
"PAREN_LEFT",
"PAREN_RIGHT",
"COMMA",
"SQUARE_BRACKET_LEFT",
"SQUARE_BRACKET_RIGHT",
"ARITH_OP",
"IDENTIFIER",
"INTEGER",
"FLOAT",
"STRING",
}
var parserStatenames = [...]string{}
const parserEofCode = 1
const parserErrCode = 2
const parserInitialStackSize = 16
//line lang.y:196
//line yacctab:1
var parserExca = [...]int{
-1, 1,
1, -1,
-2, 0,
}
const parserNprod = 21
const parserPrivate = 57344
var parserTokenNames []string
var parserStates []string
const parserLast = 37
var parserAct = [...]int{
9, 7, 29, 17, 23, 16, 17, 3, 17, 20,
8, 18, 21, 17, 6, 19, 27, 28, 22, 8,
1, 25, 26, 7, 11, 2, 24, 10, 4, 30,
5, 0, 14, 15, 12, 13, 6,
}
var parserPact = [...]int{
-3, -1000, -3, -1000, -1000, -1000, -1000, 19, -1000, 0,
19, -3, -1000, -1000, 19, 1, -1000, 19, -5, -1000,
19, 19, -1000, -1000, 7, -7, -10, -1000, 19, -1000,
-7,
}
var parserPgo = [...]int{
0, 0, 30, 28, 24, 7, 26, 20,
}
var parserR1 = [...]int{
0, 7, 7, 4, 4, 5, 5, 2, 1, 1,
1, 1, 1, 1, 1, 1, 1, 6, 6, 6,
3,
}
var parserR2 = [...]int{
0, 0, 1, 1, 2, 1, 1, 3, 3, 1,
1, 1, 2, 3, 1, 4, 4, 0, 3, 1,
1,
}
var parserChk = [...]int{
-1000, -7, -4, -5, -3, -2, 17, 4, -5, -1,
8, -4, 15, 16, 13, 14, 5, 13, -1, -1,
8, 11, -1, 9, -6, -1, -1, 9, 10, 12,
-1,
}
var parserDef = [...]int{
1, -2, 2, 3, 5, 6, 20, 0, 4, 0,
0, 9, 10, 11, 0, 14, 7, 0, 0, 12,
17, 0, 13, 8, 0, 19, 0, 15, 0, 16,
18,
}
var parserTok1 = [...]int{
1,
}
var parserTok2 = [...]int{
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17,
}
var parserTok3 = [...]int{
0,
}
var parserErrorMessages = [...]struct {
state int
token int
msg string
}{}
//line yaccpar:1
/* parser for yacc output */
var (
parserDebug = 0
parserErrorVerbose = false
)
type parserLexer interface {
Lex(lval *parserSymType) int
Error(s string)
}
type parserParser interface {
Parse(parserLexer) int
Lookahead() int
}
type parserParserImpl struct {
lval parserSymType
stack [parserInitialStackSize]parserSymType
char int
}
func (p *parserParserImpl) Lookahead() int {
return p.char
}
func parserNewParser() parserParser {
return &parserParserImpl{}
}
const parserFlag = -1000
func parserTokname(c int) string {
if c >= 1 && c-1 < len(parserToknames) {
if parserToknames[c-1] != "" {
return parserToknames[c-1]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func parserStatname(s int) string {
if s >= 0 && s < len(parserStatenames) {
if parserStatenames[s] != "" {
return parserStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func parserErrorMessage(state, lookAhead int) string {
const TOKSTART = 4
if !parserErrorVerbose {
return "syntax error"
}
for _, e := range parserErrorMessages {
if e.state == state && e.token == lookAhead {
return "syntax error: " + e.msg
}
}
res := "syntax error: unexpected " + parserTokname(lookAhead)
// To match Bison, suggest at most four expected tokens.
expected := make([]int, 0, 4)
// Look for shiftable tokens.
base := parserPact[state]
for tok := TOKSTART; tok-1 < len(parserToknames); tok++ {
if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok {
if len(expected) == cap(expected) {
return res
}
expected = append(expected, tok)
}
}
if parserDef[state] == -2 {
i := 0
for parserExca[i] != -1 || parserExca[i+1] != state {
i += 2
}
// Look for tokens that we accept or reduce.
for i += 2; parserExca[i] >= 0; i += 2 {
tok := parserExca[i]
if tok < TOKSTART || parserExca[i+1] == 0 {
continue
}
if len(expected) == cap(expected) {
return res
}
expected = append(expected, tok)
}
// If the default action is to accept or reduce, give up.
if parserExca[i+1] != 0 {
return res
}
}
for i, tok := range expected {
if i == 0 {
res += ", expecting "
} else {
res += " or "
}
res += parserTokname(tok)
}
return res
}
func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) {
token = 0
char = lex.Lex(lval)
if char <= 0 {
token = parserTok1[0]
goto out
}
if char < len(parserTok1) {
token = parserTok1[char]
goto out
}
if char >= parserPrivate {
if char < parserPrivate+len(parserTok2) {
token = parserTok2[char-parserPrivate]
goto out
}
}
for i := 0; i < len(parserTok3); i += 2 {
token = parserTok3[i+0]
if token == char {
token = parserTok3[i+1]
goto out
}
}
out:
if token == 0 {
token = parserTok2[1] /* unknown char */
}
if parserDebug >= 3 {
__yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char))
}
return char, token
}
func parserParse(parserlex parserLexer) int {
return parserNewParser().Parse(parserlex)
}
func (parserrcvr *parserParserImpl) Parse(parserlex parserLexer) int {
var parsern int
var parserVAL parserSymType
var parserDollar []parserSymType
_ = parserDollar // silence set and not used
parserS := parserrcvr.stack[:]
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
parserstate := 0
parserrcvr.char = -1
parsertoken := -1 // parserrcvr.char translated into internal numbering
defer func() {
// Make sure we report no lookahead when not parsing.
parserstate = -1
parserrcvr.char = -1
parsertoken = -1
}()
parserp := -1
goto parserstack
ret0:
return 0
ret1:
return 1
parserstack:
/* put a state and value onto the stack */
if parserDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate))
}
parserp++
if parserp >= len(parserS) {
nyys := make([]parserSymType, len(parserS)*2)
copy(nyys, parserS)
parserS = nyys
}
parserS[parserp] = parserVAL
parserS[parserp].yys = parserstate
parsernewstate:
parsern = parserPact[parserstate]
if parsern <= parserFlag {
goto parserdefault /* simple state */
}
if parserrcvr.char < 0 {
parserrcvr.char, parsertoken = parserlex1(parserlex, &parserrcvr.lval)
}
parsern += parsertoken
if parsern < 0 || parsern >= parserLast {
goto parserdefault
}
parsern = parserAct[parsern]
if parserChk[parsern] == parsertoken { /* valid shift */
parserrcvr.char = -1
parsertoken = -1
parserVAL = parserrcvr.lval
parserstate = parsern
if Errflag > 0 {
Errflag--
}
goto parserstack
}
parserdefault:
/* default state action */
parsern = parserDef[parserstate]
if parsern == -2 {
if parserrcvr.char < 0 {
parserrcvr.char, parsertoken = parserlex1(parserlex, &parserrcvr.lval)
}
/* look through exception table */
xi := 0
for {
if parserExca[xi+0] == -1 && parserExca[xi+1] == parserstate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
parsern = parserExca[xi+0]
if parsern < 0 || parsern == parsertoken {
break
}
}
parsern = parserExca[xi+1]
if parsern < 0 {
goto ret0
}
}
if parsern == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
parserlex.Error(parserErrorMessage(parserstate, parsertoken))
Nerrs++
if parserDebug >= 1 {
__yyfmt__.Printf("%s", parserStatname(parserstate))
__yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for parserp >= 0 {
parsern = parserPact[parserS[parserp].yys] + parserErrCode
if parsern >= 0 && parsern < parserLast {
parserstate = parserAct[parsern] /* simulate a shift of "error" */
if parserChk[parserstate] == parserErrCode {
goto parserstack
}
}
/* the current p has no shift on "error", pop stack */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", parserS[parserp].yys)
}
parserp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken))
}
if parsertoken == parserEofCode {
goto ret1
}
parserrcvr.char = -1
parsertoken = -1
goto parsernewstate /* try again in the same state */
}
}
/* reduction by production parsern */
if parserDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", parsern, parserStatname(parserstate))
}
parsernt := parsern
parserpt := parserp
_ = parserpt // guard against "declared and not used"
parserp -= parserR2[parsern]
// parserp is now the index of $0. Perform the default action. Iff the
// reduced production is ε, $1 is possibly out of range.
if parserp+1 >= len(parserS) {
nyys := make([]parserSymType, len(parserS)*2)
copy(nyys, parserS)
parserS = nyys
}
parserVAL = parserS[parserp+1]
/* consult goto table to find next state */
parsern = parserR1[parsern]
parserg := parserPgo[parsern]
parserj := parserg + parserS[parserp].yys + 1
if parserj >= parserLast {
parserstate = parserAct[parserg]
} else {
parserstate = parserAct[parserj]
if parserChk[parserstate] != -parsern {
parserstate = parserAct[parserg]
}
}
// dummy call; replaced with literal code
switch parsernt {
case 1:
parserDollar = parserS[parserpt-0 : parserpt+1]
//line lang.y:36
{
parserResult = &ast.LiteralNode{
Value: "",
Typex: ast.TypeString,
Posx: ast.Pos{Column: 1, Line: 1},
}
}
case 2:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:44
{
parserResult = parserDollar[1].node
// We want to make sure that the top value is always an Output
// so that the return value is always a string, list, or map from an
// interpolation.
//
// The logic for checking for a LiteralNode is a little annoying
// because functionally the AST is the same, but we do that because
// it makes for an easy literal check later (to check if a string
// has any interpolations).
if _, ok := parserDollar[1].node.(*ast.Output); !ok {
if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString {
parserResult = &ast.Output{
Exprs: []ast.Node{parserDollar[1].node},
Posx: parserDollar[1].node.Pos(),
}
}
}
}
case 3:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:67
{
parserVAL.node = parserDollar[1].node
}
case 4:
parserDollar = parserS[parserpt-2 : parserpt+1]
//line lang.y:71
{
var result []ast.Node
if c, ok := parserDollar[1].node.(*ast.Output); ok {
result = append(c.Exprs, parserDollar[2].node)
} else {
result = []ast.Node{parserDollar[1].node, parserDollar[2].node}
}
parserVAL.node = &ast.Output{
Exprs: result,
Posx: result[0].Pos(),
}
}
case 5:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:87
{
parserVAL.node = parserDollar[1].node
}
case 6:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:91
{
parserVAL.node = parserDollar[1].node
}
case 7:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:97
{
parserVAL.node = parserDollar[2].node
}
case 8:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:103
{
parserVAL.node = parserDollar[2].node
}
case 9:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:107
{
parserVAL.node = parserDollar[1].node
}
case 10:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:111
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(int),
Typex: ast.TypeInt,
Posx: parserDollar[1].token.Pos,
}
}
case 11:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:119
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(float64),
Typex: ast.TypeFloat,
Posx: parserDollar[1].token.Pos,
}
}
case 12:
parserDollar = parserS[parserpt-2 : parserpt+1]
//line lang.y:127
{
// This is REALLY jank. We assume that a singular ARITH_OP
// means 0 ARITH_OP expr, which... is weird. We don't want to
// support *, /, etc., only -. We should fix this later with a pure
// Go scanner/parser.
if parserDollar[1].token.Value.(ast.ArithmeticOp) != ast.ArithmeticOpSub {
panic("Unary - is only allowed")
}
parserVAL.node = &ast.Arithmetic{
Op: parserDollar[1].token.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{
&ast.LiteralNode{Value: 0, Typex: ast.TypeInt},
parserDollar[2].node,
},
Posx: parserDollar[2].node.Pos(),
}
}
case 13:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:146
{
parserVAL.node = &ast.Arithmetic{
Op: parserDollar[2].token.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node},
Posx: parserDollar[1].node.Pos(),
}
}
case 14:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:154
{
parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos}
}
case 15:
parserDollar = parserS[parserpt-4 : parserpt+1]
//line lang.y:158
{
parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos}
}
case 16:
parserDollar = parserS[parserpt-4 : parserpt+1]
//line lang.y:162
{
parserVAL.node = &ast.Index{
Target: &ast.VariableAccess{
Name: parserDollar[1].token.Value.(string),
Posx: parserDollar[1].token.Pos,
},
Key: parserDollar[3].node,
Posx: parserDollar[1].token.Pos,
}
}
case 17:
parserDollar = parserS[parserpt-0 : parserpt+1]
//line lang.y:174
{
parserVAL.nodeList = nil
}
case 18:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:178
{
parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node)
}
case 19:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:182
{
parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node)
}
case 20:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:188
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(string),
Typex: ast.TypeString,
Posx: parserDollar[1].token.Pos,
}
}
}
goto parserstack /* stack new state and value */
}

@ -1,328 +0,0 @@
state 0
$accept: .top $end
top: . (1)
PROGRAM_BRACKET_LEFT shift 7
STRING shift 6
. reduce 1 (src line 35)
interpolation goto 5
literal goto 4
literalModeTop goto 2
literalModeValue goto 3
top goto 1
state 1
$accept: top.$end
$end accept
. error
state 2
top: literalModeTop. (2)
literalModeTop: literalModeTop.literalModeValue
PROGRAM_BRACKET_LEFT shift 7
STRING shift 6
. reduce 2 (src line 43)
interpolation goto 5
literal goto 4
literalModeValue goto 8
state 3
literalModeTop: literalModeValue. (3)
. reduce 3 (src line 65)
state 4
literalModeValue: literal. (5)
. reduce 5 (src line 85)
state 5
literalModeValue: interpolation. (6)
. reduce 6 (src line 90)
state 6
literal: STRING. (20)
. reduce 20 (src line 186)
state 7
interpolation: PROGRAM_BRACKET_LEFT.expr PROGRAM_BRACKET_RIGHT
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 9
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 8
literalModeTop: literalModeTop literalModeValue. (4)
. reduce 4 (src line 70)
state 9
interpolation: PROGRAM_BRACKET_LEFT expr.PROGRAM_BRACKET_RIGHT
expr: expr.ARITH_OP expr
PROGRAM_BRACKET_RIGHT shift 16
ARITH_OP shift 17
. error
state 10
expr: PAREN_LEFT.expr PAREN_RIGHT
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 18
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 11
literalModeTop: literalModeTop.literalModeValue
expr: literalModeTop. (9)
PROGRAM_BRACKET_LEFT shift 7
STRING shift 6
. reduce 9 (src line 106)
interpolation goto 5
literal goto 4
literalModeValue goto 8
state 12
expr: INTEGER. (10)
. reduce 10 (src line 110)
state 13
expr: FLOAT. (11)
. reduce 11 (src line 118)
state 14
expr: ARITH_OP.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 19
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 15
expr: IDENTIFIER. (14)
expr: IDENTIFIER.PAREN_LEFT args PAREN_RIGHT
expr: IDENTIFIER.SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT
PAREN_LEFT shift 20
SQUARE_BRACKET_LEFT shift 21
. reduce 14 (src line 153)
state 16
interpolation: PROGRAM_BRACKET_LEFT expr PROGRAM_BRACKET_RIGHT. (7)
. reduce 7 (src line 95)
state 17
expr: expr ARITH_OP.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 22
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 18
expr: PAREN_LEFT expr.PAREN_RIGHT
expr: expr.ARITH_OP expr
PAREN_RIGHT shift 23
ARITH_OP shift 17
. error
state 19
expr: ARITH_OP expr. (12)
expr: expr.ARITH_OP expr
. reduce 12 (src line 126)
state 20
expr: IDENTIFIER PAREN_LEFT.args PAREN_RIGHT
args: . (17)
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. reduce 17 (src line 173)
expr goto 25
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
args goto 24
state 21
expr: IDENTIFIER SQUARE_BRACKET_LEFT.expr SQUARE_BRACKET_RIGHT
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 26
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 22
expr: expr.ARITH_OP expr
expr: expr ARITH_OP expr. (13)
. reduce 13 (src line 145)
state 23
expr: PAREN_LEFT expr PAREN_RIGHT. (8)
. reduce 8 (src line 101)
state 24
expr: IDENTIFIER PAREN_LEFT args.PAREN_RIGHT
args: args.COMMA expr
PAREN_RIGHT shift 27
COMMA shift 28
. error
state 25
expr: expr.ARITH_OP expr
args: expr. (19)
ARITH_OP shift 17
. reduce 19 (src line 181)
state 26
expr: expr.ARITH_OP expr
expr: IDENTIFIER SQUARE_BRACKET_LEFT expr.SQUARE_BRACKET_RIGHT
SQUARE_BRACKET_RIGHT shift 29
ARITH_OP shift 17
. error
state 27
expr: IDENTIFIER PAREN_LEFT args PAREN_RIGHT. (15)
. reduce 15 (src line 157)
state 28
args: args COMMA.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 30
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 29
expr: IDENTIFIER SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT. (16)
. reduce 16 (src line 161)
state 30
expr: expr.ARITH_OP expr
args: args COMMA expr. (18)
ARITH_OP shift 17
. reduce 18 (src line 177)
17 terminals, 8 nonterminals
21 grammar rules, 31/2000 states
0 shift/reduce, 0 reduce/reduce conflicts reported
57 working sets used
memory: parser 45/30000
26 extra closures
67 shift entries, 1 exceptions
16 goto entries
31 entries saved by goto default
Optimizer space used: output 37/30000
37 table entries, 1 zero
maximum spread: 17, maximum offset: 28

@ -13,8 +13,8 @@ Using the library is very simple, here is an example of publishing a service ent
// Setup our service export // Setup our service export
host, _ := os.Hostname() host, _ := os.Hostname()
info := []string{"My awesome service"}, info := []string{"My awesome service"}
service, _ := NewMDNSService(host, "_foobar._tcp", "", "", 8000, nil, info) service, _ := mdns.NewMDNSService(host, "_foobar._tcp", "", "", 8000, nil, info)
// Create the mDNS server, defer shutdown // Create the mDNS server, defer shutdown
server, _ := mdns.NewServer(&mdns.Config{Zone: service}) server, _ := mdns.NewServer(&mdns.Config{Zone: service})
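
The README snippet above elides error handling and the lookup side. A minimal, self-contained sketch of both halves, using only the exported hashicorp/mdns API (the service name and port come from the README; the lookup half is illustrative):

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/hashicorp/mdns"
)

func main() {
    // Publish a service entry, as in the README above, with errors checked.
    host, _ := os.Hostname()
    info := []string{"My awesome service"}
    service, err := mdns.NewMDNSService(host, "_foobar._tcp", "", "", 8000, nil, info)
    if err != nil {
        log.Fatal(err)
    }
    server, err := mdns.NewServer(&mdns.Config{Zone: service})
    if err != nil {
        log.Fatal(err)
    }
    defer server.Shutdown()

    // Browse for instances of the same service type.
    entries := make(chan *mdns.ServiceEntry, 4)
    go func() {
        for e := range entries {
            fmt.Printf("found: %s at %s:%d\n", e.Name, e.Host, e.Port)
        }
    }()
    if err := mdns.Lookup("_foobar._tcp", entries); err != nil {
        log.Fatal(err)
    }
    close(entries)
}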

View File

@ -173,7 +173,7 @@ func (c *client) Close() error {
return nil return nil
} }
// setInterface is used to set the query interface, uses sytem // setInterface is used to set the query interface, uses system
// default if not provided // default if not provided
func (c *client) setInterface(iface *net.Interface) error { func (c *client) setInterface(iface *net.Interface) error {
p := ipv4.NewPacketConn(c.ipv4UnicastConn) p := ipv4.NewPacketConn(c.ipv4UnicastConn)
@ -308,10 +308,16 @@ func (c *client) sendQuery(q *dns.Msg) error {
return err return err
} }
if c.ipv4UnicastConn != nil { if c.ipv4UnicastConn != nil {
c.ipv4UnicastConn.WriteToUDP(buf, ipv4Addr) _, err = c.ipv4UnicastConn.WriteToUDP(buf, ipv4Addr)
if err != nil {
return err
}
} }
if c.ipv6UnicastConn != nil { if c.ipv6UnicastConn != nil {
c.ipv6UnicastConn.WriteToUDP(buf, ipv6Addr) _, err = c.ipv6UnicastConn.WriteToUDP(buf, ipv6Addr)
if err != nil {
return err
}
} }
return nil return nil
} }

View File

@ -1,9 +1,8 @@
module github.com/hashicorp/mdns module github.com/hashicorp/mdns
require ( require (
github.com/miekg/dns v1.0.14 github.com/miekg/dns v1.1.27
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 // indirect golang.org/x/net v0.0.0-20190923162816-aa69164e4478
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 // indirect
) )
go 1.13

View File

@ -1,10 +1,19 @@
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519 h1:x6rhz8Y9CjbgQkccRGmELH6K+LJj7tOoh3XWeC1yaQM= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U= golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -54,7 +54,7 @@ func (s *Stream) LocalAddr() net.Addr {
return s.session.LocalAddr() return s.session.LocalAddr()
} }
// LocalAddr returns the remote address // RemoteAddr returns the remote address
func (s *Stream) RemoteAddr() net.Addr { func (s *Stream) RemoteAddr() net.Addr {
return s.session.RemoteAddr() return s.session.RemoteAddr()
} }

View File

@ -446,15 +446,17 @@ func (s *Stream) SetDeadline(t time.Time) error {
return nil return nil
} }
// SetReadDeadline sets the deadline for future Read calls. // SetReadDeadline sets the deadline for blocked and future Read calls.
func (s *Stream) SetReadDeadline(t time.Time) error { func (s *Stream) SetReadDeadline(t time.Time) error {
s.readDeadline.Store(t) s.readDeadline.Store(t)
asyncNotify(s.recvNotifyCh)
return nil return nil
} }
// SetWriteDeadline sets the deadline for future Write calls // SetWriteDeadline sets the deadline for blocked and future Write calls
func (s *Stream) SetWriteDeadline(t time.Time) error { func (s *Stream) SetWriteDeadline(t time.Time) error {
s.writeDeadline.Store(t) s.writeDeadline.Store(t)
asyncNotify(s.sendNotifyCh)
return nil return nil
} }
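
The asyncNotify calls added above mean a deadline now also wakes up a Read or Write that is already blocked, not only future calls. A hedged sketch of the difference, assuming conn is an established connection whose peer runs yamux.Server:

package main

import (
    "log"
    "net"
    "time"

    "github.com/hashicorp/yamux"
)

// demoDeadline shows the effect of the change above: a deadline set from
// another goroutine wakes a Read that is already blocked.
func demoDeadline(conn net.Conn) error {
    session, err := yamux.Client(conn, nil)
    if err != nil {
        return err
    }
    stream, err := session.Open()
    if err != nil {
        return err
    }
    defer stream.Close()

    go func() {
        time.Sleep(time.Second)
        stream.SetReadDeadline(time.Now()) // unblocks the Read below
    }()

    buf := make([]byte, 1)
    _, err = stream.Read(buf)
    log.Println("read returned:", err) // expect a timeout error once the deadline fires
    return nil
}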

View File

@ -10,6 +10,7 @@ import (
"os" "os"
"strconv" "strconv"
"strings" "strings"
"sync"
"syscall" "syscall"
"unsafe" "unsafe"
@ -27,6 +28,7 @@ const (
backgroundRed = 0x40 backgroundRed = 0x40
backgroundIntensity = 0x80 backgroundIntensity = 0x80
backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
commonLvbUnderscore = 0x8000
cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
) )
@ -93,6 +95,7 @@ type Writer struct {
oldattr word oldattr word
oldpos coord oldpos coord
rest bytes.Buffer rest bytes.Buffer
mutex sync.Mutex
} }
// NewColorable returns new instance of Writer which handles escape sequence from File. // NewColorable returns new instance of Writer which handles escape sequence from File.
@ -432,6 +435,8 @@ func atoiWithDefault(s string, def int) (int, error) {
// Write writes data on console // Write writes data on console
func (w *Writer) Write(data []byte) (n int, err error) { func (w *Writer) Write(data []byte) (n int, err error) {
w.mutex.Lock()
defer w.mutex.Unlock()
var csbi consoleScreenBufferInfo var csbi consoleScreenBufferInfo
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
@ -683,14 +688,19 @@ loop:
switch { switch {
case n == 0 || n == 100: case n == 0 || n == 100:
attr = w.oldattr attr = w.oldattr
case 1 <= n && n <= 5: case n == 4:
attr |= commonLvbUnderscore
case (1 <= n && n <= 3) || n == 5:
attr |= foregroundIntensity attr |= foregroundIntensity
case n == 7: case n == 7 || n == 27:
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) attr =
case n == 22 || n == 25: (attr &^ (foregroundMask | backgroundMask)) |
attr |= foregroundIntensity ((attr & foregroundMask) << 4) |
case n == 27: ((attr & backgroundMask) >> 4)
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) case n == 22:
attr &^= foregroundIntensity
case n == 24:
attr &^= commonLvbUnderscore
case 30 <= n && n <= 37: case 30 <= n && n <= 37:
attr &= backgroundMask attr &= backgroundMask
if (n-30)&1 != 0 { if (n-30)&1 != 0 {
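
A small sketch of what the new mutex and the SGR 4/24 handling buy on a Windows console: concurrent writers no longer interleave inside each other's escape sequences, and underline on/off maps to the console's underscore attribute (illustrative only):

package main

import (
    "fmt"
    "sync"

    "github.com/mattn/go-colorable"
)

func main() {
    out := colorable.NewColorableStdout()

    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(n int) {
            defer wg.Done()
            // SGR 4 = underline (now COMMON_LVB_UNDERSCORE), 24 = underline off.
            fmt.Fprintf(out, "\x1b[4mworker %d\x1b[24m done\n", n)
        }(i)
    }
    wg.Wait()
}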

View File

@ -10,6 +10,7 @@ env:
- GO111MODULE=on - GO111MODULE=on
script: script:
- go generate ./... && test `git ls-files --modified | wc -l` = 0
- go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./... - go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./...
after_success: after_success:

View File

@ -28,6 +28,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/coredns/coredns * https://github.com/coredns/coredns
* https://cloudflare.com * https://cloudflare.com
* https://github.com/abh/geodns * https://github.com/abh/geodns
* https://github.com/baidu/bfe
* http://www.statdns.com/ * http://www.statdns.com/
* http://www.dnsinspect.com/ * http://www.dnsinspect.com/
* https://github.com/chuangbo/jianbing-dictionary-dns * https://github.com/chuangbo/jianbing-dictionary-dns
@ -70,6 +71,8 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://render.com * https://render.com
* https://github.com/peterzen/goresolver * https://github.com/peterzen/goresolver
* https://github.com/folbricht/routedns * https://github.com/folbricht/routedns
* https://domainr.com/
* https://zonedb.org/
Send pull request if you want to be listed here. Send pull request if you want to be listed here.
@ -127,6 +130,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 2915 - NAPTR record * 2915 - NAPTR record
* 2929 - DNS IANA Considerations * 2929 - DNS IANA Considerations
* 3110 - RSASHA1 DNS keys * 3110 - RSASHA1 DNS keys
* 3123 - APL record
* 3225 - DO bit (DNSSEC OK) * 3225 - DO bit (DNSSEC OK)
* 340{1,2,3} - NAPTR record * 340{1,2,3} - NAPTR record
* 3445 - Limiting the scope of (DNS)KEY * 3445 - Limiting the scope of (DNS)KEY

View File

@ -124,15 +124,38 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
// of 512 bytes // of 512 bytes
// To specify a local address or a timeout, the caller has to set the `Client.Dialer` // To specify a local address or a timeout, the caller has to set the `Client.Dialer`
// attribute appropriately // attribute appropriately
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) { func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
co, err := c.Dial(address)
if err != nil {
return nil, 0, err
}
defer co.Close()
return c.ExchangeWithConn(m, co)
}
// ExchangeWithConn has the same behavior as Exchange, just with a predetermined connection
// that will be used instead of creating a new one.
// Usage pattern with a *dns.Client:
// c := new(dns.Client)
// // connection management logic goes here
//
// conn := c.Dial(address)
// in, rtt, err := c.ExchangeWithConn(message, conn)
//
// This allows users of the library to implement their own connection management,
// as opposed to Exchange, which will always use new connections and incur the added overhead
// that entails when using "tcp" and especially "tcp-tls" clients.
func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight { if !c.SingleInflight {
return c.exchange(m, address) return c.exchange(m, conn)
} }
q := m.Question[0] q := m.Question[0]
key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass) key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) { r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
return c.exchange(m, address) return c.exchange(m, conn)
}) })
if r != nil && shared { if r != nil && shared {
r = r.Copy() r = r.Copy()
@ -141,15 +164,7 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
return r, rtt, err return r, rtt, err
} }
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { func (c *Client) exchange(m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
var co *Conn
co, err = c.Dial(a)
if err != nil {
return nil, 0, err
}
defer co.Close()
opt := m.IsEdns0() opt := m.IsEdns0()
// If EDNS0 is used use that for size. // If EDNS0 is used use that for size.
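
A minimal sketch of the connection-reuse pattern the new ExchangeWithConn enables; the resolver address is a placeholder:

package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    // Reuse one connection for several queries instead of paying the dial
    // (and TLS handshake, for "tcp-tls") cost per query.
    c := &dns.Client{Net: "tcp"}
    conn, err := c.Dial("8.8.8.8:53")
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    for _, name := range []string{"example.org.", "example.com."} {
        m := new(dns.Msg)
        m.SetQuestion(name, dns.TypeA)
        in, rtt, err := c.ExchangeWithConn(m, conn)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(name, rtt, len(in.Answer), "answers")
    }
}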

View File

@ -105,7 +105,7 @@ func (dns *Msg) SetAxfr(z string) *Msg {
// SetTsig appends a TSIG RR to the message. // SetTsig appends a TSIG RR to the message.
// This is only a skeleton TSIG RR that is added as the last RR in the // This is only a skeleton TSIG RR that is added as the last RR in the
// additional section. The Tsig is calculated when the message is being send. // additional section. The TSIG is calculated when the message is being send.
func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg { func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
t := new(TSIG) t := new(TSIG)
t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
@ -317,6 +317,12 @@ func Fqdn(s string) string {
return s + "." return s + "."
} }
// CanonicalName returns the domain name in canonical form. A name in canonical
// form is lowercase and fully qualified. See Section 6.2 in RFC 4034.
func CanonicalName(s string) string {
return strings.ToLower(Fqdn(s))
}
// Copied from the official Go code. // Copied from the official Go code.
// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP // ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
@ -364,7 +370,7 @@ func (t Type) String() string {
// String returns the string representation for the class c. // String returns the string representation for the class c.
func (c Class) String() string { func (c Class) String() string {
if s, ok := ClassToString[uint16(c)]; ok { if s, ok := ClassToString[uint16(c)]; ok {
// Only emit mnemonics when they are unambiguous, specically ANY is in both. // Only emit mnemonics when they are unambiguous, specially ANY is in both.
if _, ok := StringToType[s]; !ok { if _, ok := StringToType[s]; !ok {
return s return s
} }
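
A quick illustration of the new CanonicalName helper (lowercase plus fully qualified, per RFC 4034 Section 6.2):

package main

import (
    "fmt"

    "github.com/miekg/dns"
)

func main() {
    fmt.Println(dns.CanonicalName("Example.ORG"))      // example.org.
    fmt.Println(dns.CanonicalName("WWW.example.org.")) // www.example.org.
}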

View File

@ -200,7 +200,7 @@ func (k *DNSKEY) ToDS(h uint8) *DS {
wire = wire[:n] wire = wire[:n]
owner := make([]byte, 255) owner := make([]byte, 255)
off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) off, err1 := PackDomainName(CanonicalName(k.Hdr.Name), owner, 0, nil, false)
if err1 != nil { if err1 != nil {
return nil return nil
} }
@ -285,7 +285,7 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
sigwire.Inception = rr.Inception sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag sigwire.KeyTag = rr.KeyTag
// For signing, lowercase this name // For signing, lowercase this name
sigwire.SignerName = strings.ToLower(rr.SignerName) sigwire.SignerName = CanonicalName(rr.SignerName)
// Create the desired binary blob // Create the desired binary blob
signdata := make([]byte, DefaultMsgSize) signdata := make([]byte, DefaultMsgSize)
@ -423,7 +423,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
sigwire.Expiration = rr.Expiration sigwire.Expiration = rr.Expiration
sigwire.Inception = rr.Inception sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag sigwire.KeyTag = rr.KeyTag
sigwire.SignerName = strings.ToLower(rr.SignerName) sigwire.SignerName = CanonicalName(rr.SignerName)
// Create the desired binary blob // Create the desired binary blob
signeddata := make([]byte, DefaultMsgSize) signeddata := make([]byte, DefaultMsgSize)
n, err := packSigWire(sigwire, signeddata) n, err := packSigWire(sigwire, signeddata)
@ -659,7 +659,7 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
} }
// RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
h.Name = strings.ToLower(h.Name) h.Name = CanonicalName(h.Name)
// 6.2. Canonical RR Form. (3) - domain rdata to lowercase. // 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
@ -672,49 +672,49 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
// conversion. // conversion.
switch x := r1.(type) { switch x := r1.(type) {
case *NS: case *NS:
x.Ns = strings.ToLower(x.Ns) x.Ns = CanonicalName(x.Ns)
case *MD: case *MD:
x.Md = strings.ToLower(x.Md) x.Md = CanonicalName(x.Md)
case *MF: case *MF:
x.Mf = strings.ToLower(x.Mf) x.Mf = CanonicalName(x.Mf)
case *CNAME: case *CNAME:
x.Target = strings.ToLower(x.Target) x.Target = CanonicalName(x.Target)
case *SOA: case *SOA:
x.Ns = strings.ToLower(x.Ns) x.Ns = CanonicalName(x.Ns)
x.Mbox = strings.ToLower(x.Mbox) x.Mbox = CanonicalName(x.Mbox)
case *MB: case *MB:
x.Mb = strings.ToLower(x.Mb) x.Mb = CanonicalName(x.Mb)
case *MG: case *MG:
x.Mg = strings.ToLower(x.Mg) x.Mg = CanonicalName(x.Mg)
case *MR: case *MR:
x.Mr = strings.ToLower(x.Mr) x.Mr = CanonicalName(x.Mr)
case *PTR: case *PTR:
x.Ptr = strings.ToLower(x.Ptr) x.Ptr = CanonicalName(x.Ptr)
case *MINFO: case *MINFO:
x.Rmail = strings.ToLower(x.Rmail) x.Rmail = CanonicalName(x.Rmail)
x.Email = strings.ToLower(x.Email) x.Email = CanonicalName(x.Email)
case *MX: case *MX:
x.Mx = strings.ToLower(x.Mx) x.Mx = CanonicalName(x.Mx)
case *RP: case *RP:
x.Mbox = strings.ToLower(x.Mbox) x.Mbox = CanonicalName(x.Mbox)
x.Txt = strings.ToLower(x.Txt) x.Txt = CanonicalName(x.Txt)
case *AFSDB: case *AFSDB:
x.Hostname = strings.ToLower(x.Hostname) x.Hostname = CanonicalName(x.Hostname)
case *RT: case *RT:
x.Host = strings.ToLower(x.Host) x.Host = CanonicalName(x.Host)
case *SIG: case *SIG:
x.SignerName = strings.ToLower(x.SignerName) x.SignerName = CanonicalName(x.SignerName)
case *PX: case *PX:
x.Map822 = strings.ToLower(x.Map822) x.Map822 = CanonicalName(x.Map822)
x.Mapx400 = strings.ToLower(x.Mapx400) x.Mapx400 = CanonicalName(x.Mapx400)
case *NAPTR: case *NAPTR:
x.Replacement = strings.ToLower(x.Replacement) x.Replacement = CanonicalName(x.Replacement)
case *KX: case *KX:
x.Exchanger = strings.ToLower(x.Exchanger) x.Exchanger = CanonicalName(x.Exchanger)
case *SRV: case *SRV:
x.Target = strings.ToLower(x.Target) x.Target = CanonicalName(x.Target)
case *DNAME: case *DNAME:
x.Target = strings.ToLower(x.Target) x.Target = CanonicalName(x.Target)
} }
// 6.2. Canonical RR Form. (5) - origTTL // 6.2. Canonical RR Form. (5) - origTTL
wire := make([]byte, Len(r1)+1) // +1 to be safe(r) wire := make([]byte, Len(r1)+1) // +1 to be safe(r)

2
vendor/github.com/miekg/dns/doc.go generated vendored
View File

@ -209,7 +209,7 @@ Basic use pattern validating and replying to a message that has TSIG set.
// *Msg r has an TSIG record and it was validated // *Msg r has an TSIG record and it was validated
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
} else { } else {
// *Msg r has an TSIG records and it was not valided // *Msg r has an TSIG records and it was not validated
} }
} }
w.WriteMsg(m) w.WriteMsg(m)
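
A hedged client-side companion to the server-side pattern above: signing an outgoing query with TSIG. The key name, secret, and server address are placeholders:

package main

import (
    "log"
    "time"

    "github.com/miekg/dns"
)

func main() {
    c := new(dns.Client)
    // Placeholder key name and base64 secret; these must match the server's key.
    c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}

    m := new(dns.Msg)
    m.SetQuestion("example.org.", dns.TypeSOA)
    m.SetTsig("axfr.", dns.HmacSHA256, 300, time.Now().Unix())

    in, _, err := c.Exchange(m, "ns1.example.org:53")
    if err != nil {
        log.Fatal(err)
    }
    log.Println("rcode:", in.Rcode)
}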

View File

@ -3,9 +3,8 @@ package dns
//go:generate go run duplicate_generate.go //go:generate go run duplicate_generate.go
// IsDuplicate checks of r1 and r2 are duplicates of each other, excluding the TTL. // IsDuplicate checks of r1 and r2 are duplicates of each other, excluding the TTL.
// So this means the header data is equal *and* the RDATA is the same. Return true // So this means the header data is equal *and* the RDATA is the same. Returns true
// is so, otherwise false. // if so, otherwise false. It's a protocol violation to have identical RRs in a message.
// It's a protocol violation to have identical RRs in a message.
func IsDuplicate(r1, r2 RR) bool { func IsDuplicate(r1, r2 RR) bool {
// Check whether the record header is identical. // Check whether the record header is identical.
if !r1.Header().isDuplicate(r2.Header()) { if !r1.Header().isDuplicate(r2.Header()) {
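
A short check of the clarified contract (the TTL is ignored when comparing records):

package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    r1, err := dns.NewRR("example.org. 3600 IN A 192.0.2.1")
    if err != nil {
        log.Fatal(err)
    }
    r2, err := dns.NewRR("example.org. 60 IN A 192.0.2.1")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(dns.IsDuplicate(r1, r2)) // true: only the TTL differs
}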

View File

@ -543,6 +543,10 @@ func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
} }
func (e *EDNS0_EXPIRE) unpack(b []byte) error { func (e *EDNS0_EXPIRE) unpack(b []byte) error {
if len(b) == 0 {
// zero-length EXPIRE query, see RFC 7314 Section 2
return nil
}
if len(b) < 4 { if len(b) < 4 {
return ErrBuf return ErrBuf
} }

View File

@ -20,13 +20,13 @@ import (
// of $ after that are interpreted. // of $ after that are interpreted.
func (zp *ZoneParser) generate(l lex) (RR, bool) { func (zp *ZoneParser) generate(l lex) (RR, bool) {
token := l.token token := l.token
step := 1 step := int64(1)
if i := strings.IndexByte(token, '/'); i >= 0 { if i := strings.IndexByte(token, '/'); i >= 0 {
if i+1 == len(token) { if i+1 == len(token) {
return zp.setParseError("bad step in $GENERATE range", l) return zp.setParseError("bad step in $GENERATE range", l)
} }
s, err := strconv.Atoi(token[i+1:]) s, err := strconv.ParseInt(token[i+1:], 10, 64)
if err != nil || s <= 0 { if err != nil || s <= 0 {
return zp.setParseError("bad step in $GENERATE range", l) return zp.setParseError("bad step in $GENERATE range", l)
} }
@ -40,12 +40,12 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) {
return zp.setParseError("bad start-stop in $GENERATE range", l) return zp.setParseError("bad start-stop in $GENERATE range", l)
} }
start, err := strconv.Atoi(sx[0]) start, err := strconv.ParseInt(sx[0], 10, 64)
if err != nil { if err != nil {
return zp.setParseError("bad start in $GENERATE range", l) return zp.setParseError("bad start in $GENERATE range", l)
} }
end, err := strconv.Atoi(sx[1]) end, err := strconv.ParseInt(sx[1], 10, 64)
if err != nil { if err != nil {
return zp.setParseError("bad stop in $GENERATE range", l) return zp.setParseError("bad stop in $GENERATE range", l)
} }
@ -75,10 +75,10 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) {
r := &generateReader{ r := &generateReader{
s: s, s: s,
cur: start, cur: int(start),
start: start, start: int(start),
end: end, end: int(end),
step: step, step: int(step),
file: zp.file, file: zp.file,
lex: &l, lex: &l,
@ -188,7 +188,7 @@ func (r *generateReader) ReadByte() (byte, error) {
if errMsg != "" { if errMsg != "" {
return 0, r.parseError(errMsg, si+3+sep) return 0, r.parseError(errMsg, si+3+sep)
} }
if r.start+offset < 0 || r.end+offset > 1<<31-1 { if r.start+offset < 0 || int64(r.end) + int64(offset) > 1<<31-1 {
return 0, r.parseError("bad offset in $GENERATE", si+3+sep) return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
} }
@ -229,19 +229,19 @@ func modToPrintf(s string) (string, int, string) {
return "", 0, "bad base in $GENERATE" return "", 0, "bad base in $GENERATE"
} }
offset, err := strconv.Atoi(offStr) offset, err := strconv.ParseInt(offStr, 10, 64)
if err != nil { if err != nil {
return "", 0, "bad offset in $GENERATE" return "", 0, "bad offset in $GENERATE"
} }
width, err := strconv.Atoi(widthStr) width, err := strconv.ParseInt(widthStr, 10, 64)
if err != nil || width < 0 || width > 255 { if err != nil || width < 0 || width > 255 {
return "", 0, "bad width in $GENERATE" return "", 0, "bad width in $GENERATE"
} }
if width == 0 { if width == 0 {
return "%" + base, offset, "" return "%" + base, int(offset), ""
} }
return "%0" + widthStr + base, offset, "" return "%0" + widthStr + base, int(offset), ""
} }
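
For reference, a small sketch of the $GENERATE directive that this parsing code expands, driven through the ZoneParser API; the zone content is illustrative:

package main

import (
    "fmt"
    "log"
    "strings"

    "github.com/miekg/dns"
)

func main() {
    // With the change above, the range bounds and step are parsed as 64-bit
    // integers before being range-checked.
    zone := "$ORIGIN example.org.\n$GENERATE 1-4 host-$ A 192.0.2.$\n"
    zp := dns.NewZoneParser(strings.NewReader(zone), "", "")
    for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
        fmt.Println(rr) // host-1.example.org. ... A 192.0.2.1, and so on
    }
    if err := zp.Err(); err != nil {
        log.Fatal(err)
    }
}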

5
vendor/github.com/miekg/dns/go.mod generated vendored
View File

@ -3,10 +3,9 @@ module github.com/miekg/dns
go 1.12 go 1.12
require ( require (
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 golang.org/x/net v0.0.0-20190923162816-aa69164e4478
golang.org/x/sync v0.0.0-20190423024810-112230192c58 golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe
golang.org/x/text v0.3.2 // indirect golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 // indirect
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18 // indirect
) )

6
vendor/github.com/miekg/dns/go.sum generated vendored
View File

@ -5,6 +5,9 @@ golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsu
golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3 h1:dgd4x4kJt7G4k4m93AYLzM8Ni6h2qLTfh9n9vXJT3/0= golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3 h1:dgd4x4kJt7G4k4m93AYLzM8Ni6h2qLTfh9n9vXJT3/0=
golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -30,4 +33,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 h1:VvQyQJN0tSuecqgcIxMWnnfG5kSmgy9KZR9sW3W5QeA=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -83,7 +83,7 @@ func CompareDomainName(s1, s2 string) (n int) {
return return
} }
// CountLabel counts the the number of labels in the string s. // CountLabel counts the number of labels in the string s.
// s must be a syntactically valid domain name. // s must be a syntactically valid domain name.
func CountLabel(s string) (labels int) { func CountLabel(s string) (labels int) {
if s == "." { if s == "." {

16
vendor/github.com/miekg/dns/msg.go generated vendored
View File

@ -398,17 +398,12 @@ Loop:
return "", lenmsg, ErrLongDomain return "", lenmsg, ErrLongDomain
} }
for _, b := range msg[off : off+c] { for _, b := range msg[off : off+c] {
switch b { if isDomainNameLabelSpecial(b) {
case '.', '(', ')', ';', ' ', '@':
fallthrough
case '"', '\\':
s = append(s, '\\', b) s = append(s, '\\', b)
default: } else if b < ' ' || b > '~' {
if b < ' ' || b > '~' { // unprintable, use \DDD s = append(s, escapeByte(b)...)
s = append(s, escapeByte(b)...) } else {
} else { s = append(s, b)
s = append(s, b)
}
} }
} }
s = append(s, '.') s = append(s, '.')
@ -661,7 +656,6 @@ func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error)
} }
// If offset does not increase anymore, l is a lie // If offset does not increase anymore, l is a lie
if off1 == off { if off1 == off {
l = i
break break
} }
dst = append(dst, r) dst = append(dst, r)

View File

@ -423,86 +423,12 @@ Option:
if off+int(optlen) > len(msg) { if off+int(optlen) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"} return nil, len(msg), &Error{err: "overflow unpacking opt"}
} }
switch code { e := makeDataOpt(code)
case EDNS0NSID: if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
e := new(EDNS0_NSID) return nil, len(msg), err
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0SUBNET:
e := new(EDNS0_SUBNET)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0COOKIE:
e := new(EDNS0_COOKIE)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0EXPIRE:
e := new(EDNS0_EXPIRE)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0UL:
e := new(EDNS0_UL)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0LLQ:
e := new(EDNS0_LLQ)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0DAU:
e := new(EDNS0_DAU)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0DHU:
e := new(EDNS0_DHU)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0N3U:
e := new(EDNS0_N3U)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0PADDING:
e := new(EDNS0_PADDING)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
default:
e := new(EDNS0_LOCAL)
e.Code = code
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
} }
edns = append(edns, e)
off += int(optlen)
if off < len(msg) { if off < len(msg) {
goto Option goto Option
@ -511,6 +437,35 @@ Option:
return edns, off, nil return edns, off, nil
} }
func makeDataOpt(code uint16) EDNS0 {
switch code {
case EDNS0NSID:
return new(EDNS0_NSID)
case EDNS0SUBNET:
return new(EDNS0_SUBNET)
case EDNS0COOKIE:
return new(EDNS0_COOKIE)
case EDNS0EXPIRE:
return new(EDNS0_EXPIRE)
case EDNS0UL:
return new(EDNS0_UL)
case EDNS0LLQ:
return new(EDNS0_LLQ)
case EDNS0DAU:
return new(EDNS0_DAU)
case EDNS0DHU:
return new(EDNS0_DHU)
case EDNS0N3U:
return new(EDNS0_N3U)
case EDNS0PADDING:
return new(EDNS0_PADDING)
default:
e := new(EDNS0_LOCAL)
e.Code = code
return e
}
}
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
for _, el := range options { for _, el := range options {
b, err := el.pack() b, err := el.pack()
@ -521,9 +476,7 @@ func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
off += 4 off += 4
if off+len(b) > len(msg) { if off+len(b) > len(msg) {
copy(msg[off:], b) return len(msg), &Error{err: "overflow packing opt"}
off = len(msg)
continue
} }
// Actual data // Actual data
copy(msg[off:off+len(b)], b) copy(msg[off:off+len(b)], b)
@ -688,3 +641,126 @@ func packDataDomainNames(names []string, msg []byte, off int, compression compre
} }
return off, nil return off, nil
} }
func packDataApl(data []APLPrefix, msg []byte, off int) (int, error) {
var err error
for i := range data {
off, err = packDataAplPrefix(&data[i], msg, off)
if err != nil {
return len(msg), err
}
}
return off, nil
}
func packDataAplPrefix(p *APLPrefix, msg []byte, off int) (int, error) {
if len(p.Network.IP) != len(p.Network.Mask) {
return len(msg), &Error{err: "address and mask lengths don't match"}
}
var err error
prefix, _ := p.Network.Mask.Size()
addr := p.Network.IP.Mask(p.Network.Mask)[:(prefix+7)/8]
switch len(p.Network.IP) {
case net.IPv4len:
off, err = packUint16(1, msg, off)
case net.IPv6len:
off, err = packUint16(2, msg, off)
default:
err = &Error{err: "unrecognized address family"}
}
if err != nil {
return len(msg), err
}
off, err = packUint8(uint8(prefix), msg, off)
if err != nil {
return len(msg), err
}
var n uint8
if p.Negation {
n = 0x80
}
adflen := uint8(len(addr)) & 0x7f
off, err = packUint8(n|adflen, msg, off)
if err != nil {
return len(msg), err
}
if off+len(addr) > len(msg) {
return len(msg), &Error{err: "overflow packing APL prefix"}
}
off += copy(msg[off:], addr)
return off, nil
}
func unpackDataApl(msg []byte, off int) ([]APLPrefix, int, error) {
var result []APLPrefix
for off < len(msg) {
prefix, end, err := unpackDataAplPrefix(msg, off)
if err != nil {
return nil, len(msg), err
}
off = end
result = append(result, prefix)
}
return result, off, nil
}
func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) {
family, off, err := unpackUint16(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
prefix, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
nlen, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
var ip []byte
switch family {
case 1:
ip = make([]byte, net.IPv4len)
case 2:
ip = make([]byte, net.IPv6len)
default:
return APLPrefix{}, len(msg), &Error{err: "unrecognized APL address family"}
}
if int(prefix) > 8*len(ip) {
return APLPrefix{}, len(msg), &Error{err: "APL prefix too long"}
}
afdlen := int(nlen & 0x7f)
if afdlen > len(ip) {
return APLPrefix{}, len(msg), &Error{err: "APL length too long"}
}
if off+afdlen > len(msg) {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL address"}
}
off += copy(ip, msg[off:off+afdlen])
if afdlen > 0 {
last := ip[afdlen-1]
if last == 0 {
return APLPrefix{}, len(msg), &Error{err: "extra APL address bits"}
}
}
ipnet := net.IPNet{
IP: ip,
Mask: net.CIDRMask(int(prefix), 8*len(ip)),
}
network := ipnet.IP.Mask(ipnet.Mask)
if !network.Equal(ipnet.IP) {
return APLPrefix{}, len(msg), &Error{err: "invalid APL address length"}
}
return APLPrefix{
Negation: (nlen & 0x80) != 0,
Network: ipnet,
}, off, nil
}
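
A hedged round-trip through the new APL packing and unpacking code, assuming the accompanying APL and APLPrefix types from this release; names and prefixes are illustrative:

package main

import (
    "fmt"
    "log"
    "net"

    "github.com/miekg/dns"
)

func main() {
    _, allow, _ := net.ParseCIDR("192.0.2.0/24")
    _, deny, _ := net.ParseCIDR("2001:db8::/32")

    apl := &dns.APL{
        Hdr: dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeAPL, Class: dns.ClassINET, Ttl: 3600},
        Prefixes: []dns.APLPrefix{
            {Network: *allow},
            {Negation: true, Network: *deny},
        },
    }

    m := new(dns.Msg)
    m.Answer = append(m.Answer, apl)

    // Pack exercises packDataApl/packDataAplPrefix; Unpack the reverse path.
    wire, err := m.Pack()
    if err != nil {
        log.Fatal(err)
    }
    var decoded dns.Msg
    if err := decoded.Unpack(wire); err != nil {
        log.Fatal(err)
    }
    fmt.Println(decoded.Answer[0])
}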

View File

@ -73,7 +73,7 @@ func (dns *Msg) Truncate(size int) {
var numExtra int var numExtra int
if l < size { if l < size {
l, numExtra = truncateLoop(dns.Extra, size, l, compression) _, numExtra = truncateLoop(dns.Extra, size, l, compression)
} }
// See the function documentation for when we set this. // See the function documentation for when we set this.

View File

@ -43,7 +43,7 @@ func HashName(label string, ha uint8, iter uint16, salt string) string {
return toBase32(nsec3) return toBase32(nsec3)
} }
// Cover returns true if a name is covered by the NSEC3 record // Cover returns true if a name is covered by the NSEC3 record.
func (rr *NSEC3) Cover(name string) bool { func (rr *NSEC3) Cover(name string) bool {
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
owner := strings.ToUpper(rr.Hdr.Name) owner := strings.ToUpper(rr.Hdr.Name)

View File

@ -13,7 +13,6 @@ type PrivateRdata interface {
// Pack is used when packing a private RR into a buffer. // Pack is used when packing a private RR into a buffer.
Pack([]byte) (int, error) Pack([]byte) (int, error)
// Unpack is used when unpacking a private RR from a buffer. // Unpack is used when unpacking a private RR from a buffer.
// TODO(miek): diff. signature than Pack, see edns0.go for instance.
Unpack([]byte) (int, error) Unpack([]byte) (int, error)
// Copy copies the Rdata into the PrivateRdata argument. // Copy copies the Rdata into the PrivateRdata argument.
Copy(PrivateRdata) error Copy(PrivateRdata) error

89
vendor/github.com/miekg/dns/scan.go generated vendored
View File

@ -87,31 +87,18 @@ type lex struct {
column int // column in the file column int // column in the file
} }
// Token holds the token that are returned when a zone file is parsed.
type Token struct {
// The scanned resource record when error is not nil.
RR
// When an error occurred, this has the error specifics.
Error *ParseError
// A potential comment positioned after the RR and on the same line.
Comment string
}
// ttlState describes the state necessary to fill in an omitted RR TTL // ttlState describes the state necessary to fill in an omitted RR TTL
type ttlState struct { type ttlState struct {
ttl uint32 // ttl is the current default TTL ttl uint32 // ttl is the current default TTL
isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive
} }
// NewRR reads the RR contained in the string s. Only the first RR is // NewRR reads the RR contained in the string s. Only the first RR is returned.
// returned. If s contains no records, NewRR will return nil with no // If s contains no records, NewRR will return nil with no error.
// error.
// //
// The class defaults to IN and TTL defaults to 3600. The full zone // The class defaults to IN and TTL defaults to 3600. The full zone file syntax
// file syntax like $TTL, $ORIGIN, etc. is supported. // like $TTL, $ORIGIN, etc. is supported. All fields of the returned RR are
// // set, except RR.Header().Rdlength which is set to 0.
// All fields of the returned RR are set, except RR.Header().Rdlength
// which is set to 0.
func NewRR(s string) (RR, error) { func NewRR(s string) (RR, error) {
if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
return ReadRR(strings.NewReader(s+"\n"), "") return ReadRR(strings.NewReader(s+"\n"), "")
@ -133,70 +120,6 @@ func ReadRR(r io.Reader, file string) (RR, error) {
return rr, zp.Err() return rr, zp.Err()
} }
// ParseZone reads a RFC 1035 style zonefile from r. It returns
// Tokens on the returned channel, each consisting of either a
// parsed RR and optional comment or a nil RR and an error. The
// channel is closed by ParseZone when the end of r is reached.
//
// The string file is used in error reporting and to resolve relative
// $INCLUDE directives. The string origin is used as the initial
// origin, as if the file would start with an $ORIGIN directive.
//
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
// supported. Note that $GENERATE's range support up to a maximum of
// of 65535 steps.
//
// Basic usage pattern when reading from a string (z) containing the
// zone data:
//
// for x := range dns.ParseZone(strings.NewReader(z), "", "") {
// if x.Error != nil {
// // log.Println(x.Error)
// } else {
// // Do something with x.RR
// }
// }
//
// Comments specified after an RR (and on the same line!) are
// returned too:
//
// foo. IN A 10.0.0.1 ; this is a comment
//
// The text "; this is comment" is returned in Token.Comment.
// Comments inside the RR are returned concatenated along with the
// RR. Comments on a line by themselves are discarded.
//
// To prevent memory leaks it is important to always fully drain the
// returned channel. If an error occurs, it will always be the last
// Token sent on the channel.
//
// Deprecated: New users should prefer the ZoneParser API.
func ParseZone(r io.Reader, origin, file string) chan *Token {
t := make(chan *Token, 10000)
go parseZone(r, origin, file, t)
return t
}
func parseZone(r io.Reader, origin, file string, t chan *Token) {
defer close(t)
zp := NewZoneParser(r, origin, file)
zp.SetIncludeAllowed(true)
for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
t <- &Token{RR: rr, Comment: zp.Comment()}
}
if err := zp.Err(); err != nil {
pe, ok := err.(*ParseError)
if !ok {
pe = &ParseError{file: file, err: err.Error()}
}
t <- &Token{Error: pe}
}
}
// ZoneParser is a parser for an RFC 1035 style zonefile. // ZoneParser is a parser for an RFC 1035 style zonefile.
// //
// Each parsed RR in the zone is returned sequentially from Next. An // Each parsed RR in the zone is returned sequentially from Next. An
@ -247,7 +170,7 @@ type ZoneParser struct {
includeDepth uint8 includeDepth uint8
includeAllowed bool includeAllowed bool
generateDisallowed bool generateDisallowed bool
} }
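
A minimal example of the NewRR contract restated above (class defaults to IN and TTL to 3600 when omitted):

package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    rr, err := dns.NewRR("example.org. MX 10 mail.example.org.")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(rr.Header().Ttl) // 3600
    fmt.Println(rr)              // example.org. 3600 IN MX 10 mail.example.org.
}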

View File

@ -1,6 +1,7 @@
package dns package dns
import ( import (
"bytes"
"encoding/base64" "encoding/base64"
"net" "net"
"strconv" "strconv"
@ -10,15 +11,15 @@ import (
// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) // A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces)
// or an error // or an error
func endingToString(c *zlexer, errstr string) (string, *ParseError) { func endingToString(c *zlexer, errstr string) (string, *ParseError) {
var s string var buffer bytes.Buffer
l, _ := c.Next() // zString l, _ := c.Next() // zString
for l.value != zNewline && l.value != zEOF { for l.value != zNewline && l.value != zEOF {
if l.err { if l.err {
return s, &ParseError{"", errstr, l} return buffer.String(), &ParseError{"", errstr, l}
} }
switch l.value { switch l.value {
case zString: case zString:
s += l.token buffer.WriteString(l.token)
case zBlank: // Ok case zBlank: // Ok
default: default:
return "", &ParseError{"", errstr, l} return "", &ParseError{"", errstr, l}
@ -26,7 +27,7 @@ func endingToString(c *zlexer, errstr string) (string, *ParseError) {
l, _ = c.Next() l, _ = c.Next()
} }
return s, nil return buffer.String(), nil
} }
// A remainder of the rdata with embedded spaces, split on unquoted whitespace // A remainder of the rdata with embedded spaces, split on unquoted whitespace
@ -403,7 +404,7 @@ func (rr *SOA) parse(c *zlexer, o string) *ParseError {
if l.err { if l.err {
return &ParseError{"", "bad SOA zone parameter", l} return &ParseError{"", "bad SOA zone parameter", l}
} }
if j, e := strconv.ParseUint(l.token, 10, 32); e != nil { if j, err := strconv.ParseUint(l.token, 10, 32); err != nil {
if i == 0 { if i == 0 {
// Serial must be a number // Serial must be a number
return &ParseError{"", "bad SOA zone parameter", l} return &ParseError{"", "bad SOA zone parameter", l}
@ -446,16 +447,16 @@ func (rr *SRV) parse(c *zlexer, o string) *ParseError {
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() // zString l, _ = c.Next() // zString
i, e = strconv.ParseUint(l.token, 10, 16) i, e1 := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad SRV Weight", l} return &ParseError{"", "bad SRV Weight", l}
} }
rr.Weight = uint16(i) rr.Weight = uint16(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() // zString l, _ = c.Next() // zString
i, e = strconv.ParseUint(l.token, 10, 16) i, e2 := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err { if e2 != nil || l.err {
return &ParseError{"", "bad SRV Port", l} return &ParseError{"", "bad SRV Port", l}
} }
rr.Port = uint16(i) rr.Port = uint16(i)
@ -482,8 +483,8 @@ func (rr *NAPTR) parse(c *zlexer, o string) *ParseError {
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() // zString l, _ = c.Next() // zString
i, e = strconv.ParseUint(l.token, 10, 16) i, e1 := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad NAPTR Preference", l} return &ParseError{"", "bad NAPTR Preference", l}
} }
rr.Preference = uint16(i) rr.Preference = uint16(i)
@ -581,9 +582,9 @@ func (rr *TALINK) parse(c *zlexer, o string) *ParseError {
func (rr *LOC) parse(c *zlexer, o string) *ParseError { func (rr *LOC) parse(c *zlexer, o string) *ParseError {
// Non zero defaults for LOC record, see RFC 1876, Section 3. // Non zero defaults for LOC record, see RFC 1876, Section 3.
rr.HorizPre = 165 // 10000 rr.Size = 0x12 // 1e2 cm (1m)
rr.VertPre = 162 // 10 rr.HorizPre = 0x16 // 1e6 cm (10000m)
rr.Size = 18 // 1 rr.VertPre = 0x13 // 1e3 cm (10m)
ok := false ok := false
// North // North
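
The LOC defaults above use RFC 1876's nibble encoding: the high nibble is a mantissa, the low nibble a base-10 exponent, giving mantissa * 10^exponent centimetres. A hypothetical helper (not part of the library) that decodes it:

package main

import (
    "fmt"
    "math"
)

// locPrecisionCm decodes the RFC 1876 nibble encoding (mantissa<<4 | exponent)
// back into centimetres. Hypothetical helper for illustration only.
func locPrecisionCm(b uint8) float64 {
    return float64(b>>4) * math.Pow(10, float64(b&0x0f))
}

func main() {
    fmt.Println(locPrecisionCm(0x12)) // 100 cm     -> 1 m      (default Size)
    fmt.Println(locPrecisionCm(0x16)) // 1e6 cm     -> 10000 m  (default HorizPre)
    fmt.Println(locPrecisionCm(0x13)) // 1000 cm    -> 10 m     (default VertPre)
}
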
@ -600,15 +601,15 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError {
if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
goto East goto East
} }
i, e = strconv.ParseUint(l.token, 10, 32) if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err {
if e != nil || l.err {
return &ParseError{"", "bad LOC Latitude minutes", l} return &ParseError{"", "bad LOC Latitude minutes", l}
} else {
rr.Latitude += 1000 * 60 * uint32(i)
} }
rr.Latitude += 1000 * 60 * uint32(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { if i, err := strconv.ParseFloat(l.token, 32); err != nil || l.err {
return &ParseError{"", "bad LOC Latitude seconds", l} return &ParseError{"", "bad LOC Latitude seconds", l}
} else { } else {
rr.Latitude += uint32(1000 * i) rr.Latitude += uint32(1000 * i)
@ -626,7 +627,7 @@ East:
// East // East
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err {
return &ParseError{"", "bad LOC Longitude", l} return &ParseError{"", "bad LOC Longitude", l}
} else { } else {
rr.Longitude = 1000 * 60 * 60 * uint32(i) rr.Longitude = 1000 * 60 * 60 * uint32(i)
@ -637,14 +638,14 @@ East:
if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
goto Altitude goto Altitude
} }
if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err {
return &ParseError{"", "bad LOC Longitude minutes", l} return &ParseError{"", "bad LOC Longitude minutes", l}
} else { } else {
rr.Longitude += 1000 * 60 * uint32(i) rr.Longitude += 1000 * 60 * uint32(i)
} }
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { if i, err := strconv.ParseFloat(l.token, 32); err != nil || l.err {
return &ParseError{"", "bad LOC Longitude seconds", l} return &ParseError{"", "bad LOC Longitude seconds", l}
} else { } else {
rr.Longitude += uint32(1000 * i) rr.Longitude += uint32(1000 * i)
@ -667,7 +668,7 @@ Altitude:
if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' {
l.token = l.token[0 : len(l.token)-1] l.token = l.token[0 : len(l.token)-1]
} }
if i, e := strconv.ParseFloat(l.token, 32); e != nil { if i, err := strconv.ParseFloat(l.token, 32); err != nil {
return &ParseError{"", "bad LOC Altitude", l} return &ParseError{"", "bad LOC Altitude", l}
} else { } else {
rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5)
@ -681,23 +682,23 @@ Altitude:
case zString: case zString:
switch count { switch count {
case 0: // Size case 0: // Size
e, m, ok := stringToCm(l.token) exp, m, ok := stringToCm(l.token)
if !ok { if !ok {
return &ParseError{"", "bad LOC Size", l} return &ParseError{"", "bad LOC Size", l}
} }
rr.Size = e&0x0f | m<<4&0xf0 rr.Size = exp&0x0f | m<<4&0xf0
case 1: // HorizPre case 1: // HorizPre
e, m, ok := stringToCm(l.token) exp, m, ok := stringToCm(l.token)
if !ok { if !ok {
return &ParseError{"", "bad LOC HorizPre", l} return &ParseError{"", "bad LOC HorizPre", l}
} }
rr.HorizPre = e&0x0f | m<<4&0xf0 rr.HorizPre = exp&0x0f | m<<4&0xf0
case 2: // VertPre case 2: // VertPre
e, m, ok := stringToCm(l.token) exp, m, ok := stringToCm(l.token)
if !ok { if !ok {
return &ParseError{"", "bad LOC VertPre", l} return &ParseError{"", "bad LOC VertPre", l}
} }
rr.VertPre = e&0x0f | m<<4&0xf0 rr.VertPre = exp&0x0f | m<<4&0xf0
} }
count++ count++
case zBlank: case zBlank:
@ -762,7 +763,7 @@ func (rr *CERT) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next() l, _ := c.Next()
if v, ok := StringToCertType[l.token]; ok { if v, ok := StringToCertType[l.token]; ok {
rr.Type = v rr.Type = v
} else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil { } else if i, err := strconv.ParseUint(l.token, 10, 16); err != nil {
return &ParseError{"", "bad CERT Type", l} return &ParseError{"", "bad CERT Type", l}
} else { } else {
rr.Type = uint16(i) rr.Type = uint16(i)
@ -778,7 +779,7 @@ func (rr *CERT) parse(c *zlexer, o string) *ParseError {
l, _ = c.Next() // zString l, _ = c.Next() // zString
if v, ok := StringToAlgorithm[l.token]; ok { if v, ok := StringToAlgorithm[l.token]; ok {
rr.Algorithm = v rr.Algorithm = v
} else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { } else if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
return &ParseError{"", "bad CERT Algorithm", l} return &ParseError{"", "bad CERT Algorithm", l}
} else { } else {
rr.Algorithm = uint8(i) rr.Algorithm = uint8(i)
@ -812,8 +813,8 @@ func (rr *CSYNC) parse(c *zlexer, o string) *ParseError {
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
j, e = strconv.ParseUint(l.token, 10, 16) j, e1 := strconv.ParseUint(l.token, 10, 16)
if e != nil { if e1 != nil {
// Serial must be a number // Serial must be a number
return &ParseError{"", "bad CSYNC flags", l} return &ParseError{"", "bad CSYNC flags", l}
} }
@ -845,9 +846,7 @@ func (rr *CSYNC) parse(c *zlexer, o string) *ParseError {
return nil return nil
} }
func (rr *SIG) parse(c *zlexer, o string) *ParseError { func (rr *SIG) parse(c *zlexer, o string) *ParseError { return rr.RRSIG.parse(c, o) }
return rr.RRSIG.parse(c, o)
}
func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next() l, _ := c.Next()
@ -868,24 +867,24 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, err := strconv.ParseUint(l.token, 10, 8) i, e := strconv.ParseUint(l.token, 10, 8)
if err != nil || l.err { if e != nil || l.err {
return &ParseError{"", "bad RRSIG Algorithm", l} return &ParseError{"", "bad RRSIG Algorithm", l}
} }
rr.Algorithm = uint8(i) rr.Algorithm = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, err = strconv.ParseUint(l.token, 10, 8) i, e1 := strconv.ParseUint(l.token, 10, 8)
if err != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad RRSIG Labels", l} return &ParseError{"", "bad RRSIG Labels", l}
} }
rr.Labels = uint8(i) rr.Labels = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, err = strconv.ParseUint(l.token, 10, 32) i, e2 := strconv.ParseUint(l.token, 10, 32)
if err != nil || l.err { if e2 != nil || l.err {
return &ParseError{"", "bad RRSIG OrigTtl", l} return &ParseError{"", "bad RRSIG OrigTtl", l}
} }
rr.OrigTtl = uint32(i) rr.OrigTtl = uint32(i)
@ -918,8 +917,8 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, err = strconv.ParseUint(l.token, 10, 16) i, e3 := strconv.ParseUint(l.token, 10, 16)
if err != nil || l.err { if e3 != nil || l.err {
return &ParseError{"", "bad RRSIG KeyTag", l} return &ParseError{"", "bad RRSIG KeyTag", l}
} }
rr.KeyTag = uint16(i) rr.KeyTag = uint16(i)
@ -933,9 +932,9 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
} }
rr.SignerName = name rr.SignerName = name
s, e := endingToString(c, "bad RRSIG Signature") s, e4 := endingToString(c, "bad RRSIG Signature")
if e != nil { if e4 != nil {
return e return e4
} }
rr.Signature = s rr.Signature = s
@ -985,15 +984,15 @@ func (rr *NSEC3) parse(c *zlexer, o string) *ParseError {
rr.Hash = uint8(i) rr.Hash = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 8) i, e1 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad NSEC3 Flags", l} return &ParseError{"", "bad NSEC3 Flags", l}
} }
rr.Flags = uint8(i) rr.Flags = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 16) i, e2 := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err { if e2 != nil || l.err {
return &ParseError{"", "bad NSEC3 Iterations", l} return &ParseError{"", "bad NSEC3 Iterations", l}
} }
rr.Iterations = uint16(i) rr.Iterations = uint16(i)
@ -1050,22 +1049,22 @@ func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError {
rr.Hash = uint8(i) rr.Hash = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 8) i, e1 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad NSEC3PARAM Flags", l} return &ParseError{"", "bad NSEC3PARAM Flags", l}
} }
rr.Flags = uint8(i) rr.Flags = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 16) i, e2 := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err { if e2 != nil || l.err {
return &ParseError{"", "bad NSEC3PARAM Iterations", l} return &ParseError{"", "bad NSEC3PARAM Iterations", l}
} }
rr.Iterations = uint16(i) rr.Iterations = uint16(i)
c.Next() c.Next()
l, _ = c.Next() l, _ = c.Next()
if l.token != "-" { if l.token != "-" {
rr.SaltLength = uint8(len(l.token)) rr.SaltLength = uint8(len(l.token) / 2)
rr.Salt = l.token rr.Salt = l.token
} }
return slurpRemainder(c) return slurpRemainder(c)
@ -1132,15 +1131,15 @@ func (rr *SSHFP) parse(c *zlexer, o string) *ParseError {
rr.Algorithm = uint8(i) rr.Algorithm = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 8) i, e1 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad SSHFP Type", l} return &ParseError{"", "bad SSHFP Type", l}
} }
rr.Type = uint8(i) rr.Type = uint8(i)
c.Next() // zBlank c.Next() // zBlank
s, e1 := endingToString(c, "bad SSHFP Fingerprint") s, e2 := endingToString(c, "bad SSHFP Fingerprint")
if e1 != nil { if e2 != nil {
return e1 return e2
} }
rr.FingerPrint = s rr.FingerPrint = s
return nil return nil
@ -1155,37 +1154,32 @@ func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, typ string) *ParseError {
rr.Flags = uint16(i) rr.Flags = uint16(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() // zString l, _ = c.Next() // zString
i, e = strconv.ParseUint(l.token, 10, 8) i, e1 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad " + typ + " Protocol", l} return &ParseError{"", "bad " + typ + " Protocol", l}
} }
rr.Protocol = uint8(i) rr.Protocol = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() // zString l, _ = c.Next() // zString
i, e = strconv.ParseUint(l.token, 10, 8) i, e2 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e2 != nil || l.err {
return &ParseError{"", "bad " + typ + " Algorithm", l} return &ParseError{"", "bad " + typ + " Algorithm", l}
} }
rr.Algorithm = uint8(i) rr.Algorithm = uint8(i)
s, e1 := endingToString(c, "bad "+typ+" PublicKey") s, e3 := endingToString(c, "bad "+typ+" PublicKey")
if e1 != nil { if e3 != nil {
return e1 return e3
} }
rr.PublicKey = s rr.PublicKey = s
return nil return nil
} }
func (rr *DNSKEY) parse(c *zlexer, o string) *ParseError { func (rr *DNSKEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "DNSKEY") }
return rr.parseDNSKEY(c, o, "DNSKEY") func (rr *KEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "KEY") }
} func (rr *CDNSKEY) parse(c *zlexer, o string) *ParseError { return rr.parseDNSKEY(c, o, "CDNSKEY") }
func (rr *DS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DS") }
func (rr *KEY) parse(c *zlexer, o string) *ParseError { func (rr *DLV) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DLV") }
return rr.parseDNSKEY(c, o, "KEY") func (rr *CDS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "CDS") }
}
func (rr *CDNSKEY) parse(c *zlexer, o string) *ParseError {
return rr.parseDNSKEY(c, o, "CDNSKEY")
}
func (rr *RKEY) parse(c *zlexer, o string) *ParseError { func (rr *RKEY) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next() l, _ := c.Next()
@ -1196,21 +1190,21 @@ func (rr *RKEY) parse(c *zlexer, o string) *ParseError {
rr.Flags = uint16(i) rr.Flags = uint16(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() // zString l, _ = c.Next() // zString
i, e = strconv.ParseUint(l.token, 10, 8) i, e1 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad RKEY Protocol", l} return &ParseError{"", "bad RKEY Protocol", l}
} }
rr.Protocol = uint8(i) rr.Protocol = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() // zString l, _ = c.Next() // zString
i, e = strconv.ParseUint(l.token, 10, 8) i, e2 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e2 != nil || l.err {
return &ParseError{"", "bad RKEY Algorithm", l} return &ParseError{"", "bad RKEY Algorithm", l}
} }
rr.Algorithm = uint8(i) rr.Algorithm = uint8(i)
s, e1 := endingToString(c, "bad RKEY PublicKey") s, e3 := endingToString(c, "bad RKEY PublicKey")
if e1 != nil { if e3 != nil {
return e1 return e3
} }
rr.PublicKey = s rr.PublicKey = s
return nil return nil
@ -1243,15 +1237,15 @@ func (rr *GPOS) parse(c *zlexer, o string) *ParseError {
rr.Longitude = l.token rr.Longitude = l.token
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
_, e = strconv.ParseFloat(l.token, 64) _, e1 := strconv.ParseFloat(l.token, 64)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad GPOS Latitude", l} return &ParseError{"", "bad GPOS Latitude", l}
} }
rr.Latitude = l.token rr.Latitude = l.token
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
_, e = strconv.ParseFloat(l.token, 64) _, e2 := strconv.ParseFloat(l.token, 64)
if e != nil || l.err { if e2 != nil || l.err {
return &ParseError{"", "bad GPOS Altitude", l} return &ParseError{"", "bad GPOS Altitude", l}
} }
rr.Altitude = l.token rr.Altitude = l.token
@ -1267,7 +1261,7 @@ func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError {
rr.KeyTag = uint16(i) rr.KeyTag = uint16(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
if i, e = strconv.ParseUint(l.token, 10, 8); e != nil { if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
tokenUpper := strings.ToUpper(l.token) tokenUpper := strings.ToUpper(l.token)
i, ok := StringToAlgorithm[tokenUpper] i, ok := StringToAlgorithm[tokenUpper]
if !ok || l.err { if !ok || l.err {
@ -1279,31 +1273,19 @@ func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError {
} }
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 8) i, e1 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad " + typ + " DigestType", l} return &ParseError{"", "bad " + typ + " DigestType", l}
} }
rr.DigestType = uint8(i) rr.DigestType = uint8(i)
s, e1 := endingToString(c, "bad "+typ+" Digest") s, e2 := endingToString(c, "bad "+typ+" Digest")
if e1 != nil { if e2 != nil {
return e1 return e2
} }
rr.Digest = s rr.Digest = s
return nil return nil
} }
func (rr *DS) parse(c *zlexer, o string) *ParseError {
return rr.parseDS(c, o, "DS")
}
func (rr *DLV) parse(c *zlexer, o string) *ParseError {
return rr.parseDS(c, o, "DLV")
}
func (rr *CDS) parse(c *zlexer, o string) *ParseError {
return rr.parseDS(c, o, "CDS")
}
func (rr *TA) parse(c *zlexer, o string) *ParseError { func (rr *TA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next() l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16) i, e := strconv.ParseUint(l.token, 10, 16)
@ -1313,7 +1295,7 @@ func (rr *TA) parse(c *zlexer, o string) *ParseError {
rr.KeyTag = uint16(i) rr.KeyTag = uint16(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { if i, err := strconv.ParseUint(l.token, 10, 8); err != nil {
tokenUpper := strings.ToUpper(l.token) tokenUpper := strings.ToUpper(l.token)
i, ok := StringToAlgorithm[tokenUpper] i, ok := StringToAlgorithm[tokenUpper]
if !ok || l.err { if !ok || l.err {
@ -1325,14 +1307,14 @@ func (rr *TA) parse(c *zlexer, o string) *ParseError {
} }
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 8) i, e1 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad TA DigestType", l} return &ParseError{"", "bad TA DigestType", l}
} }
rr.DigestType = uint8(i) rr.DigestType = uint8(i)
s, err := endingToString(c, "bad TA Digest") s, e2 := endingToString(c, "bad TA Digest")
if err != nil { if e2 != nil {
return err return e2
} }
rr.Digest = s rr.Digest = s
return nil return nil
@ -1347,22 +1329,22 @@ func (rr *TLSA) parse(c *zlexer, o string) *ParseError {
rr.Usage = uint8(i) rr.Usage = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 8) i, e1 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad TLSA Selector", l} return &ParseError{"", "bad TLSA Selector", l}
} }
rr.Selector = uint8(i) rr.Selector = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 8) i, e2 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e2 != nil || l.err {
return &ParseError{"", "bad TLSA MatchingType", l} return &ParseError{"", "bad TLSA MatchingType", l}
} }
rr.MatchingType = uint8(i) rr.MatchingType = uint8(i)
// So this needs be e2 (i.e. different than e), because...??t // So this needs be e2 (i.e. different than e), because...??t
s, e2 := endingToString(c, "bad TLSA Certificate") s, e3 := endingToString(c, "bad TLSA Certificate")
if e2 != nil { if e3 != nil {
return e2 return e3
} }
rr.Certificate = s rr.Certificate = s
return nil return nil
@ -1377,22 +1359,22 @@ func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError {
rr.Usage = uint8(i) rr.Usage = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 8) i, e1 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad SMIMEA Selector", l} return &ParseError{"", "bad SMIMEA Selector", l}
} }
rr.Selector = uint8(i) rr.Selector = uint8(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 8) i, e2 := strconv.ParseUint(l.token, 10, 8)
if e != nil || l.err { if e2 != nil || l.err {
return &ParseError{"", "bad SMIMEA MatchingType", l} return &ParseError{"", "bad SMIMEA MatchingType", l}
} }
rr.MatchingType = uint8(i) rr.MatchingType = uint8(i)
// So this needs be e2 (i.e. different than e), because...??t // So this needs be e2 (i.e. different than e), because...??t
s, e2 := endingToString(c, "bad SMIMEA Certificate") s, e3 := endingToString(c, "bad SMIMEA Certificate")
if e2 != nil { if e3 != nil {
return e2 return e3
} }
rr.Certificate = s rr.Certificate = s
return nil return nil
@ -1469,16 +1451,16 @@ func (rr *URI) parse(c *zlexer, o string) *ParseError {
rr.Priority = uint16(i) rr.Priority = uint16(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() l, _ = c.Next()
i, e = strconv.ParseUint(l.token, 10, 16) i, e1 := strconv.ParseUint(l.token, 10, 16)
if e != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad URI Weight", l} return &ParseError{"", "bad URI Weight", l}
} }
rr.Weight = uint16(i) rr.Weight = uint16(i)
c.Next() // zBlank c.Next() // zBlank
s, err := endingToTxtSlice(c, "bad URI Target") s, e2 := endingToTxtSlice(c, "bad URI Target")
if err != nil { if e2 != nil {
return err return e2
} }
if len(s) != 1 { if len(s) != 1 {
return &ParseError{"", "bad URI Target", l} return &ParseError{"", "bad URI Target", l}
@ -1506,9 +1488,9 @@ func (rr *NID) parse(c *zlexer, o string) *ParseError {
rr.Preference = uint16(i) rr.Preference = uint16(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() // zString l, _ = c.Next() // zString
u, err := stringToNodeID(l) u, e1 := stringToNodeID(l)
if err != nil || l.err { if e1 != nil || l.err {
return err return e1
} }
rr.NodeID = u rr.NodeID = u
return slurpRemainder(c) return slurpRemainder(c)
@ -1546,7 +1528,6 @@ func (rr *LP) parse(c *zlexer, o string) *ParseError {
return &ParseError{"", "bad LP Fqdn", l} return &ParseError{"", "bad LP Fqdn", l}
} }
rr.Fqdn = name rr.Fqdn = name
return slurpRemainder(c) return slurpRemainder(c)
} }
@ -1559,9 +1540,9 @@ func (rr *L64) parse(c *zlexer, o string) *ParseError {
rr.Preference = uint16(i) rr.Preference = uint16(i)
c.Next() // zBlank c.Next() // zBlank
l, _ = c.Next() // zString l, _ = c.Next() // zString
u, err := stringToNodeID(l) u, e1 := stringToNodeID(l)
if err != nil || l.err { if e1 != nil || l.err {
return err return e1
} }
rr.Locator64 = u rr.Locator64 = u
return slurpRemainder(c) return slurpRemainder(c)
@ -1624,14 +1605,13 @@ func (rr *PX) parse(c *zlexer, o string) *ParseError {
return &ParseError{"", "bad PX Mapx400", l} return &ParseError{"", "bad PX Mapx400", l}
} }
rr.Mapx400 = mapx400 rr.Mapx400 = mapx400
return slurpRemainder(c) return slurpRemainder(c)
} }
func (rr *CAA) parse(c *zlexer, o string) *ParseError { func (rr *CAA) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next() l, _ := c.Next()
i, err := strconv.ParseUint(l.token, 10, 8) i, e := strconv.ParseUint(l.token, 10, 8)
if err != nil || l.err { if e != nil || l.err {
return &ParseError{"", "bad CAA Flag", l} return &ParseError{"", "bad CAA Flag", l}
} }
rr.Flag = uint8(i) rr.Flag = uint8(i)
@ -1644,9 +1624,9 @@ func (rr *CAA) parse(c *zlexer, o string) *ParseError {
rr.Tag = l.token rr.Tag = l.token
c.Next() // zBlank c.Next() // zBlank
s, e := endingToTxtSlice(c, "bad CAA Value") s, e1 := endingToTxtSlice(c, "bad CAA Value")
if e != nil { if e1 != nil {
return e return e1
} }
if len(s) != 1 { if len(s) != 1 {
return &ParseError{"", "bad CAA Value", l} return &ParseError{"", "bad CAA Value", l}
@ -1667,8 +1647,8 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError {
// Get the key length and key values // Get the key length and key values
l, _ = c.Next() l, _ = c.Next()
i, err := strconv.ParseUint(l.token, 10, 8) i, e := strconv.ParseUint(l.token, 10, 8)
if err != nil || l.err { if e != nil || l.err {
return &ParseError{"", "bad TKEY key length", l} return &ParseError{"", "bad TKEY key length", l}
} }
rr.KeySize = uint16(i) rr.KeySize = uint16(i)
@ -1682,8 +1662,8 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError {
// Get the otherdata length and string data // Get the otherdata length and string data
l, _ = c.Next() l, _ = c.Next()
i, err = strconv.ParseUint(l.token, 10, 8) i, e1 := strconv.ParseUint(l.token, 10, 8)
if err != nil || l.err { if e1 != nil || l.err {
return &ParseError{"", "bad TKEY otherdata length", l} return &ParseError{"", "bad TKEY otherdata length", l}
} }
rr.OtherLen = uint16(i) rr.OtherLen = uint16(i)
@ -1693,6 +1673,71 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError {
return &ParseError{"", "bad TKEY otherday", l} return &ParseError{"", "bad TKEY otherday", l}
} }
rr.OtherData = l.token rr.OtherData = l.token
return nil
}
func (rr *APL) parse(c *zlexer, o string) *ParseError {
var prefixes []APLPrefix
for {
l, _ := c.Next()
if l.value == zNewline || l.value == zEOF {
break
}
if l.value == zBlank && prefixes != nil {
continue
}
if l.value != zString {
return &ParseError{"", "unexpected APL field", l}
}
// Expected format: [!]afi:address/prefix
colon := strings.IndexByte(l.token, ':')
if colon == -1 {
return &ParseError{"", "missing colon in APL field", l}
}
family, cidr := l.token[:colon], l.token[colon+1:]
var negation bool
if family != "" && family[0] == '!' {
negation = true
family = family[1:]
}
afi, e := strconv.ParseUint(family, 10, 16)
if e != nil {
return &ParseError{"", "failed to parse APL family: " + e.Error(), l}
}
var addrLen int
switch afi {
case 1:
addrLen = net.IPv4len
case 2:
addrLen = net.IPv6len
default:
return &ParseError{"", "unrecognized APL family", l}
}
ip, subnet, e1 := net.ParseCIDR(cidr)
if e1 != nil {
return &ParseError{"", "failed to parse APL address: " + e1.Error(), l}
}
if !ip.Equal(subnet.IP) {
return &ParseError{"", "extra bits in APL address", l}
}
if len(subnet.IP) != addrLen {
return &ParseError{"", "address mismatch with the APL family", l}
}
prefixes = append(prefixes, APLPrefix{
Negation: negation,
Network: *subnet,
})
}
rr.Prefixes = prefixes
return nil return nil
} }
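The function above is what backs zone-file input for the new APL type (RFC 3123). A short usage sketch (illustrative, not from this commit), assuming the vendored miekg/dns v1.1.31 API; names and prefixes are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Presentation format per RFC 3123: [!]afi:address/prefix
	rr, err := dns.NewRR("example.org. 3600 IN APL 1:192.168.32.0/21 !1:192.168.38.0/28")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range rr.(*dns.APL).Prefixes {
		fmt.Println(p.Negation, p.Network.String())
	}
}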
@ -1,7 +1,6 @@
package dns package dns
import ( import (
"strings"
"sync" "sync"
) )
@ -36,7 +35,7 @@ func (mux *ServeMux) match(q string, t uint16) Handler {
return nil return nil
} }
q = strings.ToLower(q) q = CanonicalName(q)
var handler Handler var handler Handler
for off, end := 0, false; !end; off, end = NextLabel(q, off) { for off, end := 0, false; !end; off, end = NextLabel(q, off) {
@ -66,7 +65,7 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
if mux.z == nil { if mux.z == nil {
mux.z = make(map[string]Handler) mux.z = make(map[string]Handler)
} }
mux.z[Fqdn(pattern)] = handler mux.z[CanonicalName(pattern)] = handler
mux.m.Unlock() mux.m.Unlock()
} }
@ -81,7 +80,7 @@ func (mux *ServeMux) HandleRemove(pattern string) {
panic("dns: invalid pattern " + pattern) panic("dns: invalid pattern " + pattern)
} }
mux.m.Lock() mux.m.Lock()
delete(mux.z, Fqdn(pattern)) delete(mux.z, CanonicalName(pattern))
mux.m.Unlock() mux.m.Unlock()
} }
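With ServeMux now keying patterns through CanonicalName rather than Fqdn, registration and lookup agree on letter case. A small sketch under that assumption (illustrative only; the pattern and listen address are placeholders):

package main

import "github.com/miekg/dns"

func main() {
	mux := dns.NewServeMux()
	// The pattern is canonicalized (lowercased and made fully qualified), so a
	// query for "example.org." reaches this handler despite the mixed case.
	mux.HandleFunc("Example.ORG", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})
	_ = dns.ListenAndServe("127.0.0.1:5353", "udp", mux)
}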
vendor/github.com/miekg/dns/tsig.go
@ -18,7 +18,9 @@ import (
const ( const (
HmacMD5 = "hmac-md5.sig-alg.reg.int." HmacMD5 = "hmac-md5.sig-alg.reg.int."
HmacSHA1 = "hmac-sha1." HmacSHA1 = "hmac-sha1."
HmacSHA224 = "hmac-sha224."
HmacSHA256 = "hmac-sha256." HmacSHA256 = "hmac-sha256."
HmacSHA384 = "hmac-sha384."
HmacSHA512 = "hmac-sha512." HmacSHA512 = "hmac-sha512."
) )
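The added HmacSHA224 and HmacSHA384 constants plug into the existing TSIG helpers. A hedged sketch of signing a query with one of them (illustrative, not from this commit); the key name, secret, and server address are invented:

package main

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)
	// Attach a TSIG RR using one of the newly supported algorithms.
	m.SetTsig("tsig-key.example.org.", dns.HmacSHA384, 300, time.Now().Unix())

	c := new(dns.Client)
	c.TsigSecret = map[string]string{"tsig-key.example.org.": "c2VjcmV0c2VjcmV0"} // base64 key material
	if _, _, err := c.Exchange(m, "192.0.2.53:53"); err != nil {
		fmt.Println("exchange failed:", err) // TSIG verification errors also surface here
	}
}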
@ -111,32 +113,35 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
if err != nil { if err != nil {
return nil, "", err return nil, "", err
} }
buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) buf, err := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
if err != nil {
return nil, "", err
}
t := new(TSIG) t := new(TSIG)
var h hash.Hash var h hash.Hash
switch strings.ToLower(rr.Algorithm) { switch CanonicalName(rr.Algorithm) {
case HmacMD5: case HmacMD5:
h = hmac.New(md5.New, rawsecret) h = hmac.New(md5.New, rawsecret)
case HmacSHA1: case HmacSHA1:
h = hmac.New(sha1.New, rawsecret) h = hmac.New(sha1.New, rawsecret)
case HmacSHA224:
h = hmac.New(sha256.New224, rawsecret)
case HmacSHA256: case HmacSHA256:
h = hmac.New(sha256.New, rawsecret) h = hmac.New(sha256.New, rawsecret)
case HmacSHA384:
h = hmac.New(sha512.New384, rawsecret)
case HmacSHA512: case HmacSHA512:
h = hmac.New(sha512.New, rawsecret) h = hmac.New(sha512.New, rawsecret)
default: default:
return nil, "", ErrKeyAlg return nil, "", ErrKeyAlg
} }
h.Write(buf) h.Write(buf)
// Copy all TSIG fields except MAC and its size, which are filled using the computed digest.
*t = *rr
t.MAC = hex.EncodeToString(h.Sum(nil)) t.MAC = hex.EncodeToString(h.Sum(nil))
t.MACSize = uint16(len(t.MAC) / 2) // Size is half! t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0}
t.Fudge = rr.Fudge
t.TimeSigned = rr.TimeSigned
t.Algorithm = rr.Algorithm
t.OrigId = m.Id
tbuf := make([]byte, Len(t)) tbuf := make([]byte, Len(t))
off, err := PackRR(t, tbuf, 0, nil, false) off, err := PackRR(t, tbuf, 0, nil, false)
if err != nil { if err != nil {
@ -153,6 +158,11 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
// If the signature does not validate err contains the // If the signature does not validate err contains the
// error, otherwise it is nil. // error, otherwise it is nil.
func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
return tsigVerify(msg, secret, requestMAC, timersOnly, uint64(time.Now().Unix()))
}
// actual implementation of TsigVerify, taking the current time ('now') as a parameter for the convenience of tests.
func tsigVerify(msg []byte, secret, requestMAC string, timersOnly bool, now uint64) error {
rawsecret, err := fromBase64([]byte(secret)) rawsecret, err := fromBase64([]byte(secret))
if err != nil { if err != nil {
return err return err
@ -168,27 +178,23 @@ func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
return err return err
} }
buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) buf, err := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
if err != nil {
// Fudge factor works both ways. A message can arrive before it was signed because return err
// of clock skew.
now := uint64(time.Now().Unix())
ti := now - tsig.TimeSigned
if now < tsig.TimeSigned {
ti = tsig.TimeSigned - now
}
if uint64(tsig.Fudge) < ti {
return ErrTime
} }
var h hash.Hash var h hash.Hash
switch strings.ToLower(tsig.Algorithm) { switch CanonicalName(tsig.Algorithm) {
case HmacMD5: case HmacMD5:
h = hmac.New(md5.New, rawsecret) h = hmac.New(md5.New, rawsecret)
case HmacSHA1: case HmacSHA1:
h = hmac.New(sha1.New, rawsecret) h = hmac.New(sha1.New, rawsecret)
case HmacSHA224:
h = hmac.New(sha256.New224, rawsecret)
case HmacSHA256: case HmacSHA256:
h = hmac.New(sha256.New, rawsecret) h = hmac.New(sha256.New, rawsecret)
case HmacSHA384:
h = hmac.New(sha512.New384, rawsecret)
case HmacSHA512: case HmacSHA512:
h = hmac.New(sha512.New, rawsecret) h = hmac.New(sha512.New, rawsecret)
default: default:
@ -198,11 +204,24 @@ func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
if !hmac.Equal(h.Sum(nil), msgMAC) { if !hmac.Equal(h.Sum(nil), msgMAC) {
return ErrSig return ErrSig
} }
// Fudge factor works both ways. A message can arrive before it was signed because
// of clock skew.
// We check this after verifying the signature, following draft-ietf-dnsop-rfc2845bis
// instead of RFC2845, in order to prevent a security vulnerability as reported in CVE-2017-3142/3143.
ti := now - tsig.TimeSigned
if now < tsig.TimeSigned {
ti = tsig.TimeSigned - now
}
if uint64(tsig.Fudge) < ti {
return ErrTime
}
return nil return nil
} }
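The fudge check above is symmetric in time and now runs only after the MAC comparison (see the CVE note in the comment). A tiny standalone sketch of the same absolute-difference rule on unsigned timestamps (illustrative, not library code):

package main

import "fmt"

// withinFudge reports whether two unsigned timestamps differ by at most
// fudge seconds, mirroring the order-independent subtraction used above.
func withinFudge(now, signed uint64, fudge uint16) bool {
	ti := now - signed
	if now < signed {
		ti = signed - now
	}
	return ti <= uint64(fudge)
}

func main() {
	fmt.Println(withinFudge(1000, 1290, 300)) // true: 290s of skew, 300s allowed
	fmt.Println(withinFudge(1000, 1301, 300)) // false: signed 301s in the future
}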
// Create a wiredata buffer for the MAC calculation. // Create a wiredata buffer for the MAC calculation.
func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) ([]byte, error) {
var buf []byte var buf []byte
if rr.TimeSigned == 0 { if rr.TimeSigned == 0 {
rr.TimeSigned = uint64(time.Now().Unix()) rr.TimeSigned = uint64(time.Now().Unix())
@ -219,7 +238,10 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
m.MACSize = uint16(len(requestMAC) / 2) m.MACSize = uint16(len(requestMAC) / 2)
m.MAC = requestMAC m.MAC = requestMAC
buf = make([]byte, len(requestMAC)) // long enough buf = make([]byte, len(requestMAC)) // long enough
n, _ := packMacWire(m, buf) n, err := packMacWire(m, buf)
if err != nil {
return nil, err
}
buf = buf[:n] buf = buf[:n]
} }
@ -228,20 +250,26 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
tsig := new(timerWireFmt) tsig := new(timerWireFmt)
tsig.TimeSigned = rr.TimeSigned tsig.TimeSigned = rr.TimeSigned
tsig.Fudge = rr.Fudge tsig.Fudge = rr.Fudge
n, _ := packTimerWire(tsig, tsigvar) n, err := packTimerWire(tsig, tsigvar)
if err != nil {
return nil, err
}
tsigvar = tsigvar[:n] tsigvar = tsigvar[:n]
} else { } else {
tsig := new(tsigWireFmt) tsig := new(tsigWireFmt)
tsig.Name = strings.ToLower(rr.Hdr.Name) tsig.Name = CanonicalName(rr.Hdr.Name)
tsig.Class = ClassANY tsig.Class = ClassANY
tsig.Ttl = rr.Hdr.Ttl tsig.Ttl = rr.Hdr.Ttl
tsig.Algorithm = strings.ToLower(rr.Algorithm) tsig.Algorithm = CanonicalName(rr.Algorithm)
tsig.TimeSigned = rr.TimeSigned tsig.TimeSigned = rr.TimeSigned
tsig.Fudge = rr.Fudge tsig.Fudge = rr.Fudge
tsig.Error = rr.Error tsig.Error = rr.Error
tsig.OtherLen = rr.OtherLen tsig.OtherLen = rr.OtherLen
tsig.OtherData = rr.OtherData tsig.OtherData = rr.OtherData
n, _ := packTsigWire(tsig, tsigvar) n, err := packTsigWire(tsig, tsigvar)
if err != nil {
return nil, err
}
tsigvar = tsigvar[:n] tsigvar = tsigvar[:n]
} }
@ -251,7 +279,7 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
} else { } else {
buf = append(msgbuf, tsigvar...) buf = append(msgbuf, tsigvar...)
} }
return buf return buf, nil
} }
// Strip the TSIG from the raw message. // Strip the TSIG from the raw message.
vendor/github.com/miekg/dns/types.go
@ -1,6 +1,7 @@
package dns package dns
import ( import (
"bytes"
"fmt" "fmt"
"net" "net"
"strconv" "strconv"
@ -61,6 +62,7 @@ const (
TypeCERT uint16 = 37 TypeCERT uint16 = 37
TypeDNAME uint16 = 39 TypeDNAME uint16 = 39
TypeOPT uint16 = 41 // EDNS TypeOPT uint16 = 41 // EDNS
TypeAPL uint16 = 42
TypeDS uint16 = 43 TypeDS uint16 = 43
TypeSSHFP uint16 = 44 TypeSSHFP uint16 = 44
TypeRRSIG uint16 = 46 TypeRRSIG uint16 = 46
@ -163,11 +165,11 @@ const (
_RD = 1 << 8 // recursion desired _RD = 1 << 8 // recursion desired
_RA = 1 << 7 // recursion available _RA = 1 << 7 // recursion available
_Z = 1 << 6 // Z _Z = 1 << 6 // Z
_AD = 1 << 5 // authticated data _AD = 1 << 5 // authenticated data
_CD = 1 << 4 // checking disabled _CD = 1 << 4 // checking disabled
) )
// Various constants used in the LOC RR, See RFC 1887. // Various constants used in the LOC RR. See RFC 1887.
const ( const (
LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2.
LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
@ -207,8 +209,11 @@ var CertTypeToString = map[uint16]string{
//go:generate go run types_generate.go //go:generate go run types_generate.go
// Question holds a DNS question. There can be multiple questions in the // Question holds a DNS question. Usually there is just one. While the
// question section of a message. Usually there is just one. // original DNS RFCs allow multiple questions in the question section of a
// message, in practice it never works. Because most DNS servers see multiple
// questions as an error, it is recommended to only have one question per
// message.
type Question struct { type Question struct {
Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed)
Qtype uint16 Qtype uint16
@ -229,7 +234,7 @@ func (q *Question) String() (s string) {
return s return s
} }
// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY // ANY is a wild card record. See RFC 1035, Section 3.2.3. ANY
// is named "*" there. // is named "*" there.
type ANY struct { type ANY struct {
Hdr RR_Header Hdr RR_Header
@ -440,45 +445,38 @@ func sprintName(s string) string {
var dst strings.Builder var dst strings.Builder
for i := 0; i < len(s); { for i := 0; i < len(s); {
if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' { if s[i] == '.' {
if dst.Len() != 0 { if dst.Len() != 0 {
dst.WriteString(s[i : i+2]) dst.WriteByte('.')
} }
i += 2 i++
continue continue
} }
b, n := nextByte(s, i) b, n := nextByte(s, i)
if n == 0 { if n == 0 {
i++ // Drop "dangling" incomplete escapes.
continue if dst.Len() == 0 {
} return s[:i]
if b == '.' {
if dst.Len() != 0 {
dst.WriteByte('.')
} }
i += n break
continue
} }
switch b { if isDomainNameLabelSpecial(b) {
case ' ', '\'', '@', ';', '(', ')', '"', '\\': // additional chars to escape
if dst.Len() == 0 { if dst.Len() == 0 {
dst.Grow(len(s) * 2) dst.Grow(len(s) * 2)
dst.WriteString(s[:i]) dst.WriteString(s[:i])
} }
dst.WriteByte('\\') dst.WriteByte('\\')
dst.WriteByte(b) dst.WriteByte(b)
default: } else if b < ' ' || b > '~' { // unprintable, use \DDD
if ' ' <= b && b <= '~' { if dst.Len() == 0 {
if dst.Len() != 0 { dst.Grow(len(s) * 2)
dst.WriteByte(b) dst.WriteString(s[:i])
} }
} else { dst.WriteString(escapeByte(b))
if dst.Len() == 0 { } else {
dst.Grow(len(s) * 2) if dst.Len() != 0 {
dst.WriteString(s[:i]) dst.WriteByte(b)
}
dst.WriteString(escapeByte(b))
} }
} }
i += n i += n
@ -501,15 +499,10 @@ func sprintTxtOctet(s string) string {
} }
b, n := nextByte(s, i) b, n := nextByte(s, i)
switch { if n == 0 {
case n == 0:
i++ // dangling back slash i++ // dangling back slash
case b == '.': } else {
dst.WriteByte('.') writeTXTStringByte(&dst, b)
case b < ' ' || b > '~':
dst.WriteString(escapeByte(b))
default:
dst.WriteByte(b)
} }
i += n i += n
} }
@ -585,6 +578,17 @@ func escapeByte(b byte) string {
return escapedByteLarge[int(b)*4 : int(b)*4+4] return escapedByteLarge[int(b)*4 : int(b)*4+4]
} }
// isDomainNameLabelSpecial returns true if
// a domain name label byte should be prefixed
// with an escaping backslash.
func isDomainNameLabelSpecial(b byte) bool {
switch b {
case '.', ' ', '\'', '@', ';', '(', ')', '"', '\\':
return true
}
return false
}
func nextByte(s string, offset int) (byte, int) { func nextByte(s string, offset int) (byte, int) {
if offset >= len(s) { if offset >= len(s) {
return 0, 0 return 0, 0
@ -757,8 +761,8 @@ type LOC struct {
Altitude uint32 Altitude uint32
} }
// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent // cmToM takes a cm value expressed in RFC 1876 SIZE mantissa/exponent
// format and returns a string in m (two decimals for the cm) // format and returns a string in m (two decimals for the cm).
func cmToM(m, e uint8) string { func cmToM(m, e uint8) string {
if e < 2 { if e < 2 {
if e == 1 { if e == 1 {
@ -1116,6 +1120,7 @@ type URI struct {
Target string `dns:"octet"` Target string `dns:"octet"`
} }
// rr.Target to be parsed as a sequence of character encoded octets according to RFC 3986
func (rr *URI) String() string { func (rr *URI) String() string {
return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) +
" " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target)
@ -1277,6 +1282,7 @@ type CAA struct {
Value string `dns:"octet"` Value string `dns:"octet"`
} }
// rr.Value Is the character-string encoding of the value field as specified in RFC 1035, Section 5.1.
func (rr *CAA) String() string { func (rr *CAA) String() string {
return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value)
} }
@ -1353,6 +1359,88 @@ func (rr *CSYNC) len(off int, compression map[string]struct{}) int {
return l return l
} }
// APL RR. See RFC 3123.
type APL struct {
Hdr RR_Header
Prefixes []APLPrefix `dns:"apl"`
}
// APLPrefix is an address prefix hold by an APL record.
type APLPrefix struct {
Negation bool
Network net.IPNet
}
// String returns presentation form of the APL record.
func (rr *APL) String() string {
var sb strings.Builder
sb.WriteString(rr.Hdr.String())
for i, p := range rr.Prefixes {
if i > 0 {
sb.WriteByte(' ')
}
sb.WriteString(p.str())
}
return sb.String()
}
// str returns presentation form of the APL prefix.
func (p *APLPrefix) str() string {
var sb strings.Builder
if p.Negation {
sb.WriteByte('!')
}
switch len(p.Network.IP) {
case net.IPv4len:
sb.WriteByte('1')
case net.IPv6len:
sb.WriteByte('2')
}
sb.WriteByte(':')
switch len(p.Network.IP) {
case net.IPv4len:
sb.WriteString(p.Network.IP.String())
case net.IPv6len:
// add prefix for IPv4-mapped IPv6
if v4 := p.Network.IP.To4(); v4 != nil {
sb.WriteString("::ffff:")
}
sb.WriteString(p.Network.IP.String())
}
sb.WriteByte('/')
prefix, _ := p.Network.Mask.Size()
sb.WriteString(strconv.Itoa(prefix))
return sb.String()
}
// equals reports whether two APL prefixes are identical.
func (a *APLPrefix) equals(b *APLPrefix) bool {
return a.Negation == b.Negation &&
bytes.Equal(a.Network.IP, b.Network.IP) &&
bytes.Equal(a.Network.Mask, b.Network.Mask)
}
// copy returns a copy of the APL prefix.
func (p *APLPrefix) copy() APLPrefix {
return APLPrefix{
Negation: p.Negation,
Network: copyNet(p.Network),
}
}
// len returns size of the prefix in wire format.
func (p *APLPrefix) len() int {
// 4-byte header and the network address prefix (see Section 4 of RFC 3123)
prefix, _ := p.Network.Mask.Size()
return 4 + (prefix+7)/8
}
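Records of the new APL type can also be built programmatically through these definitions. A brief sketch (illustrative, not from this commit), assuming the exported APL/APLPrefix fields shown above; the addresses are examples only:

package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	_, v4, _ := net.ParseCIDR("192.168.32.0/21")
	_, v6, _ := net.ParseCIDR("2001:db8::/32")
	rr := &dns.APL{
		Hdr: dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeAPL, Class: dns.ClassINET, Ttl: 3600},
		Prefixes: []dns.APLPrefix{
			{Network: *v4},
			{Negation: true, Network: *v6},
		},
	}
	fmt.Println(rr) // rendered by APL.String / APLPrefix.str above
}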
// TimeToString translates the RRSIG's incep. and expir. times to the // TimeToString translates the RRSIG's incep. and expir. times to the
// string representation used when printing the record. // string representation used when printing the record.
// It takes serial arithmetic (RFC 1982) into account. // It takes serial arithmetic (RFC 1982) into account.
@ -1409,6 +1497,17 @@ func copyIP(ip net.IP) net.IP {
return p return p
} }
// copyNet returns a copy of a subnet.
func copyNet(n net.IPNet) net.IPNet {
m := make(net.IPMask, len(n.Mask))
copy(m, n.Mask)
return net.IPNet{
IP: copyIP(n.IP),
Mask: m,
}
}
// SplitN splits a string into N sized string chunks. // SplitN splits a string into N sized string chunks.
// This might become an exported function once. // This might become an exported function once.
func splitN(s string, n int) []string { func splitN(s string, n int) []string {
@ -3,13 +3,13 @@ package dns
import "fmt" import "fmt"
// Version is current version of this library. // Version is current version of this library.
var Version = V{1, 1, 26} var Version = v{1, 1, 31}
// V holds the version of this library. // v holds the version of this library.
type V struct { type v struct {
Major, Minor, Patch int Major, Minor, Patch int
} }
func (v V) String() string { func (v v) String() string {
return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
} }
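The version struct type becomes unexported here, but the Version variable and its String method are unchanged for callers; a quick check under that assumption (illustrative only):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println("miekg/dns", dns.Version.String()) // 1.1.31
}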
@ -52,6 +52,23 @@ func (r1 *ANY) isDuplicate(_r2 RR) bool {
return true return true
} }
func (r1 *APL) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*APL)
if !ok {
return false
}
_ = r2
if len(r1.Prefixes) != len(r2.Prefixes) {
return false
}
for i := 0; i < len(r1.Prefixes); i++ {
if !r1.Prefixes[i].equals(&r2.Prefixes[i]) {
return false
}
}
return true
}
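These generated comparisons feed the exported dns.IsDuplicate helper. A short sketch of the APL case (illustrative, not from this commit); the records are made up:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	a, _ := dns.NewRR("example.org. 3600 IN APL 1:10.0.0.0/8")
	b, _ := dns.NewRR("EXAMPLE.ORG. 3600 IN APL 1:10.0.0.0/8")
	// Owner names compare case-insensitively; the prefixes use the
	// APLPrefix comparison generated above.
	fmt.Println(dns.IsDuplicate(a, b)) // true
}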
func (r1 *AVC) isDuplicate(_r2 RR) bool { func (r1 *AVC) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*AVC) r2, ok := _r2.(*AVC)
if !ok { if !ok {
@ -87,6 +104,48 @@ func (r1 *CAA) isDuplicate(_r2 RR) bool {
return true return true
} }
func (r1 *CDNSKEY) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*CDNSKEY)
if !ok {
return false
}
_ = r2
if r1.Flags != r2.Flags {
return false
}
if r1.Protocol != r2.Protocol {
return false
}
if r1.Algorithm != r2.Algorithm {
return false
}
if r1.PublicKey != r2.PublicKey {
return false
}
return true
}
func (r1 *CDS) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*CDS)
if !ok {
return false
}
_ = r2
if r1.KeyTag != r2.KeyTag {
return false
}
if r1.Algorithm != r2.Algorithm {
return false
}
if r1.DigestType != r2.DigestType {
return false
}
if r1.Digest != r2.Digest {
return false
}
return true
}
func (r1 *CERT) isDuplicate(_r2 RR) bool { func (r1 *CERT) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*CERT) r2, ok := _r2.(*CERT)
if !ok { if !ok {
@ -155,6 +214,27 @@ func (r1 *DHCID) isDuplicate(_r2 RR) bool {
return true return true
} }
func (r1 *DLV) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*DLV)
if !ok {
return false
}
_ = r2
if r1.KeyTag != r2.KeyTag {
return false
}
if r1.Algorithm != r2.Algorithm {
return false
}
if r1.DigestType != r2.DigestType {
return false
}
if r1.Digest != r2.Digest {
return false
}
return true
}
func (r1 *DNAME) isDuplicate(_r2 RR) bool { func (r1 *DNAME) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*DNAME) r2, ok := _r2.(*DNAME)
if !ok { if !ok {
@ -322,6 +402,27 @@ func (r1 *HIP) isDuplicate(_r2 RR) bool {
return true return true
} }
func (r1 *KEY) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*KEY)
if !ok {
return false
}
_ = r2
if r1.Flags != r2.Flags {
return false
}
if r1.Protocol != r2.Protocol {
return false
}
if r1.Algorithm != r2.Algorithm {
return false
}
if r1.PublicKey != r2.PublicKey {
return false
}
return true
}
func (r1 *KX) isDuplicate(_r2 RR) bool { func (r1 *KX) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*KX) r2, ok := _r2.(*KX)
if !ok { if !ok {
@ -832,6 +933,42 @@ func (r1 *RT) isDuplicate(_r2 RR) bool {
return true return true
} }
func (r1 *SIG) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*SIG)
if !ok {
return false
}
_ = r2
if r1.TypeCovered != r2.TypeCovered {
return false
}
if r1.Algorithm != r2.Algorithm {
return false
}
if r1.Labels != r2.Labels {
return false
}
if r1.OrigTtl != r2.OrigTtl {
return false
}
if r1.Expiration != r2.Expiration {
return false
}
if r1.Inception != r2.Inception {
return false
}
if r1.KeyTag != r2.KeyTag {
return false
}
if !isDuplicateName(r1.SignerName, r2.SignerName) {
return false
}
if r1.Signature != r2.Signature {
return false
}
return true
}
func (r1 *SMIMEA) isDuplicate(_r2 RR) bool { func (r1 *SMIMEA) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*SMIMEA) r2, ok := _r2.(*SMIMEA)
if !ok { if !ok {
vendor/github.com/miekg/dns/zmsg.go
@ -36,6 +36,14 @@ func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bo
return off, nil return off, nil
} }
func (rr *APL) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
off, err = packDataApl(rr.Prefixes, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func (rr *AVC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { func (rr *AVC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
off, err = packStringTxt(rr.Txt, msg, off) off, err = packStringTxt(rr.Txt, msg, off)
if err != nil { if err != nil {
@ -1127,6 +1135,17 @@ func (rr *ANY) unpack(msg []byte, off int) (off1 int, err error) {
return off, nil return off, nil
} }
func (rr *APL) unpack(msg []byte, off int) (off1 int, err error) {
rdStart := off
_ = rdStart
rr.Prefixes, off, err = unpackDataApl(msg, off)
if err != nil {
return off, err
}
return off, nil
}
func (rr *AVC) unpack(msg []byte, off int) (off1 int, err error) { func (rr *AVC) unpack(msg []byte, off int) (off1 int, err error) {
rdStart := off rdStart := off
_ = rdStart _ = rdStart
@ -13,6 +13,7 @@ var TypeToRR = map[uint16]func() RR{
TypeAAAA: func() RR { return new(AAAA) }, TypeAAAA: func() RR { return new(AAAA) },
TypeAFSDB: func() RR { return new(AFSDB) }, TypeAFSDB: func() RR { return new(AFSDB) },
TypeANY: func() RR { return new(ANY) }, TypeANY: func() RR { return new(ANY) },
TypeAPL: func() RR { return new(APL) },
TypeAVC: func() RR { return new(AVC) }, TypeAVC: func() RR { return new(AVC) },
TypeCAA: func() RR { return new(CAA) }, TypeCAA: func() RR { return new(CAA) },
TypeCDNSKEY: func() RR { return new(CDNSKEY) }, TypeCDNSKEY: func() RR { return new(CDNSKEY) },
@ -87,6 +88,7 @@ var TypeToString = map[uint16]string{
TypeAAAA: "AAAA", TypeAAAA: "AAAA",
TypeAFSDB: "AFSDB", TypeAFSDB: "AFSDB",
TypeANY: "ANY", TypeANY: "ANY",
TypeAPL: "APL",
TypeATMA: "ATMA", TypeATMA: "ATMA",
TypeAVC: "AVC", TypeAVC: "AVC",
TypeAXFR: "AXFR", TypeAXFR: "AXFR",
@ -169,6 +171,7 @@ func (rr *A) Header() *RR_Header { return &rr.Hdr }
func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
func (rr *ANY) Header() *RR_Header { return &rr.Hdr } func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
func (rr *APL) Header() *RR_Header { return &rr.Hdr }
func (rr *AVC) Header() *RR_Header { return &rr.Hdr } func (rr *AVC) Header() *RR_Header { return &rr.Hdr }
func (rr *CAA) Header() *RR_Header { return &rr.Hdr } func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr }
@ -262,6 +265,13 @@ func (rr *ANY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression) l := rr.Hdr.len(off, compression)
return l return l
} }
func (rr *APL) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.Prefixes {
l += x.len()
}
return l
}
func (rr *AVC) len(off int, compression map[string]struct{}) int { func (rr *AVC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression) l := rr.Hdr.len(off, compression)
for _, x := range rr.Txt { for _, x := range rr.Txt {
@ -673,6 +683,13 @@ func (rr *AFSDB) copy() RR {
func (rr *ANY) copy() RR { func (rr *ANY) copy() RR {
return &ANY{rr.Hdr} return &ANY{rr.Hdr}
} }
func (rr *APL) copy() RR {
Prefixes := make([]APLPrefix, len(rr.Prefixes))
for i, e := range rr.Prefixes {
Prefixes[i] = e.copy()
}
return &APL{rr.Hdr, Prefixes}
}
func (rr *AVC) copy() RR { func (rr *AVC) copy() RR {
Txt := make([]string, len(rr.Txt)) Txt := make([]string, len(rr.Txt))
copy(Txt, rr.Txt) copy(Txt, rr.Txt)
@ -681,6 +698,12 @@ func (rr *AVC) copy() RR {
func (rr *CAA) copy() RR { func (rr *CAA) copy() RR {
return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value} return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value}
} }
func (rr *CDNSKEY) copy() RR {
return &CDNSKEY{*rr.DNSKEY.copy().(*DNSKEY)}
}
func (rr *CDS) copy() RR {
return &CDS{*rr.DS.copy().(*DS)}
}
func (rr *CERT) copy() RR { func (rr *CERT) copy() RR {
return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
} }
@ -695,6 +718,9 @@ func (rr *CSYNC) copy() RR {
func (rr *DHCID) copy() RR { func (rr *DHCID) copy() RR {
return &DHCID{rr.Hdr, rr.Digest} return &DHCID{rr.Hdr, rr.Digest}
} }
func (rr *DLV) copy() RR {
return &DLV{*rr.DS.copy().(*DS)}
}
func (rr *DNAME) copy() RR { func (rr *DNAME) copy() RR {
return &DNAME{rr.Hdr, rr.Target} return &DNAME{rr.Hdr, rr.Target}
} }
@ -727,6 +753,9 @@ func (rr *HIP) copy() RR {
copy(RendezvousServers, rr.RendezvousServers) copy(RendezvousServers, rr.RendezvousServers)
return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
} }
func (rr *KEY) copy() RR {
return &KEY{*rr.DNSKEY.copy().(*DNSKEY)}
}
func (rr *KX) copy() RR { func (rr *KX) copy() RR {
return &KX{rr.Hdr, rr.Preference, rr.Exchanger} return &KX{rr.Hdr, rr.Preference, rr.Exchanger}
} }
@ -830,6 +859,9 @@ func (rr *RRSIG) copy() RR {
func (rr *RT) copy() RR { func (rr *RT) copy() RR {
return &RT{rr.Hdr, rr.Preference, rr.Host} return &RT{rr.Hdr, rr.Preference, rr.Host}
} }
func (rr *SIG) copy() RR {
return &SIG{*rr.RRSIG.copy().(*RRSIG)}
}
func (rr *SMIMEA) copy() RR { func (rr *SMIMEA) copy() RR {
return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
} }
@ -557,8 +557,6 @@ type dhGEXSHA struct {
hashFunc crypto.Hash hashFunc crypto.Hash
} }
const numMRTests = 64
const ( const (
dhGroupExchangeMinimumBits = 2048 dhGroupExchangeMinimumBits = 2048
dhGroupExchangePreferredBits = 2048 dhGroupExchangePreferredBits = 2048
@ -602,15 +600,8 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
gex.p = kexDHGexGroup.P gex.p = kexDHGexGroup.P
gex.g = kexDHGexGroup.G gex.g = kexDHGexGroup.G
// Check if p is safe by verifing that p and (p-1)/2 are primes
one := big.NewInt(1)
var pHalf = &big.Int{}
pHalf.Rsh(gex.p, 1)
if !gex.p.ProbablyPrime(numMRTests) || !pHalf.ProbablyPrime(numMRTests) {
return nil, fmt.Errorf("ssh: server provided gex p is not safe")
}
// Check if g is safe by verifing that g > 1 and g < p - 1 // Check if g is safe by verifing that g > 1 and g < p - 1
one := big.NewInt(1)
var pMinusOne = &big.Int{} var pMinusOne = &big.Int{}
pMinusOne.Sub(gex.p, one) pMinusOne.Sub(gex.p, one)
if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 { if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 {
@ -618,6 +609,8 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
} }
// Send GexInit // Send GexInit
var pHalf = &big.Int{}
pHalf.Rsh(gex.p, 1)
x, err := rand.Int(randSource, pHalf) x, err := rand.Int(randSource, pHalf)
if err != nil { if err != nil {
return nil, err return nil, err
@ -1694,6 +1694,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
if len(data) > 0 { if len(data) > 0 {
wrote, err := st.body.Write(data) wrote, err := st.body.Write(data)
if err != nil { if err != nil {
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
return streamError(id, ErrCodeStreamClosed) return streamError(id, ErrCodeStreamClosed)
} }
if wrote != len(data) { if wrote != len(data) {
@ -12,6 +12,9 @@ import (
) )
func probeProtocolStack() int { func probeProtocolStack() int {
if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm64" {
return 16
}
if (runtime.GOOS == "netbsd" || runtime.GOOS == "openbsd") && runtime.GOARCH == "arm" { if (runtime.GOOS == "netbsd" || runtime.GOOS == "openbsd") && runtime.GOARCH == "arm" {
return 8 return 8
} }
@ -1,33 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.12
package socket
import (
"syscall"
"unsafe"
)
func getsockopt(s uintptr, level, name int, b []byte) (int, error) {
l := uint32(len(b))
_, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0)
return int(l), errnoErr(errno)
}
func setsockopt(s uintptr, level, name int, b []byte) error {
_, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0)
return errnoErr(errno)
}
func recvmsg(s uintptr, h *msghdr, flags int) (int, error) {
n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags))
return int(n), errnoErr(errno)
}
func sendmsg(s uintptr, h *msghdr, flags int) (int, error) {
n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags))
return int(n), errnoErr(errno)
}
@ -67,7 +67,7 @@ func (h *Header) Marshal() ([]byte, error) {
b[1] = byte(h.TOS) b[1] = byte(h.TOS)
flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13)
switch runtime.GOOS { switch runtime.GOOS {
case "darwin", "dragonfly", "netbsd": case "darwin", "ios", "dragonfly", "netbsd":
socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen))
socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))
case "freebsd": case "freebsd":
@ -126,7 +126,7 @@ func (h *Header) Parse(b []byte) error {
h.Src = net.IPv4(b[12], b[13], b[14], b[15]) h.Src = net.IPv4(b[12], b[13], b[14], b[15])
h.Dst = net.IPv4(b[16], b[17], b[18], b[19]) h.Dst = net.IPv4(b[16], b[17], b[18], b[19])
switch runtime.GOOS { switch runtime.GOOS {
case "darwin", "dragonfly", "netbsd": case "darwin", "ios", "dragonfly", "netbsd":
h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen
h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) h.FragOff = int(socket.NativeEndian.Uint16(b[6:8]))
case "freebsd": case "freebsd":
vendor/modules.txt
@ -204,7 +204,7 @@ github.com/gophercloud/gophercloud/pagination
github.com/hashicorp/errwrap github.com/hashicorp/errwrap
# github.com/hashicorp/go-bexpr v0.1.2 # github.com/hashicorp/go-bexpr v0.1.2
github.com/hashicorp/go-bexpr github.com/hashicorp/go-bexpr
# github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de # github.com/hashicorp/go-checkpoint v0.5.0
github.com/hashicorp/go-checkpoint github.com/hashicorp/go-checkpoint
# github.com/hashicorp/go-cleanhttp v0.5.1 # github.com/hashicorp/go-cleanhttp v0.5.1
github.com/hashicorp/go-cleanhttp github.com/hashicorp/go-cleanhttp
@ -229,9 +229,9 @@ github.com/hashicorp/go-discover/provider/triton
github.com/hashicorp/go-discover/provider/vsphere github.com/hashicorp/go-discover/provider/vsphere
# github.com/hashicorp/go-hclog v0.12.0 # github.com/hashicorp/go-hclog v0.12.0
github.com/hashicorp/go-hclog github.com/hashicorp/go-hclog
# github.com/hashicorp/go-immutable-radix v1.2.0 # github.com/hashicorp/go-immutable-radix v1.3.0
github.com/hashicorp/go-immutable-radix github.com/hashicorp/go-immutable-radix
# github.com/hashicorp/go-memdb v1.1.0 # github.com/hashicorp/go-memdb v1.3.0
github.com/hashicorp/go-memdb github.com/hashicorp/go-memdb
# github.com/hashicorp/go-msgpack v0.5.5 # github.com/hashicorp/go-msgpack v0.5.5
github.com/hashicorp/go-msgpack/codec github.com/hashicorp/go-msgpack/codec
@ -251,7 +251,7 @@ github.com/hashicorp/go-sockaddr/template
github.com/hashicorp/go-syslog github.com/hashicorp/go-syslog
# github.com/hashicorp/go-uuid v1.0.2 # github.com/hashicorp/go-uuid v1.0.2
github.com/hashicorp/go-uuid github.com/hashicorp/go-uuid
# github.com/hashicorp/go-version v1.2.0 # github.com/hashicorp/go-version v1.2.1
github.com/hashicorp/go-version github.com/hashicorp/go-version
# github.com/hashicorp/golang-lru v0.5.4 # github.com/hashicorp/golang-lru v0.5.4
github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru
@ -267,10 +267,12 @@ github.com/hashicorp/hcl/hcl/token
github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/parser
github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/scanner
github.com/hashicorp/hcl/json/token github.com/hashicorp/hcl/json/token
# github.com/hashicorp/hil v0.0.0-20160711231837-1e86c6b523c5 # github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038
github.com/hashicorp/hil github.com/hashicorp/hil
github.com/hashicorp/hil/ast github.com/hashicorp/hil/ast
# github.com/hashicorp/mdns v1.0.1 github.com/hashicorp/hil/parser
github.com/hashicorp/hil/scanner
# github.com/hashicorp/mdns v1.0.3
github.com/hashicorp/mdns github.com/hashicorp/mdns
# github.com/hashicorp/memberlist v0.2.2 # github.com/hashicorp/memberlist v0.2.2
github.com/hashicorp/memberlist github.com/hashicorp/memberlist
@ -294,7 +296,7 @@ github.com/hashicorp/vault/sdk/helper/parseutil
github.com/hashicorp/vault/sdk/helper/strutil github.com/hashicorp/vault/sdk/helper/strutil
# github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 # github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443
github.com/hashicorp/vic/pkg/vsphere/tags github.com/hashicorp/vic/pkg/vsphere/tags
# github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d # github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce
github.com/hashicorp/yamux github.com/hashicorp/yamux
# github.com/imdario/mergo v0.3.6 # github.com/imdario/mergo v0.3.6
github.com/imdario/mergo github.com/imdario/mergo
@ -314,13 +316,13 @@ github.com/konsorten/go-windows-terminal-sequences
github.com/kr/text github.com/kr/text
# github.com/linode/linodego v0.7.1 # github.com/linode/linodego v0.7.1
github.com/linode/linodego github.com/linode/linodego
# github.com/mattn/go-colorable v0.1.6 # github.com/mattn/go-colorable v0.1.7
github.com/mattn/go-colorable github.com/mattn/go-colorable
# github.com/mattn/go-isatty v0.0.12 # github.com/mattn/go-isatty v0.0.12
github.com/mattn/go-isatty github.com/mattn/go-isatty
# github.com/matttproud/golang_protobuf_extensions v1.0.1 # github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/matttproud/golang_protobuf_extensions/pbutil github.com/matttproud/golang_protobuf_extensions/pbutil
# github.com/miekg/dns v1.1.26 # github.com/miekg/dns v1.1.31
github.com/miekg/dns github.com/miekg/dns
# github.com/mitchellh/cli v1.1.0 # github.com/mitchellh/cli v1.1.0
github.com/mitchellh/cli github.com/mitchellh/cli
@ -457,7 +459,7 @@ go.opencensus.io/trace/tracestate
# go.uber.org/goleak v1.0.0 # go.uber.org/goleak v1.0.0
go.uber.org/goleak go.uber.org/goleak
go.uber.org/goleak/internal/stack go.uber.org/goleak/internal/stack
# golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a # golang.org/x/crypto v0.0.0-20200930160638-afb6bcd081ae
golang.org/x/crypto/blake2b golang.org/x/crypto/blake2b
golang.org/x/crypto/blowfish golang.org/x/crypto/blowfish
golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20
@ -476,7 +478,7 @@ golang.org/x/crypto/ssh/terminal
# golang.org/x/lint v0.0.0-20190930215403-16217165b5de # golang.org/x/lint v0.0.0-20190930215403-16217165b5de
golang.org/x/lint golang.org/x/lint
golang.org/x/lint/golint golang.org/x/lint/golint
# golang.org/x/net v0.0.0-20200904194848-62affa334b73 # golang.org/x/net v0.0.0-20200930145003-4acb6c075d10
golang.org/x/net/bpf golang.org/x/net/bpf
golang.org/x/net/context golang.org/x/net/context
golang.org/x/net/context/ctxhttp golang.org/x/net/context/ctxhttp