Update to google.golang.org/grpc v1.23.0 (#6320)

Matt Keeler 2019-08-14 10:41:27 -04:00 committed by GitHub
parent 72a3eec1bf
commit 0cf3271a68
50 changed files with 1955 additions and 1077 deletions

go.mod

@ -41,7 +41,6 @@ require (
github.com/gogo/protobuf v1.2.1 github.com/gogo/protobuf v1.2.1
github.com/golang/protobuf v1.3.1 github.com/golang/protobuf v1.3.1
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
github.com/google/go-cmp v0.2.0 // indirect
github.com/google/go-github v17.0.0+incompatible // indirect github.com/google/go-github v17.0.0+incompatible // indirect
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
@ -108,7 +107,7 @@ require (
golang.org/x/sync v0.0.0-20190423024810-112230192c58 golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
google.golang.org/grpc v1.19.1 google.golang.org/grpc v1.23.0
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 // indirect gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 // indirect
gopkg.in/ory-am/dockertest.v3 v3.3.4 // indirect gopkg.in/ory-am/dockertest.v3 v3.3.4 // indirect

go.sum

@ -145,8 +145,6 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-bexpr v0.1.1 h1:WvjUsC7elNIAwpFGj7tfqXocJDBQbep0Py9hSNmtrFk=
github.com/hashicorp/go-bexpr v0.1.1/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU=
github.com/hashicorp/go-bexpr v0.1.2 h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs= github.com/hashicorp/go-bexpr v0.1.2 h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs=
github.com/hashicorp/go-bexpr v0.1.2/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU= github.com/hashicorp/go-bexpr v0.1.2/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU=
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de h1:XDCSythtg8aWSRSO29uwhgh7b127fWr+m5SemqjSUL8= github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de h1:XDCSythtg8aWSRSO29uwhgh7b127fWr+m5SemqjSUL8=
@ -364,12 +362,14 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519 h1:x6rhz8Y9CjbgQkccRGmELH6K+LJj7tOoh3XWeC1yaQM= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519 h1:x6rhz8Y9CjbgQkccRGmELH6K+LJj7tOoh3XWeC1yaQM=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -399,6 +399,8 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/api v0.0.0-20180829000535-087779f1d2c9 h1:z1TeLUmxf9ws9KLICfmX+KGXTs+rjm+aGWzfsv7MZ9w= google.golang.org/api v0.0.0-20180829000535-087779f1d2c9 h1:z1TeLUmxf9ws9KLICfmX+KGXTs+rjm+aGWzfsv7MZ9w=
google.golang.org/api v0.0.0-20180829000535-087779f1d2c9/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20180829000535-087779f1d2c9/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
@ -407,6 +409,8 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM=
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
@ -432,6 +436,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
istio.io/gogo-genproto v0.0.0-20190124151557-6d926a6e6feb/go.mod h1:eIDJ6jNk/IeJz6ODSksHl5Aiczy5JUq6vFhJWI5OtiI= istio.io/gogo-genproto v0.0.0-20190124151557-6d926a6e6feb/go.mod h1:eIDJ6jNk/IeJz6ODSksHl5Aiczy5JUq6vFhJWI5OtiI=
k8s.io/api v0.0.0-20180806132203-61b11ee65332/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/api v0.0.0-20180806132203-61b11ee65332/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
k8s.io/api v0.0.0-20190325185214-7544f9db76f6 h1:9MWtbqhwTyDvF4cS1qAhxDb9Mi8taXiAu+5nEacl7gY= k8s.io/api v0.0.0-20190325185214-7544f9db76f6 h1:9MWtbqhwTyDvF4cS1qAhxDb9Mi8taXiAu+5nEacl7gY=


@ -2,16 +2,16 @@ language: go
matrix: matrix:
include: include:
- go: 1.12beta2 - go: 1.12.x
env: GO111MODULE=on
- go: 1.11.x
env: VET=1 GO111MODULE=on env: VET=1 GO111MODULE=on
- go: 1.11.x - go: 1.12.x
env: RACE=1 GO111MODULE=on env: RACE=1 GO111MODULE=on
- go: 1.11.x - go: 1.12.x
env: RUN386=1 env: RUN386=1
- go: 1.11.x - go: 1.12.x
env: GRPC_GO_RETRY=on env: GRPC_GO_RETRY=on
- go: 1.11.x
env: GO111MODULE=on
- go: 1.10.x - go: 1.10.x
- go: 1.9.x - go: 1.9.x
- go: 1.9.x - go: 1.9.x


@ -12,21 +12,45 @@ In order to protect both you and ourselves, you will need to sign the
## Guidelines for Pull Requests ## Guidelines for Pull Requests
How to get your contributions merged smoothly and quickly. How to get your contributions merged smoothly and quickly.
- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. - Create **small PRs** that are narrowly focused on **addressing a single
concern**. We often times receive PRs that are trying to fix several things at
a time, but only one fix is considered acceptable, nothing gets merged and
both author's & review's time is wasted. Create more PRs to address different
concerns and everyone will be happy.
- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal). - The grpc package should only depend on standard Go packages and a small number
of exceptions. If your contribution introduces new dependencies which are NOT
in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
discussion with gRPC-Go authors and consultants.
- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. - For speculative changes, consider opening an issue and discussing it first. If
you are suggesting a behavioral or API change, consider starting with a [gRFC
proposal](https://github.com/grpc/proposal).
- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. - Provide a good **PR description** as a record of **what** change is being made
and **why** it was made. Link to a github issue if it exists.
- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. - Don't fix code style and formatting unless you are already changing that line
to address an issue. PRs with irrelevant changes won't be merged. If you do
want to fix formatting or style, do that in a separate PR.
- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review). - Unless your PR is trivial, you should expect there will be reviewer comments
that you'll need to address before merging. We expect you to be reasonably
responsive to those comments, otherwise the PR will be closed after 2-3 weeks
of inactivity.
- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). - Maintain **clean commit history** and use **meaningful commit messages**. PRs
with messy commit history are difficult to review and won't be merged. Use
`rebase -i upstream/master` to curate your commit history and/or to bring in
latest changes from master (but avoid rebasing in the middle of a code
review).
- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - Keep your PR up to date with upstream/master (if there are merge conflicts, we
can't really merge your change).
- **All tests need to be passing** before your change can be merged. We
recommend you **run tests locally** before creating your PR to catch breakages
early on.
- `make all` to test everything, OR - `make all` to test everything, OR
- `make vet` to catch vet errors - `make vet` to catch vet errors
- `make test` to run the tests - `make test` to run the tests
@ -34,4 +58,3 @@ How to get your contributions merged smoothly and quickly.
- optional `make testappengine` to run tests with appengine - optional `make testappengine` to run tests with appengine
- Exceptions to the rules can be made if there's a compelling reason for doing so. - Exceptions to the rules can be made if there's a compelling reason for doing so.


@ -1,42 +1,96 @@
# gRPC-Go # gRPC-Go
[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. The Go implementation of [gRPC](https://grpc.io/): A high performance, open
source, general RPC framework that puts mobile and HTTP/2 first. For more
information see the [gRPC Quick Start:
Go](https://grpc.io/docs/quickstart/go.html) guide.
Installation Installation
------------ ------------
To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: To install this package, you need to install Go and setup your Go workspace on
your computer. The simplest way to install the library is to run:
``` ```
$ go get -u google.golang.org/grpc $ go get -u google.golang.org/grpc
``` ```
With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in
your source code and `go [build|run|test]` will automatically download the
necessary dependencies ([Go modules
ref](https://github.com/golang/go/wiki/Modules)).
If you are trying to access grpc-go from within China, please see the
[FAQ](#FAQ) below.
Prerequisites Prerequisites
------------- -------------
gRPC-Go requires Go 1.9 or later. gRPC-Go requires Go 1.9 or later.
Constraints
-----------
The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.
Documentation Documentation
------------- -------------
See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). - See [godoc](https://godoc.org/google.golang.org/grpc) for package and API
descriptions.
- Documentation on specific topics can be found in the [Documentation
directory](Documentation/).
- Examples can be found in the [examples directory](examples/).
Performance Performance
----------- -----------
See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). Performance benchmark data for grpc-go and other languages is maintained in
[this
dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
Status Status
------ ------
General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). General Availability [Google Cloud Platform Launch
Stages](https://cloud.google.com/terms/launch-stages).
FAQ FAQ
--- ---
#### I/O Timeout Errors
The `golang.org` domain may be blocked from some countries. `go get` usually
produces an error like the following when this happens:
```
$ go get -u google.golang.org/grpc
package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
```
To build Go code, there are several options:
- Set up a VPN and access google.golang.org through that.
- Without Go module support: `git clone` the repo manually:
```
git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
```
You will need to do the same for all of grpc's dependencies in `golang.org`,
e.g. `golang.org/x/net`.
- With Go module support: it is possible to use the `replace` feature of `go
mod` to create aliases for golang.org packages. In your project's directory:
```
go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
go mod tidy
go mod vendor
go build -mod=vendor
```
Again, this will need to be done for all transitive dependencies hosted on
golang.org as well. Please refer to [this
issue](https://github.com/golang/go/issues/28652) in the golang repo regarding
this concern.
#### Compiling error, undefined: grpc.SupportPackageIsVersion #### Compiling error, undefined: grpc.SupportPackageIsVersion
Please update proto package, gRPC package and rebuild the proto files: Please update proto package, gRPC package and rebuild the proto files:


@ -43,7 +43,7 @@ type Address struct {
// BalancerConfig specifies the configurations for Balancer. // BalancerConfig specifies the configurations for Balancer.
// //
// Deprecated: please use package balancer. // Deprecated: please use package balancer. May be removed in a future 1.x release.
type BalancerConfig struct { type BalancerConfig struct {
// DialCreds is the transport credential the Balancer implementation can // DialCreds is the transport credential the Balancer implementation can
// use to dial to a remote load balancer server. The Balancer implementations // use to dial to a remote load balancer server. The Balancer implementations
@ -57,7 +57,7 @@ type BalancerConfig struct {
// BalancerGetOptions configures a Get call. // BalancerGetOptions configures a Get call.
// //
// Deprecated: please use package balancer. // Deprecated: please use package balancer. May be removed in a future 1.x release.
type BalancerGetOptions struct { type BalancerGetOptions struct {
// BlockingWait specifies whether Get should block when there is no // BlockingWait specifies whether Get should block when there is no
// connected address. // connected address.
@ -66,7 +66,7 @@ type BalancerGetOptions struct {
// Balancer chooses network addresses for RPCs. // Balancer chooses network addresses for RPCs.
// //
// Deprecated: please use package balancer. // Deprecated: please use package balancer. May be removed in a future 1.x release.
type Balancer interface { type Balancer interface {
// Start does the initialization work to bootstrap a Balancer. For example, // Start does the initialization work to bootstrap a Balancer. For example,
// this function may start the name resolution and watch the updates. It will // this function may start the name resolution and watch the updates. It will
@ -120,7 +120,7 @@ type Balancer interface {
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
// the name resolution updates and updates the addresses available correspondingly. // the name resolution updates and updates the addresses available correspondingly.
// //
// Deprecated: please use package balancer/roundrobin. // Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
func RoundRobin(r naming.Resolver) Balancer { func RoundRobin(r naming.Resolver) Balancer {
return &roundRobin{r: r} return &roundRobin{r: r}
} }
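
For illustration (not part of this diff): with the v1 `Balancer` API deprecated, round-robin behavior is normally requested through the `balancer/roundrobin` registry entry and the service config rather than `grpc.RoundRobin`/`WithBalancer`. A minimal sketch assuming the v1.23.0 API; the target address is a placeholder.

```
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	// Importing the package registers the "round_robin" balancer.
	_ "google.golang.org/grpc/balancer/roundrobin"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Select round_robin via the default service config instead of the
	// deprecated grpc.RoundRobin / WithBalancer options.
	conn, err := grpc.DialContext(ctx, "dns:///my-service.example.com:50051", // placeholder target
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```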


@ -22,6 +22,7 @@ package balancer
import ( import (
"context" "context"
"encoding/json"
"errors" "errors"
"net" "net"
"strings" "strings"
@ -31,6 +32,7 @@ import (
"google.golang.org/grpc/internal" "google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
) )
var ( var (
@ -39,7 +41,10 @@ var (
) )
// Register registers the balancer builder to the balancer map. b.Name // Register registers the balancer builder to the balancer map. b.Name
// (lowercased) will be used as the name registered with this builder. // (lowercased) will be used as the name registered with this builder. If the
// Builder implements ConfigParser, ParseConfig will be called when new service
// configs are received by the resolver, and the result will be provided to the
// Balancer in UpdateClientConnState.
// //
// NOTE: this function must only be called during initialization time (i.e. in // NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Balancers are // an init() function), and is not thread-safe. If multiple Balancers are
@ -138,6 +143,8 @@ type ClientConn interface {
ResolveNow(resolver.ResolveNowOption) ResolveNow(resolver.ResolveNowOption)
// Target returns the dial target for this ClientConn. // Target returns the dial target for this ClientConn.
//
// Deprecated: Use the Target field in the BuildOptions instead.
Target() string Target() string
} }
@ -155,6 +162,10 @@ type BuildOptions struct {
Dialer func(context.Context, string) (net.Conn, error) Dialer func(context.Context, string) (net.Conn, error)
// ChannelzParentID is the entity parent's channelz unique identification number. // ChannelzParentID is the entity parent's channelz unique identification number.
ChannelzParentID int64 ChannelzParentID int64
// Target contains the parsed address info of the dial target. It is the same resolver.Target as
// passed to the resolver.
// See the documentation for the resolver.Target type for details about what it contains.
Target resolver.Target
} }
// Builder creates a balancer. // Builder creates a balancer.
@ -166,14 +177,19 @@ type Builder interface {
Name() string Name() string
} }
// ConfigParser parses load balancer configs.
type ConfigParser interface {
// ParseConfig parses the JSON load balancer config provided into an
// internal form or returns an error if the config is invalid. For future
// compatibility reasons, unknown fields in the config should be ignored.
ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
}
// PickOptions contains addition information for the Pick operation. // PickOptions contains addition information for the Pick operation.
type PickOptions struct { type PickOptions struct {
// FullMethodName is the method name that NewClientStream() is called // FullMethodName is the method name that NewClientStream() is called
// with. The canonical format is /service/Method. // with. The canonical format is /service/Method.
FullMethodName string FullMethodName string
// Header contains the metadata from the RPC's client header. The metadata
// should not be modified; make a copy first if needed.
Header metadata.MD
} }
// DoneInfo contains additional information for done. // DoneInfo contains additional information for done.
@ -186,6 +202,11 @@ type DoneInfo struct {
BytesSent bool BytesSent bool
// BytesReceived indicates if any byte has been received from the server. // BytesReceived indicates if any byte has been received from the server.
BytesReceived bool BytesReceived bool
// ServerLoad is the load received from server. It's usually sent as part of
// trailing metadata.
//
// The only supported type now is *orca_v1.LoadReport.
ServerLoad interface{}
} }
var ( var (
@ -215,8 +236,10 @@ type Picker interface {
// //
// If a SubConn is returned: // If a SubConn is returned:
// - If it is READY, gRPC will send the RPC on it; // - If it is READY, gRPC will send the RPC on it;
// - If it is not ready, or becomes not ready after it's returned, gRPC will block // - If it is not ready, or becomes not ready after it's returned, gRPC will
// until UpdateBalancerState() is called and will call pick on the new picker. // block until UpdateBalancerState() is called and will call pick on the
// new picker. The done function returned from Pick(), if not nil, will be
// called with nil error, no bytes sent and no bytes received.
// //
// If the returned error is not nil: // If the returned error is not nil:
// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
@ -249,18 +272,55 @@ type Balancer interface {
// that back to gRPC. // that back to gRPC.
// Balancer should also generate and update Pickers when its internal state has // Balancer should also generate and update Pickers when its internal state has
// been changed by the new state. // been changed by the new state.
//
// Deprecated: if V2Balancer is implemented by the Balancer,
// UpdateSubConnState will be called instead.
HandleSubConnStateChange(sc SubConn, state connectivity.State) HandleSubConnStateChange(sc SubConn, state connectivity.State)
// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
// balancers. // balancers.
// Balancer can create new SubConn or remove SubConn with the addresses. // Balancer can create new SubConn or remove SubConn with the addresses.
// An empty address slice and a non-nil error will be passed if the resolver returns // An empty address slice and a non-nil error will be passed if the resolver returns
// non-nil error to gRPC. // non-nil error to gRPC.
//
// Deprecated: if V2Balancer is implemented by the Balancer,
// UpdateClientConnState will be called instead.
HandleResolvedAddrs([]resolver.Address, error) HandleResolvedAddrs([]resolver.Address, error)
// Close closes the balancer. The balancer is not required to call // Close closes the balancer. The balancer is not required to call
// ClientConn.RemoveSubConn for its existing SubConns. // ClientConn.RemoveSubConn for its existing SubConns.
Close() Close()
} }
// SubConnState describes the state of a SubConn.
type SubConnState struct {
ConnectivityState connectivity.State
// TODO: add last connection error
}
// ClientConnState describes the state of a ClientConn relevant to the
// balancer.
type ClientConnState struct {
ResolverState resolver.State
// The parsed load balancing configuration returned by the builder's
// ParseConfig method, if implemented.
BalancerConfig serviceconfig.LoadBalancingConfig
}
// V2Balancer is defined for documentation purposes. If a Balancer also
// implements V2Balancer, its UpdateClientConnState method will be called
// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
// instead of HandleSubConnStateChange.
type V2Balancer interface {
// UpdateClientConnState is called by gRPC when the state of the ClientConn
// changes.
UpdateClientConnState(ClientConnState)
// UpdateSubConnState is called by gRPC when the state of a SubConn
// changes.
UpdateSubConnState(SubConn, SubConnState)
// Close closes the balancer. The balancer is not required to call
// ClientConn.RemoveSubConn for its existing SubConns.
Close()
}
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns // ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
// and returns one aggregated connectivity state. // and returns one aggregated connectivity state.
// //
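
For illustration (not from this diff): a sketch of a `Builder` that also implements the new `ConfigParser` interface. The package name, the `Weight` field, and the embedded inner builder are hypothetical; the config struct embeds `serviceconfig.LoadBalancingConfig` so it can be returned from `ParseConfig`.

```
package examplelb

import (
	"encoding/json"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/serviceconfig"
)

// lbConfig is the parsed form of this hypothetical policy's JSON config.
// Embedding serviceconfig.LoadBalancingConfig marks it as a load balancing
// config so it can be returned from ParseConfig.
type lbConfig struct {
	serviceconfig.LoadBalancingConfig `json:"-"`

	Weight int `json:"weight"` // hypothetical field
}

// exampleBuilder wraps an inner balancer.Builder (for example one produced by
// balancer/base) and adds config parsing on top of it.
type exampleBuilder struct {
	balancer.Builder
}

// ParseConfig implements balancer.ConfigParser. gRPC calls it when the
// resolver delivers a new service config, and hands the result back to the
// balancer in ClientConnState.BalancerConfig via UpdateClientConnState.
func (exampleBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
	cfg := &lbConfig{}
	if err := json.Unmarshal(js, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
```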


@ -67,14 +67,18 @@ type baseBalancer struct {
} }
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
if err != nil { panic("not implemented")
grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err) }
return
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
// TODO: handle s.ResolverState.ServiceConfig?
if grpclog.V(2) {
grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
} }
grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
// addrsSet is the set converted from addrs, it's used for quick lookup of an address. // addrsSet is the set converted from addrs, it's used for quick lookup of an address.
addrsSet := make(map[resolver.Address]struct{}) addrsSet := make(map[resolver.Address]struct{})
for _, a := range addrs { for _, a := range s.ResolverState.Addresses {
addrsSet[a] = struct{}{} addrsSet[a] = struct{}{}
if _, ok := b.subConns[a]; !ok { if _, ok := b.subConns[a]; !ok {
// a is a new address (not existing in b.subConns). // a is a new address (not existing in b.subConns).
@ -120,10 +124,19 @@ func (b *baseBalancer) regeneratePicker() {
} }
func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) panic("not implemented")
}
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
s := state.ConnectivityState
if grpclog.V(2) {
grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
}
oldS, ok := b.scStates[sc] oldS, ok := b.scStates[sc]
if !ok { if !ok {
grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) if grpclog.V(2) {
grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
}
return return
} }
b.scStates[sc] = s b.scStates[sc] = s
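
For illustration (not from this diff): the `baseBalancer` above is normally consumed through `base.NewBalancerBuilder`, which pairs it with a user-supplied `PickerBuilder`. A sketch of a random-pick policy assuming the v1.23.0 `balancer/base` API; the policy name is hypothetical.

```
package examplerandom

import (
	"context"
	"math/rand"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

// Name is the hypothetical name this policy registers under.
const Name = "example_random"

func init() {
	// base.NewBalancerBuilder wires the PickerBuilder to a baseBalancer, which
	// does the SubConn bookkeeping shown in the diff above and rebuilds the
	// picker whenever the set of READY SubConns changes.
	balancer.Register(base.NewBalancerBuilder(Name, &pickerBuilder{}))
}

type pickerBuilder struct{}

func (*pickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	if len(readySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	scs := make([]balancer.SubConn, 0, len(readySCs))
	for _, sc := range readySCs {
		scs = append(scs, sc)
	}
	return &picker{subConns: scs}
}

type picker struct {
	subConns []balancer.SubConn
}

// Pick returns a READY SubConn chosen at random for each RPC.
func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	return p.subConns[rand.Intn(len(p.subConns))], nil, nil
}
```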


@ -82,20 +82,13 @@ func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
return b.c return b.c
} }
// resolverUpdate contains the new resolved addresses or error if there's
// any.
type resolverUpdate struct {
addrs []resolver.Address
err error
}
// ccBalancerWrapper is a wrapper on top of cc for balancers. // ccBalancerWrapper is a wrapper on top of cc for balancers.
// It implements balancer.ClientConn interface. // It implements balancer.ClientConn interface.
type ccBalancerWrapper struct { type ccBalancerWrapper struct {
cc *ClientConn cc *ClientConn
balancer balancer.Balancer balancer balancer.Balancer
stateChangeQueue *scStateUpdateBuffer stateChangeQueue *scStateUpdateBuffer
resolverUpdateCh chan *resolverUpdate ccUpdateCh chan *balancer.ClientConnState
done chan struct{} done chan struct{}
mu sync.Mutex mu sync.Mutex
@ -106,7 +99,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
ccb := &ccBalancerWrapper{ ccb := &ccBalancerWrapper{
cc: cc, cc: cc,
stateChangeQueue: newSCStateUpdateBuffer(), stateChangeQueue: newSCStateUpdateBuffer(),
resolverUpdateCh: make(chan *resolverUpdate, 1), ccUpdateCh: make(chan *balancer.ClientConnState, 1),
done: make(chan struct{}), done: make(chan struct{}),
subConns: make(map[*acBalancerWrapper]struct{}), subConns: make(map[*acBalancerWrapper]struct{}),
} }
@ -128,15 +121,23 @@ func (ccb *ccBalancerWrapper) watcher() {
return return
default: default:
} }
ccb.balancer.HandleSubConnStateChange(t.sc, t.state) if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
case t := <-ccb.resolverUpdateCh: ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state})
} else {
ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
}
case s := <-ccb.ccUpdateCh:
select { select {
case <-ccb.done: case <-ccb.done:
ccb.balancer.Close() ccb.balancer.Close()
return return
default: default:
} }
ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
ub.UpdateClientConnState(*s)
} else {
ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil)
}
case <-ccb.done: case <-ccb.done:
} }
@ -150,9 +151,11 @@ func (ccb *ccBalancerWrapper) watcher() {
for acbw := range scs { for acbw := range scs {
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
} }
ccb.UpdateBalancerState(connectivity.Connecting, nil)
return return
default: default:
} }
ccb.cc.firstResolveEvent.Fire()
} }
} }
@ -177,37 +180,24 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
}) })
} }
func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) {
if ccb.cc.curBalancerName != grpclbName { if ccb.cc.curBalancerName != grpclbName {
var containsGRPCLB bool // Filter any grpclb addresses since we don't have the grpclb balancer.
for _, a := range addrs { s := &ccs.ResolverState
if a.Type == resolver.GRPCLB { for i := 0; i < len(s.Addresses); {
containsGRPCLB = true if s.Addresses[i].Type == resolver.GRPCLB {
break copy(s.Addresses[i:], s.Addresses[i+1:])
s.Addresses = s.Addresses[:len(s.Addresses)-1]
continue
} }
} i++
if containsGRPCLB {
// The current balancer is not grpclb, but addresses contain grpclb
// address. This means we failed to switch to grpclb, most likely
// because grpclb is not registered. Filter out all grpclb addresses
// from addrs before sending to balancer.
tempAddrs := make([]resolver.Address, 0, len(addrs))
for _, a := range addrs {
if a.Type != resolver.GRPCLB {
tempAddrs = append(tempAddrs, a)
}
}
addrs = tempAddrs
} }
} }
select { select {
case <-ccb.resolverUpdateCh: case <-ccb.ccUpdateCh:
default: default:
} }
ccb.resolverUpdateCh <- &resolverUpdate{ ccb.ccUpdateCh <- ccs
addrs: addrs,
err: err,
}
} }
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
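
The grpclb filtering in `updateClientConnState` above removes elements from `s.Addresses` in place with a copy-and-truncate loop. A standalone sketch of that idiom; the helper name is illustrative and not part of the gRPC API.

```
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// dropGRPCLB removes grpclb-type addresses in place, mirroring the
// copy-and-truncate loop in updateClientConnState above.
func dropGRPCLB(addrs []resolver.Address) []resolver.Address {
	for i := 0; i < len(addrs); {
		if addrs[i].Type == resolver.GRPCLB {
			// Shift the tail left and shrink the slice; do not advance i,
			// because a new element now occupies index i.
			copy(addrs[i:], addrs[i+1:])
			addrs = addrs[:len(addrs)-1]
			continue
		}
		i++
	}
	return addrs
}

func main() {
	addrs := []resolver.Address{
		{Addr: "10.0.0.1:443"},
		{Addr: "10.0.0.2:443", Type: resolver.GRPCLB},
		{Addr: "10.0.0.3:443"},
	}
	fmt.Println(dropGRPCLB(addrs)) // the grpclb entry is filtered out
}
```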


@ -20,7 +20,6 @@ package grpc
import ( import (
"context" "context"
"strings"
"sync" "sync"
"google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer"
@ -34,13 +33,7 @@ type balancerWrapperBuilder struct {
} }
func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
targetAddr := cc.Target() bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
targetSplitted := strings.Split(targetAddr, ":///")
if len(targetSplitted) >= 2 {
targetAddr = targetSplitted[1]
}
bwb.b.Start(targetAddr, BalancerConfig{
DialCreds: opts.DialCreds, DialCreds: opts.DialCreds,
Dialer: opts.Dialer, Dialer: opts.Dialer,
}) })
@ -49,7 +42,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B
balancer: bwb.b, balancer: bwb.b,
pickfirst: pickfirst, pickfirst: pickfirst,
cc: cc, cc: cc,
targetAddr: targetAddr, targetAddr: opts.Target.Endpoint,
startCh: make(chan struct{}), startCh: make(chan struct{}),
conns: make(map[resolver.Address]balancer.SubConn), conns: make(map[resolver.Address]balancer.SubConn),
connSt: make(map[balancer.SubConn]*scState), connSt: make(map[balancer.SubConn]*scState),
@ -120,7 +113,7 @@ func (bw *balancerWrapper) lbWatcher() {
} }
for addrs := range notifyCh { for addrs := range notifyCh {
grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs) grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
if bw.pickfirst { if bw.pickfirst {
var ( var (
oldA resolver.Address oldA resolver.Address
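
The wrapper now reads `opts.Target.Endpoint` instead of re-splitting `cc.Target()`. For reference (not from this diff), a sketch of what `resolver.Target` carries for a typical `scheme://authority/endpoint` dial target; the concrete values are placeholders.

```
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	// Parsed form of a dial target such as "dns://8.8.8.8/my-service:443".
	// BuildOptions.Target now exposes this struct directly to balancer
	// builders, so the endpoint no longer has to be recovered by splitting
	// the raw target string.
	t := resolver.Target{
		Scheme:    "dns",
		Authority: "8.8.8.8",
		Endpoint:  "my-service:443",
	}
	fmt.Printf("scheme=%q authority=%q endpoint=%q\n", t.Scheme, t.Authority, t.Endpoint)
}
```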

File diff suppressed because it is too large.


@ -132,7 +132,8 @@ const (
// Unavailable indicates the service is currently unavailable. // Unavailable indicates the service is currently unavailable.
// This is a most likely a transient condition and may be corrected // This is a most likely a transient condition and may be corrected
// by retrying with a backoff. // by retrying with a backoff. Note that it is not always safe to retry
// non-idempotent operations.
// //
// See litmus test above for deciding between FailedPrecondition, // See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable. // Aborted, and Unavailable.
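
The added wording warns that retrying `Unavailable` is only safe for idempotent operations. A hedged sketch of acting on that status code from client code; the one-shot retry policy, the helper name, and the `idempotent` flag are placeholders.

```
package rpcutil

import (
	"context"
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// callWithRetry retries fn once on Unavailable, but only when the caller has
// marked the operation as idempotent. fn stands in for an arbitrary RPC call.
func callWithRetry(ctx context.Context, idempotent bool, fn func(context.Context) error) error {
	err := fn(ctx)
	if status.Code(err) == codes.Unavailable && idempotent {
		log.Println("transient Unavailable, retrying once")
		return fn(ctx)
	}
	return err
}
```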


@ -36,9 +36,6 @@ import (
"google.golang.org/grpc/credentials/internal" "google.golang.org/grpc/credentials/internal"
) )
// alpnProtoStr are the specified application level protocols for gRPC.
var alpnProtoStr = []string{"h2"}
// PerRPCCredentials defines the common interface for the credentials which need to // PerRPCCredentials defines the common interface for the credentials which need to
// attach security information to every RPC (e.g., oauth2). // attach security information to every RPC (e.g., oauth2).
type PerRPCCredentials interface { type PerRPCCredentials interface {
@ -208,10 +205,23 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
return nil return nil
} }
const alpnProtoStrH2 = "h2"
func appendH2ToNextProtos(ps []string) []string {
for _, p := range ps {
if p == alpnProtoStrH2 {
return ps
}
}
ret := make([]string, 0, len(ps)+1)
ret = append(ret, ps...)
return append(ret, alpnProtoStrH2)
}
// NewTLS uses c to construct a TransportCredentials based on TLS. // NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials { func NewTLS(c *tls.Config) TransportCredentials {
tc := &tlsCreds{cloneTLSConfig(c)} tc := &tlsCreds{cloneTLSConfig(c)}
tc.config.NextProtos = alpnProtoStr tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
return tc return tc
} }
@ -268,24 +278,22 @@ type ChannelzSecurityValue interface {
// TLSChannelzSecurityValue defines the struct that TLS protocol should return // TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used. // from GetSecurityValue(), containing security info like cipher and certificate used.
type TLSChannelzSecurityValue struct { type TLSChannelzSecurityValue struct {
ChannelzSecurityValue
StandardName string StandardName string
LocalCertificate []byte LocalCertificate []byte
RemoteCertificate []byte RemoteCertificate []byte
} }
func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {}
// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return // OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
// from GetSecurityValue(), which contains protocol specific security info. Note // from GetSecurityValue(), which contains protocol specific security info. Note
// the Value field will be sent to users of channelz requesting channel info, and // the Value field will be sent to users of channelz requesting channel info, and
// thus sensitive info should better be avoided. // thus sensitive info should better be avoided.
type OtherChannelzSecurityValue struct { type OtherChannelzSecurityValue struct {
ChannelzSecurityValue
Name string Name string
Value proto.Message Value proto.Message
} }
func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {}
var cipherSuiteLookup = map[uint16]string{ var cipherSuiteLookup = map[uint16]string{
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
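
With this change, `NewTLS` appends "h2" to whatever `NextProtos` the caller already set instead of overwriting the list. A minimal sketch; the server name and the extra ALPN protocol are placeholders.

```
package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Caller-supplied ALPN protocols are now preserved; NewTLS clones the
	// config and appends "h2" only if it is not already in NextProtos.
	cfg := &tls.Config{
		ServerName: "my-service.example.com", // placeholder
		NextProtos: []string{"acme-tls/1"},   // placeholder extra protocol
	}
	creds := credentials.NewTLS(cfg)

	conn, err := grpc.Dial("my-service.example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```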


@ -39,8 +39,12 @@ import (
// dialOptions configure a Dial call. dialOptions are set by the DialOption // dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial. // values passed to Dial.
type dialOptions struct { type dialOptions struct {
unaryInt UnaryClientInterceptor unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor streamInt StreamClientInterceptor
chainUnaryInts []UnaryClientInterceptor
chainStreamInts []StreamClientInterceptor
cp Compressor cp Compressor
dc Decompressor dc Decompressor
bs backoff.Strategy bs backoff.Strategy
@ -55,13 +59,15 @@ type dialOptions struct {
// balancer, and also by WithBalancerName dial option. // balancer, and also by WithBalancerName dial option.
balancerBuilder balancer.Builder balancerBuilder balancer.Builder
// This is to support grpclb. // This is to support grpclb.
resolverBuilder resolver.Builder resolverBuilder resolver.Builder
reqHandshake envconfig.RequireHandshakeSetting channelzParentID int64
channelzParentID int64 disableServiceConfig bool
disableServiceConfig bool disableRetry bool
disableRetry bool disableHealthCheck bool
disableHealthCheck bool healthCheckFunc internal.HealthChecker
healthCheckFunc internal.HealthChecker minConnectTimeout func() time.Duration
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
defaultServiceConfigRawJSON *string
} }
// DialOption configures how we set up the connection. // DialOption configures how we set up the connection.
@ -93,17 +99,6 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
} }
} }
// WithWaitForHandshake blocks until the initial settings frame is received from
// the server before assigning RPCs to the connection.
//
// Deprecated: this is the default behavior, and this option will be removed
// after the 1.18 release.
func WithWaitForHandshake() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.reqHandshake = envconfig.RequireHandshakeOn
})
}
// WithWriteBufferSize determines how much data can be batched before doing a // WithWriteBufferSize determines how much data can be batched before doing a
// write on the wire. The corresponding memory allocation for this buffer will // write on the wire. The corresponding memory allocation for this buffer will
// be twice the size to keep syscalls low. The default value for this buffer is // be twice the size to keep syscalls low. The default value for this buffer is
@ -149,7 +144,8 @@ func WithInitialConnWindowSize(s int32) DialOption {
// WithMaxMsgSize returns a DialOption which sets the maximum message size the // WithMaxMsgSize returns a DialOption which sets the maximum message size the
// client can receive. // client can receive.
// //
// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. // Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will
// be supported throughout 1.x.
func WithMaxMsgSize(s int) DialOption { func WithMaxMsgSize(s int) DialOption {
return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
} }
@ -165,7 +161,8 @@ func WithDefaultCallOptions(cos ...CallOption) DialOption {
// WithCodec returns a DialOption which sets a codec for message marshaling and // WithCodec returns a DialOption which sets a codec for message marshaling and
// unmarshaling. // unmarshaling.
// //
// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. // Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be
// supported throughout 1.x.
func WithCodec(c Codec) DialOption { func WithCodec(c Codec) DialOption {
return WithDefaultCallOptions(CallCustomCodec(c)) return WithDefaultCallOptions(CallCustomCodec(c))
} }
@ -174,7 +171,7 @@ func WithCodec(c Codec) DialOption {
// message compression. It has lower priority than the compressor set by the // message compression. It has lower priority than the compressor set by the
// UseCompressor CallOption. // UseCompressor CallOption.
// //
// Deprecated: use UseCompressor instead. // Deprecated: use UseCompressor instead. Will be supported throughout 1.x.
func WithCompressor(cp Compressor) DialOption { func WithCompressor(cp Compressor) DialOption {
return newFuncDialOption(func(o *dialOptions) { return newFuncDialOption(func(o *dialOptions) {
o.cp = cp o.cp = cp
@ -189,7 +186,8 @@ func WithCompressor(cp Compressor) DialOption {
// message. If no compressor is registered for the encoding, an Unimplemented // message. If no compressor is registered for the encoding, an Unimplemented
// status error will be returned. // status error will be returned.
// //
// Deprecated: use encoding.RegisterCompressor instead. // Deprecated: use encoding.RegisterCompressor instead. Will be supported
// throughout 1.x.
func WithDecompressor(dc Decompressor) DialOption { func WithDecompressor(dc Decompressor) DialOption {
return newFuncDialOption(func(o *dialOptions) { return newFuncDialOption(func(o *dialOptions) {
o.dc = dc o.dc = dc
@ -200,7 +198,7 @@ func WithDecompressor(dc Decompressor) DialOption {
// Name resolver will be ignored if this DialOption is specified. // Name resolver will be ignored if this DialOption is specified.
// //
// Deprecated: use the new balancer APIs in balancer package and // Deprecated: use the new balancer APIs in balancer package and
// WithBalancerName. // WithBalancerName. Will be removed in a future 1.x release.
func WithBalancer(b Balancer) DialOption { func WithBalancer(b Balancer) DialOption {
return newFuncDialOption(func(o *dialOptions) { return newFuncDialOption(func(o *dialOptions) {
o.balancerBuilder = &balancerWrapperBuilder{ o.balancerBuilder = &balancerWrapperBuilder{
@ -216,7 +214,8 @@ func WithBalancer(b Balancer) DialOption {
// The balancer cannot be overridden by balancer option specified by service // The balancer cannot be overridden by balancer option specified by service
// config. // config.
// //
// This is an EXPERIMENTAL API. // Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
// instead. Will be removed in a future 1.x release.
func WithBalancerName(balancerName string) DialOption { func WithBalancerName(balancerName string) DialOption {
builder := balancer.Get(balancerName) builder := balancer.Get(balancerName)
if builder == nil { if builder == nil {
@ -237,9 +236,10 @@ func withResolverBuilder(b resolver.Builder) DialOption {
// WithServiceConfig returns a DialOption which has a channel to read the // WithServiceConfig returns a DialOption which has a channel to read the
// service configuration. // service configuration.
// //
// Deprecated: service config should be received through name resolver, as // Deprecated: service config should be received through name resolver or via
// specified here. // WithDefaultServiceConfig, as specified at
// https://github.com/grpc/grpc/blob/master/doc/service_config.md // https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be
// removed in a future 1.x release.
func WithServiceConfig(c <-chan ServiceConfig) DialOption { func WithServiceConfig(c <-chan ServiceConfig) DialOption {
return newFuncDialOption(func(o *dialOptions) { return newFuncDialOption(func(o *dialOptions) {
o.scChan = c o.scChan = c
@ -322,7 +322,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption {
// WithTimeout returns a DialOption that configures a timeout for dialing a // WithTimeout returns a DialOption that configures a timeout for dialing a
// ClientConn initially. This is valid if and only if WithBlock() is present. // ClientConn initially. This is valid if and only if WithBlock() is present.
// //
// Deprecated: use DialContext and context.WithTimeout instead. // Deprecated: use DialContext and context.WithTimeout instead. Will be
// supported throughout 1.x.
func WithTimeout(d time.Duration) DialOption { func WithTimeout(d time.Duration) DialOption {
return newFuncDialOption(func(o *dialOptions) { return newFuncDialOption(func(o *dialOptions) {
o.timeout = d o.timeout = d
@ -349,7 +350,8 @@ func init() {
// is returned by f, gRPC checks the error's Temporary() method to decide if it // is returned by f, gRPC checks the error's Temporary() method to decide if it
// should try to reconnect to the network address. // should try to reconnect to the network address.
// //
// Deprecated: use WithContextDialer instead // Deprecated: use WithContextDialer instead. Will be supported throughout
// 1.x.
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
return WithContextDialer( return WithContextDialer(
func(ctx context.Context, addr string) (net.Conn, error) { func(ctx context.Context, addr string) (net.Conn, error) {
@ -411,6 +413,17 @@ func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
}) })
} }
// WithChainUnaryInterceptor returns a DialOption that specifies the chained
// interceptor for unary RPCs. The first interceptor will be the outer most,
// while the last interceptor will be the inner most wrapper around the real call.
// All interceptors added by this method will be chained, and the interceptor
// defined by WithUnaryInterceptor will always be prepended to the chain.
func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
})
}
// WithStreamInterceptor returns a DialOption that specifies the interceptor for // WithStreamInterceptor returns a DialOption that specifies the interceptor for
// streaming RPCs. // streaming RPCs.
func WithStreamInterceptor(f StreamClientInterceptor) DialOption { func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
@ -419,6 +432,17 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
}) })
} }
// WithChainStreamInterceptor returns a DialOption that specifies the chained
// interceptor for unary RPCs. The first interceptor will be the outer most,
// while the last interceptor will be the inner most wrapper around the real call.
// All interceptors added by this method will be chained, and the interceptor
// defined by WithStreamInterceptor will always be prepended to the chain.
func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.chainStreamInts = append(o.chainStreamInts, interceptors...)
})
}
// WithAuthority returns a DialOption that specifies the value to be used as the // WithAuthority returns a DialOption that specifies the value to be used as the
// :authority pseudo-header. This value only works with WithInsecure and has no // :authority pseudo-header. This value only works with WithInsecure and has no
// effect if TransportCredentials are present. // effect if TransportCredentials are present.
@ -437,15 +461,32 @@ func WithChannelzParentID(id int64) DialOption {
}) })
} }
// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any // WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any
// service config provided by the resolver and provides a hint to the resolver // service config provided by the resolver and provides a hint to the resolver
// to not fetch service configs. // to not fetch service configs.
//
// Note that this dial option only disables service config from resolver. If
// default service config is provided, gRPC will use the default service config.
func WithDisableServiceConfig() DialOption { func WithDisableServiceConfig() DialOption {
return newFuncDialOption(func(o *dialOptions) { return newFuncDialOption(func(o *dialOptions) {
o.disableServiceConfig = true o.disableServiceConfig = true
}) })
} }
// WithDefaultServiceConfig returns a DialOption that configures the default
// service config, which will be used in cases where:
//
// 1. WithDisableServiceConfig is also used.
// 2. Resolver does not return a service config or if the resolver returns an
// invalid service config.
//
// This API is EXPERIMENTAL.
func WithDefaultServiceConfig(s string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.defaultServiceConfigRawJSON = &s
})
}
// WithDisableRetry returns a DialOption that disables retries, even if the // WithDisableRetry returns a DialOption that disables retries, even if the
// service config enables them. This does not impact transparent retries, which // service config enables them. This does not impact transparent retries, which
// will happen automatically if no data is written to the wire or if the RPC is // will happen automatically if no data is written to the wire or if the RPC is
@ -470,7 +511,8 @@ func WithMaxHeaderListSize(s uint32) DialOption {
}) })
} }
// WithDisableHealthCheck disables the LB channel health checking for all SubConns of this ClientConn. // WithDisableHealthCheck disables the LB channel health checking for all
// SubConns of this ClientConn.
// //
// This API is EXPERIMENTAL. // This API is EXPERIMENTAL.
func WithDisableHealthCheck() DialOption { func WithDisableHealthCheck() DialOption {
@ -479,8 +521,8 @@ func WithDisableHealthCheck() DialOption {
}) })
} }
// withHealthCheckFunc replaces the default health check function with the provided one. It makes // withHealthCheckFunc replaces the default health check function with the
// tests easier to change the health check function. // provided one. It makes tests easier to change the health check function.
// //
// For testing purpose only. // For testing purpose only.
func withHealthCheckFunc(f internal.HealthChecker) DialOption { func withHealthCheckFunc(f internal.HealthChecker) DialOption {
@ -492,7 +534,6 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption {
func defaultDialOptions() dialOptions { func defaultDialOptions() dialOptions {
return dialOptions{ return dialOptions{
disableRetry: !envconfig.Retry, disableRetry: !envconfig.Retry,
reqHandshake: envconfig.RequireHandshake,
healthCheckFunc: internal.HealthCheckFunc, healthCheckFunc: internal.HealthCheckFunc,
copts: transport.ConnectOptions{ copts: transport.ConnectOptions{
WriteBufferSize: defaultWriteBufSize, WriteBufferSize: defaultWriteBufSize,
@ -500,3 +541,14 @@ func defaultDialOptions() dialOptions {
}, },
} }
} }
// withMinConnectDeadline specifies the function that clientconn uses to
// get minConnectDeadline. This can be used to make connection attempts happen
// faster/slower.
//
// For testing purpose only.
func withMinConnectDeadline(f func() time.Duration) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.minConnectTimeout = f
})
}
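Since withMinConnectDeadline is unexported, only tests inside the grpc package can use it; a hypothetical test might look like this (test name and timings are illustrative only):

package grpc

import (
	"testing"
	"time"
)

func TestFastReconnect(t *testing.T) {
	// Shrink the minimum connect deadline so reconnect attempts fire quickly.
	cc, err := Dial("localhost:0",
		WithInsecure(),
		withMinConnectDeadline(func() time.Duration { return 100 * time.Millisecond }),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer cc.Close()
}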

View File

@ -102,10 +102,10 @@ func RegisterCodec(codec Codec) {
if codec == nil { if codec == nil {
panic("cannot register a nil Codec") panic("cannot register a nil Codec")
} }
contentSubtype := strings.ToLower(codec.Name()) if codec.Name() == "" {
if contentSubtype == "" { panic("cannot register Codec with empty string result for Name()")
panic("cannot register Codec with empty string result for String()")
} }
contentSubtype := strings.ToLower(codec.Name())
registeredCodecs[contentSubtype] = codec registeredCodecs[contentSubtype] = codec
} }

13
vendor/google.golang.org/grpc/go.mod generated vendored
View File

@ -7,14 +7,13 @@ require (
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/mock v1.1.1 github.com/golang/mock v1.1.1
github.com/golang/protobuf v1.2.0 github.com/golang/protobuf v1.2.0
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 github.com/google/go-cmp v0.2.0
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135
golang.org/x/text v0.3.0 // indirect
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b
google.golang.org/appengine v1.1.0 // indirect google.golang.org/appengine v1.1.0 // indirect
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
) )

29
vendor/google.golang.org/grpc/go.sum generated vendored
View File

@ -10,23 +10,28 @@ github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 h1:x/bBzNauLQAlE3fLku/xy92Y8QwKX5HZymrMz2IiKFc= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b h1:qMK98NmNCRVDIYFycQ5yVRkvgDUFfdP8Ip4KqmDEB7g= golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099 h1:XJP7lxbSxWLOMNdBE4B/STaqVy6L73o0knwj2vIlxnw= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -18,7 +18,7 @@
// Package grpclog defines logging for grpc. // Package grpclog defines logging for grpc.
// //
// All logs in transport package only go to verbose level 2. // All logs in transport and grpclb packages only go to verbose level 2.
// All logs in other packages in grpc are logged in spite of the verbosity level. // All logs in other packages in grpc are logged in spite of the verbosity level.
// //
// In the default logger, // In the default logger,

View File

@ -26,6 +26,7 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
healthpb "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/internal" "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/backoff"
@ -51,7 +52,11 @@ func init() {
internal.HealthCheckFunc = clientHealthCheck internal.HealthCheckFunc = clientHealthCheck
} }
func clientHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), service string) error { const healthCheckMethod = "/grpc.health.v1.Health/Watch"
// This function implements the protocol defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error {
tryCnt := 0 tryCnt := 0
retryConnection: retryConnection:
@ -65,7 +70,8 @@ retryConnection:
if ctx.Err() != nil { if ctx.Err() != nil {
return nil return nil
} }
rawS, err := newStream() setConnectivityState(connectivity.Connecting)
rawS, err := newStream(healthCheckMethod)
if err != nil { if err != nil {
continue retryConnection continue retryConnection
} }
@ -73,7 +79,7 @@ retryConnection:
s, ok := rawS.(grpc.ClientStream) s, ok := rawS.(grpc.ClientStream)
// Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
if !ok { if !ok {
reportHealth(true) setConnectivityState(connectivity.Ready)
return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
} }
@ -89,19 +95,23 @@ retryConnection:
// Reports healthy for the LBing purposes if health check is not implemented in the server. // Reports healthy for the LBing purposes if health check is not implemented in the server.
if status.Code(err) == codes.Unimplemented { if status.Code(err) == codes.Unimplemented {
reportHealth(true) setConnectivityState(connectivity.Ready)
return err return err
} }
// Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
if err != nil { if err != nil {
reportHealth(false) setConnectivityState(connectivity.TransientFailure)
continue retryConnection continue retryConnection
} }
// As a message has been received, removes the need for backoff for the next retry by resetting the try count. // As a message has been received, removes the need for backoff for the next retry by resetting the try count.
tryCnt = 0 tryCnt = 0
reportHealth(resp.Status == healthpb.HealthCheckResponse_SERVING) if resp.Status == healthpb.HealthCheckResponse_SERVING {
setConnectivityState(connectivity.Ready)
} else {
setConnectivityState(connectivity.TransientFailure)
}
} }
} }
} }
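The signature above is internal; from an application's point of view, client-side health checking is enabled by importing the health package (whose init registers clientHealthCheck, as shown above) and requesting it in the service config. A hedged sketch, where the target and service name are placeholders and a balancer such as round_robin must be in use for per-SubConn health checking to take effect:

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/health" // registers the client health-check function
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{
			"loadBalancingPolicy": "round_robin",
			"healthCheckConfig": {"serviceName": ""}
		}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}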

View File

@ -0,0 +1,46 @@
/*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package balancerload defines APIs to parse server loads in trailers. The
// parsed loads are sent to balancers in DoneInfo.
package balancerload
import (
"google.golang.org/grpc/metadata"
)
// Parser converts loads from metadata into a concrete type.
type Parser interface {
// Parse parses loads from metadata.
Parse(md metadata.MD) interface{}
}
var parser Parser
// SetParser sets the load parser.
//
// Not mutex-protected, should be called before any gRPC functions.
func SetParser(lr Parser) {
parser = lr
}
// Parse calls parser.Parse().
func Parse(md metadata.MD) interface{} {
if parser == nil {
return nil
}
return parser.Parse(md)
}
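balancerload lives under grpc/internal, so it cannot be imported by applications; purely to illustrate the Parser contract, a hypothetical parser that pulls a load report out of a made-up trailer key might look like this:

package balancerloadsketch

import (
	"google.golang.org/grpc/metadata"
)

// trailerLoadParser is a hypothetical Parser; the trailer key is invented.
type trailerLoadParser struct{}

func (trailerLoadParser) Parse(md metadata.MD) interface{} {
	if vals := md.Get("x-endpoint-load"); len(vals) > 0 {
		return vals[0]
	}
	return nil
}

// Inside the grpc module this would be installed with
//   balancerload.SetParser(trailerLoadParser{})
// and the parsed value would then be delivered to balancers in DoneInfo.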

View File

@ -24,6 +24,7 @@
package channelz package channelz
import ( import (
"fmt"
"sort" "sort"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -95,9 +96,14 @@ func (d *dbWrapper) get() *channelMap {
// NewChannelzStorage initializes channelz data storage and id generator. // NewChannelzStorage initializes channelz data storage and id generator.
// //
// This function returns a cleanup function to wait for all channelz state to be reset by the
// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
// don't interfere with each other, i.e. a lingering goroutine from a previous test doing entity removal
// does not happen to remove an entity just registered by the new test, since the id space is the same.
//
// Note: This function is exported for testing purpose only. User should not call // Note: This function is exported for testing purpose only. User should not call
// it in most cases. // it in most cases.
func NewChannelzStorage() { func NewChannelzStorage() (cleanup func() error) {
db.set(&channelMap{ db.set(&channelMap{
topLevelChannels: make(map[int64]struct{}), topLevelChannels: make(map[int64]struct{}),
channels: make(map[int64]*channel), channels: make(map[int64]*channel),
@ -107,6 +113,28 @@ func NewChannelzStorage() {
subChannels: make(map[int64]*subChannel), subChannels: make(map[int64]*subChannel),
}) })
idGen.reset() idGen.reset()
return func() error {
var err error
cm := db.get()
if cm == nil {
return nil
}
for i := 0; i < 1000; i++ {
cm.mu.Lock()
if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
cm.mu.Unlock()
// all things stored in the channelz map have been cleared.
return nil
}
cm.mu.Unlock()
time.Sleep(10 * time.Millisecond)
}
cm.mu.Lock()
err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
cm.mu.Unlock()
return err
}
} }
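A sketch of the intended test-side usage (channelz is internal to the grpc module, so only its own tests can call this; the test body is hypothetical):

package grpc

import (
	"testing"

	"google.golang.org/grpc/internal/channelz"
)

func TestWithChannelzCleanup(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer func() {
		// Fails the test if channelz entities are still registered after ~10s.
		if err := czCleanup(); err != nil {
			t.Error(err)
		}
	}()
	// ... create and tear down ClientConns / servers here ...
}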
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a // GetTopChannels returns a slice of top channel's ChannelMetric, along with a

View File

@ -25,47 +25,11 @@ import (
) )
const ( const (
prefix = "GRPC_GO_" prefix = "GRPC_GO_"
retryStr = prefix + "RETRY" retryStr = prefix + "RETRY"
requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE"
)
// RequireHandshakeSetting describes the settings for handshaking.
type RequireHandshakeSetting int
const (
// RequireHandshakeHybrid (default, deprecated) indicates to not wait for
// handshake before considering a connection ready, but wait before
// considering successful.
RequireHandshakeHybrid RequireHandshakeSetting = iota
// RequireHandshakeOn (default after the 1.17 release) indicates to wait
// for handshake before considering a connection ready/successful.
RequireHandshakeOn
// RequireHandshakeOff indicates to not wait for handshake before
// considering a connection ready/successful.
RequireHandshakeOff
) )
var ( var (
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
Retry = strings.EqualFold(os.Getenv(retryStr), "on") Retry = strings.EqualFold(os.Getenv(retryStr), "on")
// RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE
// environment variable.
//
// Will be removed after the 1.18 release.
RequireHandshake RequireHandshakeSetting
) )
func init() {
switch strings.ToLower(os.Getenv(requireHandshakeStr)) {
case "on":
fallthrough
default:
RequireHandshake = RequireHandshakeOn
case "off":
RequireHandshake = RequireHandshakeOff
case "hybrid":
// Will be removed after the 1.17 release.
RequireHandshake = RequireHandshakeHybrid
}
}

View File

@ -23,6 +23,8 @@ package internal
import ( import (
"context" "context"
"time" "time"
"google.golang.org/grpc/connectivity"
) )
var ( var (
@ -37,10 +39,25 @@ var (
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience. // default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second KeepaliveMinPingTime = 10 * time.Second
// ParseServiceConfig is a function to parse JSON service configs into
// opaque data structures.
ParseServiceConfig func(sc string) (interface{}, error)
// StatusRawProto is exported by status/status.go. This func returns a
// pointer to the wrapped Status proto for a given status.Status without a
// call to proto.Clone(). The returned Status proto should not be mutated by
// the caller.
StatusRawProto interface{} // func (*status.Status) *spb.Status
) )
// HealthChecker defines the signature of the client-side LB channel health checking function. // HealthChecker defines the signature of the client-side LB channel health checking function.
type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error //
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
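To make the new signature concrete, a hypothetical checker matching this type (usable only inside the grpc module, e.g. via the test-only withHealthCheckFunc dial option shown earlier in this diff) could simply report READY and wait for cancellation:

package internalsketch

import (
	"context"

	"google.golang.org/grpc/connectivity"
)

// alwaysReady is a hypothetical HealthChecker: it marks the SubConn READY
// immediately and returns once the stream's context is cancelled.
func alwaysReady(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error {
	setConnectivityState(connectivity.Ready)
	<-ctx.Done()
	return nil
}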
const ( const (
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.

View File

@ -22,18 +22,24 @@ package syscall
import ( import (
"net" "net"
"sync"
"time" "time"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
) )
func init() { var once sync.Once
grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
func log() {
once.Do(func() {
grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
})
} }
// GetCPUTime returns how much CPU time has passed since the start of this process. // GetCPUTime returns how much CPU time has passed since the start of this process.
// It always returns 0 under non-linux or appengine environment. // It always returns 0 under non-linux or appengine environment.
func GetCPUTime() int64 { func GetCPUTime() int64 {
log()
return 0 return 0
} }
@ -42,22 +48,26 @@ type Rusage struct{}
// GetRusage is a no-op function under non-linux or appengine environment. // GetRusage is a no-op function under non-linux or appengine environment.
func GetRusage() (rusage *Rusage) { func GetRusage() (rusage *Rusage) {
log()
return nil return nil
} }
// CPUTimeDiff returns the differences of user CPU time and system CPU time used // CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs. It is a no-op function for non-linux or appengine environment. // between two Rusage structs. It is a no-op function for non-linux or appengine environment.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
log()
return 0, 0 return 0, 0
} }
// SetTCPUserTimeout is a no-op function under non-linux or appengine environments // SetTCPUserTimeout is a no-op function under non-linux or appengine environments
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
log()
return nil return nil
} }
// GetTCPUserTimeout is a no-op function under non-linux or appengine environments // GetTCPUserTimeout is a no-op function under non-linux or appengine environments
// a negative return value indicates the operation is not supported // a negative return value indicates the operation is not supported
func GetTCPUserTimeout(conn net.Conn) (int, error) { func GetTCPUserTimeout(conn net.Conn) (int, error) {
log()
return -1, nil return -1, nil
} }

View File

@ -23,6 +23,7 @@ import (
"fmt" "fmt"
"runtime" "runtime"
"sync" "sync"
"sync/atomic"
"golang.org/x/net/http2" "golang.org/x/net/http2"
"golang.org/x/net/http2/hpack" "golang.org/x/net/http2/hpack"
@ -84,12 +85,24 @@ func (il *itemList) isEmpty() bool {
// the control buffer of transport. They represent different aspects of // the control buffer of transport. They represent different aspects of
// control tasks, e.g., flow control, settings, streaming resetting, etc. // control tasks, e.g., flow control, settings, streaming resetting, etc.
// maxQueuedTransportResponseFrames is the most queued "transport response"
// frames we will buffer before preventing new reads from occurring on the
// transport. These are control frames sent in response to client requests,
// such as RST_STREAM due to bad headers or settings acks.
const maxQueuedTransportResponseFrames = 50
type cbItem interface {
isTransportResponseFrame() bool
}
// registerStream is used to register an incoming stream with loopy writer. // registerStream is used to register an incoming stream with loopy writer.
type registerStream struct { type registerStream struct {
streamID uint32 streamID uint32
wq *writeQuota wq *writeQuota
} }
func (*registerStream) isTransportResponseFrame() bool { return false }
// headerFrame is also used to register stream on the client-side. // headerFrame is also used to register stream on the client-side.
type headerFrame struct { type headerFrame struct {
streamID uint32 streamID uint32
@ -102,6 +115,10 @@ type headerFrame struct {
onOrphaned func(error) // Valid on client-side onOrphaned func(error) // Valid on client-side
} }
func (h *headerFrame) isTransportResponseFrame() bool {
return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
}
type cleanupStream struct { type cleanupStream struct {
streamID uint32 streamID uint32
rst bool rst bool
@ -109,6 +126,8 @@ type cleanupStream struct {
onWrite func() onWrite func()
} }
func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
type dataFrame struct { type dataFrame struct {
streamID uint32 streamID uint32
endStream bool endStream bool
@ -119,27 +138,41 @@ type dataFrame struct {
onEachWrite func() onEachWrite func()
} }
func (*dataFrame) isTransportResponseFrame() bool { return false }
type incomingWindowUpdate struct { type incomingWindowUpdate struct {
streamID uint32 streamID uint32
increment uint32 increment uint32
} }
func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }
type outgoingWindowUpdate struct { type outgoingWindowUpdate struct {
streamID uint32 streamID uint32
increment uint32 increment uint32
} }
func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
return false // window updates are throttled by thresholds
}
type incomingSettings struct { type incomingSettings struct {
ss []http2.Setting ss []http2.Setting
} }
func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK
type outgoingSettings struct { type outgoingSettings struct {
ss []http2.Setting ss []http2.Setting
} }
func (*outgoingSettings) isTransportResponseFrame() bool { return false }
type incomingGoAway struct { type incomingGoAway struct {
} }
func (*incomingGoAway) isTransportResponseFrame() bool { return false }
type goAway struct { type goAway struct {
code http2.ErrCode code http2.ErrCode
debugData []byte debugData []byte
@ -147,15 +180,21 @@ type goAway struct {
closeConn bool closeConn bool
} }
func (*goAway) isTransportResponseFrame() bool { return false }
type ping struct { type ping struct {
ack bool ack bool
data [8]byte data [8]byte
} }
func (*ping) isTransportResponseFrame() bool { return true }
type outFlowControlSizeRequest struct { type outFlowControlSizeRequest struct {
resp chan uint32 resp chan uint32
} }
func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
type outStreamState int type outStreamState int
const ( const (
@ -238,6 +277,14 @@ type controlBuffer struct {
consumerWaiting bool consumerWaiting bool
list *itemList list *itemList
err error err error
// transportResponseFrames counts the number of queued items that represent
// the response of an action initiated by the peer. trfChan is created
// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
// closed and nilled when transportResponseFrames drops below the
// threshold. Both fields are protected by mu.
transportResponseFrames int
trfChan atomic.Value // *chan struct{}
} }
func newControlBuffer(done <-chan struct{}) *controlBuffer { func newControlBuffer(done <-chan struct{}) *controlBuffer {
@ -248,12 +295,24 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer {
} }
} }
func (c *controlBuffer) put(it interface{}) error { // throttle blocks if there are too many incomingSettings/cleanupStreams in the
// controlbuf.
func (c *controlBuffer) throttle() {
ch, _ := c.trfChan.Load().(*chan struct{})
if ch != nil {
select {
case <-*ch:
case <-c.done:
}
}
}
func (c *controlBuffer) put(it cbItem) error {
_, err := c.executeAndPut(nil, it) _, err := c.executeAndPut(nil, it)
return err return err
} }
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) { func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
var wakeUp bool var wakeUp bool
c.mu.Lock() c.mu.Lock()
if c.err != nil { if c.err != nil {
@ -271,6 +330,15 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{
c.consumerWaiting = false c.consumerWaiting = false
} }
c.list.enqueue(it) c.list.enqueue(it)
if it.isTransportResponseFrame() {
c.transportResponseFrames++
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are adding the frame that puts us over the threshold; create
// a throttling channel.
ch := make(chan struct{})
c.trfChan.Store(&ch)
}
}
c.mu.Unlock() c.mu.Unlock()
if wakeUp { if wakeUp {
select { select {
@ -304,7 +372,17 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
return nil, c.err return nil, c.err
} }
if !c.list.isEmpty() { if !c.list.isEmpty() {
h := c.list.dequeue() h := c.list.dequeue().(cbItem)
if h.isTransportResponseFrame() {
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are removing the frame that put us over the
// threshold; close and clear the throttling channel.
ch := c.trfChan.Load().(*chan struct{})
close(*ch)
c.trfChan.Store((*chan struct{})(nil))
}
c.transportResponseFrames--
}
c.mu.Unlock() c.mu.Unlock()
return h, nil return h, nil
} }
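Pulling the controlbuf pieces above together, here is a standalone sketch of the throttling pattern (simplified: every item counts as a response frame, and all names are made up). The goroutine feeding the buffer calls throttle before adding more work; it blocks once queued items reach the threshold and is released when the consumer drains the count back under it and closes the gate.

package controlbufsketch

import (
	"sync"
	"sync/atomic"
)

const threshold = 50

type buffer struct {
	mu      sync.Mutex
	pending int          // queued response-type items, protected by mu
	gate    atomic.Value // *chan struct{}; non-nil while producers must wait
	items   []string
}

// throttle blocks the caller while the pending count is at the threshold.
func (b *buffer) throttle() {
	if ch, _ := b.gate.Load().(*chan struct{}); ch != nil {
		<-*ch
	}
}

// put enqueues an item and publishes the gate when the threshold is reached.
func (b *buffer) put(it string) {
	b.mu.Lock()
	b.items = append(b.items, it)
	b.pending++
	if b.pending == threshold {
		ch := make(chan struct{})
		b.gate.Store(&ch)
	}
	b.mu.Unlock()
}

// get dequeues an item and, when dropping below the threshold, closes the
// gate so blocked callers of throttle can continue.
func (b *buffer) get() (string, bool) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.items) == 0 {
		return "", false
	}
	it := b.items[0]
	b.items = b.items[1:]
	if b.pending == threshold {
		ch := b.gate.Load().(*chan struct{})
		close(*ch)
		b.gate.Store((*chan struct{})(nil))
	}
	b.pending--
	return it, true
}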

View File

@ -149,6 +149,7 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
n = uint32(math.MaxInt32) n = uint32(math.MaxInt32)
} }
f.mu.Lock() f.mu.Lock()
defer f.mu.Unlock()
// estSenderQuota is the receiver's view of the maximum number of bytes the sender // estSenderQuota is the receiver's view of the maximum number of bytes the sender
// can send without a window update. // can send without a window update.
estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
@ -169,10 +170,8 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 {
// is padded; We will fallback on the current available window(at least a 1/4th of the limit). // is padded; We will fallback on the current available window(at least a 1/4th of the limit).
f.delta = n f.delta = n
} }
f.mu.Unlock()
return f.delta return f.delta
} }
f.mu.Unlock()
return 0 return 0
} }

View File

@ -24,6 +24,7 @@
package transport package transport
import ( import (
"bytes"
"context" "context"
"errors" "errors"
"fmt" "fmt"
@ -63,9 +64,6 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
if _, ok := w.(http.Flusher); !ok { if _, ok := w.(http.Flusher); !ok {
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
} }
if _, ok := w.(http.CloseNotifier); !ok {
return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
}
st := &serverHandlerTransport{ st := &serverHandlerTransport{
rw: w, rw: w,
@ -176,17 +174,11 @@ func (a strAddr) String() string { return string(a) }
// do runs fn in the ServeHTTP goroutine. // do runs fn in the ServeHTTP goroutine.
func (ht *serverHandlerTransport) do(fn func()) error { func (ht *serverHandlerTransport) do(fn func()) error {
// Avoid a panic writing to closed channel. Imperfect but maybe good enough.
select { select {
case <-ht.closedCh: case <-ht.closedCh:
return ErrConnClosing return ErrConnClosing
default: case ht.writes <- fn:
select { return nil
case ht.writes <- fn:
return nil
case <-ht.closedCh:
return ErrConnClosing
}
} }
} }
@ -237,7 +229,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
if ht.stats != nil { if ht.stats != nil {
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
} }
close(ht.writes)
} }
ht.Close() ht.Close()
return err return err
@ -315,19 +306,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
ctx, cancel = context.WithCancel(ctx) ctx, cancel = context.WithCancel(ctx)
} }
// requestOver is closed when either the request's context is done // requestOver is closed when the status has been written via WriteStatus.
// or the status has been written via WriteStatus.
requestOver := make(chan struct{}) requestOver := make(chan struct{})
// clientGone receives a single value if peer is gone, either
// because the underlying connection is dead or because the
// peer sends an http2 RST_STREAM.
clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
go func() { go func() {
select { select {
case <-requestOver: case <-requestOver:
case <-ht.closedCh: case <-ht.closedCh:
case <-clientGone: case <-ht.req.Context().Done():
} }
cancel() cancel()
ht.Close() ht.Close()
@ -363,7 +348,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
ht.stats.HandleRPC(s.ctx, inHeader) ht.stats.HandleRPC(s.ctx, inHeader)
} }
s.trReader = &transportReader{ s.trReader = &transportReader{
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
windowHandler: func(int) {}, windowHandler: func(int) {},
} }
@ -377,7 +362,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
for buf := make([]byte, readSize); ; { for buf := make([]byte, readSize); ; {
n, err := req.Body.Read(buf) n, err := req.Body.Read(buf)
if n > 0 { if n > 0 {
s.buf.put(recvMsg{data: buf[:n:n]}) s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
buf = buf[n:] buf = buf[n:]
} }
if err != nil { if err != nil {
@ -407,10 +392,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
func (ht *serverHandlerTransport) runStream() { func (ht *serverHandlerTransport) runStream() {
for { for {
select { select {
case fn, ok := <-ht.writes: case fn := <-ht.writes:
if !ok {
return
}
fn() fn()
case <-ht.closedCh: case <-ht.closedCh:
return return

View File

@ -117,6 +117,8 @@ type http2Client struct {
onGoAway func(GoAwayReason) onGoAway func(GoAwayReason)
onClose func() onClose func()
bufferPool *bufferPool
} }
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@ -249,6 +251,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
onGoAway: onGoAway, onGoAway: onGoAway,
onClose: onClose, onClose: onClose,
keepaliveEnabled: keepaliveEnabled, keepaliveEnabled: keepaliveEnabled,
bufferPool: newBufferPool(),
} }
t.controlBuf = newControlBuffer(t.ctxDone) t.controlBuf = newControlBuffer(t.ctxDone)
if opts.InitialWindowSize >= defaultWindowSize { if opts.InitialWindowSize >= defaultWindowSize {
@ -367,6 +370,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
closeStream: func(err error) { closeStream: func(err error) {
t.CloseStream(s, err) t.CloseStream(s, err)
}, },
freeBuffer: t.bufferPool.put,
}, },
windowHandler: func(n int) { windowHandler: func(n int) {
t.updateWindow(s, uint32(n)) t.updateWindow(s, uint32(n))
@ -437,6 +441,15 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
var k string var k string
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
for _, v := range vv {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
for _, vv := range added { for _, vv := range added {
for i, v := range vv { for i, v := range vv {
if i%2 == 0 { if i%2 == 0 {
@ -450,15 +463,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
} }
} }
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
for _, v := range vv {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
} }
if md, ok := t.md.(*metadata.MD); ok { if md, ok := t.md.(*metadata.MD); ok {
for k, vv := range *md { for k, vv := range *md {
@ -489,6 +493,9 @@ func (t *http2Client) createAudience(callHdr *CallHdr) string {
} }
func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
if len(t.perRPCCreds) == 0 {
return nil, nil
}
authData := map[string]string{} authData := map[string]string{}
for _, c := range t.perRPCCreds { for _, c := range t.perRPCCreds {
data, err := c.GetRequestMetadata(ctx, audience) data, err := c.GetRequestMetadata(ctx, audience)
@ -509,7 +516,7 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s
} }
func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
callAuthData := map[string]string{} var callAuthData map[string]string
// Check if credentials.PerRPCCredentials were provided via call options. // Check if credentials.PerRPCCredentials were provided via call options.
// Note: if these credentials are provided both via dial options and call // Note: if these credentials are provided both via dial options and call
// options, then both sets of credentials will be applied. // options, then both sets of credentials will be applied.
@ -521,6 +528,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
if err != nil { if err != nil {
return nil, status.Errorf(codes.Internal, "transport: %v", err) return nil, status.Errorf(codes.Internal, "transport: %v", err)
} }
callAuthData = make(map[string]string, len(data))
for k, v := range data { for k, v := range data {
// Capital header names are illegal in HTTP/2 // Capital header names are illegal in HTTP/2
k = strings.ToLower(k) k = strings.ToLower(k)
@ -549,10 +557,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
s.write(recvMsg{err: err}) s.write(recvMsg{err: err})
close(s.done) close(s.done)
// If headerChan isn't closed, then close it. // If headerChan isn't closed, then close it.
if atomic.SwapUint32(&s.headerDone, 1) == 0 { if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
close(s.headerChan) close(s.headerChan)
} }
} }
hdr := &headerFrame{ hdr := &headerFrame{
hf: headerFields, hf: headerFields,
@ -713,7 +720,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
s.write(recvMsg{err: err}) s.write(recvMsg{err: err})
} }
// If headerChan isn't closed, then close it. // If headerChan isn't closed, then close it.
if atomic.SwapUint32(&s.headerDone, 1) == 0 { if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
s.noHeaders = true s.noHeaders = true
close(s.headerChan) close(s.headerChan)
} }
@ -765,6 +772,9 @@ func (t *http2Client) Close() error {
t.mu.Unlock() t.mu.Unlock()
return nil return nil
} }
// Call t.onClose before setting the state to closing to prevent the client
// from attempting to create new streams ASAP.
t.onClose()
t.state = closing t.state = closing
streams := t.activeStreams streams := t.activeStreams
t.activeStreams = nil t.activeStreams = nil
@ -785,7 +795,6 @@ func (t *http2Client) Close() error {
} }
t.statsHandler.HandleConn(t.ctx, connEnd) t.statsHandler.HandleConn(t.ctx, connEnd)
} }
t.onClose()
return err return err
} }
@ -794,21 +803,21 @@ func (t *http2Client) Close() error {
// stream is closed. If there are no active streams, the transport is closed // stream is closed. If there are no active streams, the transport is closed
// immediately. This does nothing if the transport is already draining or // immediately. This does nothing if the transport is already draining or
// closing. // closing.
func (t *http2Client) GracefulClose() error { func (t *http2Client) GracefulClose() {
t.mu.Lock() t.mu.Lock()
// Make sure we move to draining only from active. // Make sure we move to draining only from active.
if t.state == draining || t.state == closing { if t.state == draining || t.state == closing {
t.mu.Unlock() t.mu.Unlock()
return nil return
} }
t.state = draining t.state = draining
active := len(t.activeStreams) active := len(t.activeStreams)
t.mu.Unlock() t.mu.Unlock()
if active == 0 { if active == 0 {
return t.Close() t.Close()
return
} }
t.controlBuf.put(&incomingGoAway{}) t.controlBuf.put(&incomingGoAway{})
return nil
} }
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller // Write formats the data into HTTP2 data frame(s) and sends it out. The caller
@ -946,9 +955,10 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame. // guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated? // Can this copy be eliminated?
if len(f.Data()) > 0 { if len(f.Data()) > 0 {
data := make([]byte, len(f.Data())) buffer := t.bufferPool.get()
copy(data, f.Data()) buffer.Reset()
s.write(recvMsg{data: data}) buffer.Write(f.Data())
s.write(recvMsg{buffer: buffer})
} }
} }
// The server has closed the stream without sending trailers. Record that // The server has closed the stream without sending trailers. Record that
@ -973,9 +983,9 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
statusCode = codes.Unknown statusCode = codes.Unknown
} }
if statusCode == codes.Canceled { if statusCode == codes.Canceled {
// Our deadline was already exceeded, and that was likely the cause of if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
// this cancelation. Alter the status code accordingly. // Our deadline was already exceeded, and that was likely the cause
if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) { // of this cancelation. Alter the status code accordingly.
statusCode = codes.DeadlineExceeded statusCode = codes.DeadlineExceeded
} }
} }
@ -1080,11 +1090,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
default: default:
t.setGoAwayReason(f) t.setGoAwayReason(f)
close(t.goAway) close(t.goAway)
t.state = draining
t.controlBuf.put(&incomingGoAway{}) t.controlBuf.put(&incomingGoAway{})
// Notify the clientconn about the GOAWAY before we set the state to
// This has to be a new goroutine because we're still using the current goroutine to read in the transport. // draining, to allow the client to stop attempting to create streams
// before disallowing new streams on this connection.
t.onGoAway(t.goAwayReason) t.onGoAway(t.goAwayReason)
t.state = draining
} }
// All streams with IDs greater than the GoAwayId // All streams with IDs greater than the GoAwayId
// and smaller than the previous GoAway ID should be killed. // and smaller than the previous GoAway ID should be killed.
@ -1140,16 +1151,26 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if !ok { if !ok {
return return
} }
endStream := frame.StreamEnded()
atomic.StoreUint32(&s.bytesReceived, 1) atomic.StoreUint32(&s.bytesReceived, 1)
var state decodeState initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
if err := state.decodeHeader(frame); err != nil {
t.closeStream(s, err, true, http2.ErrCodeProtocol, status.New(codes.Internal, err.Error()), nil, false) if !initialHeader && !endStream {
// Something wrong. Stops reading even when there is remaining. // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set.
st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
return return
} }
endStream := frame.StreamEnded() state := &decodeState{}
var isHeader bool // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
state.data.isGRPC = !initialHeader
if err := state.decodeHeader(frame); err != nil {
t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
return
}
isHeader := false
defer func() { defer func() {
if t.statsHandler != nil { if t.statsHandler != nil {
if isHeader { if isHeader {
@ -1167,29 +1188,33 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
} }
} }
}() }()
// If headers haven't been received yet.
if atomic.SwapUint32(&s.headerDone, 1) == 0 { // If headerChan hasn't been closed yet
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
if !endStream { if !endStream {
// Headers frame is not actually a trailers-only frame. // HEADERS frame block carries a Response-Headers.
isHeader = true isHeader = true
// These values can be set without any synchronization because // These values can be set without any synchronization because
// stream goroutine will read it only after seeing a closed // stream goroutine will read it only after seeing a closed
// headerChan which we'll close after setting this. // headerChan which we'll close after setting this.
s.recvCompress = state.encoding s.recvCompress = state.data.encoding
if len(state.mdata) > 0 { if len(state.data.mdata) > 0 {
s.header = state.mdata s.header = state.data.mdata
} }
} else { } else {
// HEADERS frame block carries a Trailers-Only.
s.noHeaders = true s.noHeaders = true
} }
close(s.headerChan) close(s.headerChan)
} }
if !endStream { if !endStream {
return return
} }
// if client received END_STREAM from server while stream was still active, send RST_STREAM // if client received END_STREAM from server while stream was still active, send RST_STREAM
rst := s.getState() == streamActive rst := s.getState() == streamActive
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.mdata, true) t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
} }
// reader runs as a separate goroutine in charge of reading data from network // reader runs as a separate goroutine in charge of reading data from network
@ -1220,6 +1245,7 @@ func (t *http2Client) reader() {
// loop to keep reading incoming messages on this transport. // loop to keep reading incoming messages on this transport.
for { for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame() frame, err := t.framer.fr.ReadFrame()
if t.keepaliveEnabled { if t.keepaliveEnabled {
atomic.CompareAndSwapUint32(&t.activity, 0, 1) atomic.CompareAndSwapUint32(&t.activity, 0, 1)
@ -1307,6 +1333,7 @@ func (t *http2Client) keepalive() {
timer.Reset(t.kp.Time) timer.Reset(t.kp.Time)
continue continue
} }
infof("transport: closing client transport due to idleness.")
t.Close() t.Close()
return return
case <-t.ctx.Done(): case <-t.ctx.Done():
@ -1356,6 +1383,8 @@ func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
return &s return &s
} }
func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
func (t *http2Client) IncrMsgSent() { func (t *http2Client) IncrMsgSent() {
atomic.AddInt64(&t.czData.msgSent, 1) atomic.AddInt64(&t.czData.msgSent, 1)
atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())

View File

@ -35,9 +35,11 @@ import (
"golang.org/x/net/http2" "golang.org/x/net/http2"
"golang.org/x/net/http2/hpack" "golang.org/x/net/http2/hpack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/keepalive" "google.golang.org/grpc/keepalive"
@ -55,6 +57,9 @@ var (
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger // ErrHeaderListSizeLimitViolation indicates that the header list size is larger
// than the limit set by peer. // than the limit set by peer.
ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
// statusRawProto is a function to get to the raw status proto wrapped in a
// status.Status without a proto.Clone().
statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
) )
// http2Server implements the ServerTransport interface with HTTP2. // http2Server implements the ServerTransport interface with HTTP2.
@ -119,6 +124,7 @@ type http2Server struct {
// Fields below are for channelz metric collection. // Fields below are for channelz metric collection.
channelzID int64 // channelz unique identification number channelzID int64 // channelz unique identification number
czData *channelzData czData *channelzData
bufferPool *bufferPool
} }
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
@ -220,6 +226,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
kep: kep, kep: kep,
initialWindowSize: iwz, initialWindowSize: iwz,
czData: new(channelzData), czData: new(channelzData),
bufferPool: newBufferPool(),
} }
t.controlBuf = newControlBuffer(t.ctxDone) t.controlBuf = newControlBuffer(t.ctxDone)
if dynamicWindow { if dynamicWindow {
@ -286,7 +293,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
// operateHeader takes action on the decoded headers. // operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
streamID := frame.Header().StreamID streamID := frame.Header().StreamID
state := decodeState{serverSide: true} state := &decodeState{
serverSide: true,
}
if err := state.decodeHeader(frame); err != nil { if err := state.decodeHeader(frame); err != nil {
if se, ok := status.FromError(err); ok { if se, ok := status.FromError(err); ok {
t.controlBuf.put(&cleanupStream{ t.controlBuf.put(&cleanupStream{
@ -305,16 +314,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
st: t, st: t,
buf: buf, buf: buf,
fc: &inFlow{limit: uint32(t.initialWindowSize)}, fc: &inFlow{limit: uint32(t.initialWindowSize)},
recvCompress: state.encoding, recvCompress: state.data.encoding,
method: state.method, method: state.data.method,
contentSubtype: state.contentSubtype, contentSubtype: state.data.contentSubtype,
} }
if frame.StreamEnded() { if frame.StreamEnded() {
// s is just created by the caller. No lock needed. // s is just created by the caller. No lock needed.
s.state = streamReadDone s.state = streamReadDone
} }
if state.timeoutSet { if state.data.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
} else { } else {
s.ctx, s.cancel = context.WithCancel(t.ctx) s.ctx, s.cancel = context.WithCancel(t.ctx)
} }
@ -327,19 +336,19 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
} }
s.ctx = peer.NewContext(s.ctx, pr) s.ctx = peer.NewContext(s.ctx, pr)
// Attach the received metadata to the context. // Attach the received metadata to the context.
if len(state.mdata) > 0 { if len(state.data.mdata) > 0 {
s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
} }
if state.statsTags != nil { if state.data.statsTags != nil {
s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
} }
if state.statsTrace != nil { if state.data.statsTrace != nil {
s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
} }
if t.inTapHandle != nil { if t.inTapHandle != nil {
var err error var err error
info := &tap.Info{ info := &tap.Info{
FullMethodName: state.method, FullMethodName: state.data.method,
} }
s.ctx, err = t.inTapHandle(s.ctx, info) s.ctx, err = t.inTapHandle(s.ctx, info)
if err != nil { if err != nil {
@ -403,9 +412,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{ s.trReader = &transportReader{
reader: &recvBufferReader{ reader: &recvBufferReader{
ctx: s.ctx, ctx: s.ctx,
ctxDone: s.ctxDone, ctxDone: s.ctxDone,
recv: s.buf, recv: s.buf,
freeBuffer: t.bufferPool.put,
}, },
windowHandler: func(n int) { windowHandler: func(n int) {
t.updateWindow(s, uint32(n)) t.updateWindow(s, uint32(n))
@ -426,6 +436,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
defer close(t.readerDone) defer close(t.readerDone)
for { for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame() frame, err := t.framer.fr.ReadFrame()
atomic.StoreUint32(&t.activity, 1) atomic.StoreUint32(&t.activity, 1)
if err != nil { if err != nil {
@ -435,7 +446,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
s := t.activeStreams[se.StreamID] s := t.activeStreams[se.StreamID]
t.mu.Unlock() t.mu.Unlock()
if s != nil { if s != nil {
t.closeStream(s, true, se.Code, nil, false) t.closeStream(s, true, se.Code, false)
} else { } else {
t.controlBuf.put(&cleanupStream{ t.controlBuf.put(&cleanupStream{
streamID: se.StreamID, streamID: se.StreamID,
@ -577,7 +588,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
} }
if size > 0 { if size > 0 {
if err := s.fc.onData(size); err != nil { if err := s.fc.onData(size); err != nil {
t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false) t.closeStream(s, true, http2.ErrCodeFlowControl, false)
return return
} }
if f.Header().Flags.Has(http2.FlagDataPadded) { if f.Header().Flags.Has(http2.FlagDataPadded) {
@ -589,9 +600,10 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame. // guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated? // Can this copy be eliminated?
if len(f.Data()) > 0 { if len(f.Data()) > 0 {
data := make([]byte, len(f.Data())) buffer := t.bufferPool.get()
copy(data, f.Data()) buffer.Reset()
s.write(recvMsg{data: data}) buffer.Write(f.Data())
s.write(recvMsg{buffer: buffer})
} }
} }
if f.Header().Flags.Has(http2.FlagDataEndStream) { if f.Header().Flags.Has(http2.FlagDataEndStream) {
@ -602,11 +614,18 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
} }
func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
s, ok := t.getStream(f) // If the stream is not deleted from the transport's active streams map, then do a regular close stream.
if !ok { if s, ok := t.getStream(f); ok {
t.closeStream(s, false, 0, false)
return return
} }
t.closeStream(s, false, 0, nil, false) // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map.
t.controlBuf.put(&cleanupStream{
streamID: f.Header().StreamID,
rst: false,
rstCode: 0,
onWrite: func() {},
})
} }
func (t *http2Server) handleSettings(f *http2.SettingsFrame) { func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
@ -748,6 +767,10 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
return nil return nil
} }
func (t *http2Server) setResetPingStrikes() {
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
func (t *http2Server) writeHeaderLocked(s *Stream) error { func (t *http2Server) writeHeaderLocked(s *Stream) error {
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
// first and create a slice of that exact size. // first and create a slice of that exact size.
@ -762,15 +785,13 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
streamID: s.id, streamID: s.id,
hf: headerFields, hf: headerFields,
endStream: false, endStream: false,
onWrite: func() { onWrite: t.setResetPingStrikes,
atomic.StoreUint32(&t.resetPingStrikes, 1)
},
}) })
if !success { if !success {
if err != nil { if err != nil {
return err return err
} }
t.closeStream(s, true, http2.ErrCodeInternal, nil, false) t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation return ErrHeaderListSizeLimitViolation
} }
if t.stats != nil { if t.stats != nil {
@ -808,7 +829,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
if p := st.Proto(); p != nil && len(p.Details) > 0 { if p := statusRawProto(st); p != nil && len(p.Details) > 0 {
stBytes, err := proto.Marshal(p) stBytes, err := proto.Marshal(p)
if err != nil { if err != nil {
// TODO: return error instead, when callers are able to handle it. // TODO: return error instead, when callers are able to handle it.
@ -824,9 +845,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
streamID: s.id, streamID: s.id,
hf: headerFields, hf: headerFields,
endStream: true, endStream: true,
onWrite: func() { onWrite: t.setResetPingStrikes,
atomic.StoreUint32(&t.resetPingStrikes, 1)
},
} }
s.hdrMu.Unlock() s.hdrMu.Unlock()
success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
@ -834,10 +853,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
if err != nil { if err != nil {
return err return err
} }
t.closeStream(s, true, http2.ErrCodeInternal, nil, false) t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation return ErrHeaderListSizeLimitViolation
} }
t.closeStream(s, false, 0, trailingHeader, true) // Send a RST_STREAM after the trailers if the client has not already half-closed.
rst := s.getState() == streamActive
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
if t.stats != nil { if t.stats != nil {
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
} }
@ -849,6 +870,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
if !s.isHeaderSent() { // Headers haven't been written yet. if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.WriteHeader(s, nil); err != nil { if err := t.WriteHeader(s, nil); err != nil {
if _, ok := err.(ConnectionError); ok {
return err
}
// TODO(mmukhi, dfawley): Make sure this is the right code to return. // TODO(mmukhi, dfawley): Make sure this is the right code to return.
return status.Errorf(codes.Internal, "transport: %v", err) return status.Errorf(codes.Internal, "transport: %v", err)
} }
@ -873,12 +897,10 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
hdr = append(hdr, data[:emptyLen]...) hdr = append(hdr, data[:emptyLen]...)
data = data[emptyLen:] data = data[emptyLen:]
df := &dataFrame{ df := &dataFrame{
streamID: s.id, streamID: s.id,
h: hdr, h: hdr,
d: data, d: data,
onEachWrite: func() { onEachWrite: t.setResetPingStrikes,
atomic.StoreUint32(&t.resetPingStrikes, 1)
},
} }
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
select { select {
@ -944,6 +966,7 @@ func (t *http2Server) keepalive() {
select { select {
case <-maxAge.C: case <-maxAge.C:
// Close the connection after grace period. // Close the connection after grace period.
infof("transport: closing server transport due to maximum connection age.")
t.Close() t.Close()
// Resetting the timer so that the clean-up doesn't deadlock. // Resetting the timer so that the clean-up doesn't deadlock.
maxAge.Reset(infinity) maxAge.Reset(infinity)
@ -957,6 +980,7 @@ func (t *http2Server) keepalive() {
continue continue
} }
if pingSent { if pingSent {
infof("transport: closing server transport due to idleness.")
t.Close() t.Close()
// Resetting the timer so that the clean-up doesn't deadlock. // Resetting the timer so that the clean-up doesn't deadlock.
keepalive.Reset(infinity) keepalive.Reset(infinity)
@ -1006,15 +1030,17 @@ func (t *http2Server) Close() error {
// deleteStream deletes the stream s from transport's active streams. // deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
t.mu.Lock() // In case stream sending and receiving are invoked in separate
if _, ok := t.activeStreams[s.id]; !ok { // goroutines (e.g., bi-directional streaming), cancel needs to be
t.mu.Unlock() // called to interrupt the potential blocking on other goroutines.
return s.cancel()
}
delete(t.activeStreams, s.id) t.mu.Lock()
if len(t.activeStreams) == 0 { if _, ok := t.activeStreams[s.id]; ok {
t.idle = time.Now() delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
}
} }
t.mu.Unlock() t.mu.Unlock()
@ -1027,51 +1053,36 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
} }
} }
// closeStream clears the footprint of a stream when the stream is not needed // finishStream closes the stream and puts the trailing headerFrame into controlbuf.
// any more. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
// Mark the stream as done
oldState := s.swapState(streamDone) oldState := s.swapState(streamDone)
if oldState == streamDone {
// If the stream was already done, return.
return
}
// In case stream sending and receiving are invoked in separate hdr.cleanup = &cleanupStream{
// goroutines (e.g., bi-directional streaming), cancel needs to be streamID: s.id,
// called to interrupt the potential blocking on other goroutines. rst: rst,
s.cancel() rstCode: rstCode,
onWrite: func() {
t.deleteStream(s, eosReceived)
},
}
t.controlBuf.put(hdr)
}
// Deletes the stream from active streams // closeStream clears the footprint of a stream when the stream is not needed any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
s.swapState(streamDone)
t.deleteStream(s, eosReceived) t.deleteStream(s, eosReceived)
cleanup := &cleanupStream{ t.controlBuf.put(&cleanupStream{
streamID: s.id, streamID: s.id,
rst: rst, rst: rst,
rstCode: rstCode, rstCode: rstCode,
onWrite: func() {}, onWrite: func() {},
} })
// No trailer. Puts cleanupFrame into transport's control buffer.
if hdr == nil {
t.controlBuf.put(cleanup)
return
}
// We do the check here, because of the following scenario:
// 1. closeStream is called first with a trailer. A trailer item with a piggybacked cleanup item
// is put to control buffer.
// 2. Loopy writer is waiting on a stream quota. It will never get it because client errored at
// some point. So loopy can't act on trailer
// 3. Client sends a RST_STREAM due to the error. Then closeStream is called without a trailer as
// the result of the received RST_STREAM.
// If we do this check at the beginning of the closeStream, then we won't put a cleanup item in
// response to received RST_STREAM into the control buffer and outStream in loopy writer will
// never get cleaned up.
// If the stream is already done, don't send the trailer.
if oldState == streamDone {
return
}
hdr.cleanup = cleanup
t.controlBuf.put(hdr)
} }
func (t *http2Server) RemoteAddr() net.Addr { func (t *http2Server) RemoteAddr() net.Addr {

View File

@ -78,7 +78,8 @@ var (
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
codes.PermissionDenied: http2.ErrCodeInadequateSecurity, codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
} }
httpStatusConvTab = map[int]codes.Code{ // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
HTTPStatusConvTab = map[int]codes.Code{
// 400 Bad Request - INTERNAL. // 400 Bad Request - INTERNAL.
http.StatusBadRequest: codes.Internal, http.StatusBadRequest: codes.Internal,
// 401 Unauthorized - UNAUTHENTICATED. // 401 Unauthorized - UNAUTHENTICATED.
@ -98,9 +99,7 @@ var (
} }
) )
// Records the states during HPACK decoding. Must be reset once the type parsedHeaderData struct {
// decoding of the entire headers are finished.
type decodeState struct {
encoding string encoding string
// statusGen caches the stream status received from the trailer the server // statusGen caches the stream status received from the trailer the server
// sent. Client side only. Do not access directly. After all trailers are // sent. Client side only. Do not access directly. After all trailers are
@ -120,8 +119,30 @@ type decodeState struct {
statsTags []byte statsTags []byte
statsTrace []byte statsTrace []byte
contentSubtype string contentSubtype string
// isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
//
// We are in gRPC mode (peer speaking gRPC) if:
// * We are client side and have already received a HEADER frame that indicates gRPC peer.
// * The header contains a valid content-type, i.e. a string that starts with "application/grpc",
// and we should handle errors specific to gRPC.
//
// Otherwise (i.e. the content-type does not start with "application/grpc", or is missing), we
// are in HTTP fallback mode, and should handle errors specific to HTTP.
isGRPC bool
grpcErr error
httpErr error
contentTypeErr string
}
// decodeState configures decoding criteria and records the decoded data.
type decodeState struct {
// whether decoding on server side or not // whether decoding on server side or not
serverSide bool serverSide bool
// Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS
// frame once decodeHeader function has been invoked and returned.
data parsedHeaderData
} }
// isReservedHeader checks whether hdr belongs to HTTP2 headers // isReservedHeader checks whether hdr belongs to HTTP2 headers
@ -202,11 +223,11 @@ func contentType(contentSubtype string) string {
} }
func (d *decodeState) status() *status.Status { func (d *decodeState) status() *status.Status {
if d.statusGen == nil { if d.data.statusGen == nil {
// No status-details were provided; generate status using code/msg. // No status-details were provided; generate status using code/msg.
d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
} }
return d.statusGen return d.data.statusGen
} }
const binHdrSuffix = "-bin" const binHdrSuffix = "-bin"
@ -244,113 +265,146 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
if frame.Truncated { if frame.Truncated {
return status.Error(codes.Internal, "peer header list size exceeded limit") return status.Error(codes.Internal, "peer header list size exceeded limit")
} }
for _, hf := range frame.Fields { for _, hf := range frame.Fields {
if err := d.processHeaderField(hf); err != nil { d.processHeaderField(hf)
return err }
if d.data.isGRPC {
if d.data.grpcErr != nil {
return d.data.grpcErr
}
if d.serverSide {
return nil
}
if d.data.rawStatusCode == nil && d.data.statusGen == nil {
// gRPC status doesn't exist.
// Set rawStatusCode to be unknown and return nil error.
// So that, if the stream has ended this Unknown status
// will be propagated to the user.
// Otherwise, it will be ignored. In which case, status from
// a later trailer, that has StreamEnded flag set, is propagated.
code := int(codes.Unknown)
d.data.rawStatusCode = &code
} }
}
if d.serverSide {
return nil return nil
} }
// If grpc status exists, no need to check further. // HTTP fallback mode
if d.rawStatusCode != nil || d.statusGen != nil { if d.data.httpErr != nil {
return nil return d.data.httpErr
} }
// If grpc status doesn't exist and http status doesn't exist, var (
// then it's a malformed header. code = codes.Internal // when header does not include HTTP status, return INTERNAL
if d.httpStatus == nil { ok bool
return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") )
}
if *(d.httpStatus) != http.StatusOK { if d.data.httpStatus != nil {
code, ok := httpStatusConvTab[*(d.httpStatus)] code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
if !ok { if !ok {
code = codes.Unknown code = codes.Unknown
} }
return status.Error(code, http.StatusText(*(d.httpStatus)))
} }
// gRPC status doesn't exist and http status is OK. return status.Error(code, d.constructHTTPErrMsg())
// Set rawStatusCode to be unknown and return nil error. }
// So that, if the stream has ended this Unknown status
// will be propagated to the user. // constructHTTPErrMsg constructs the error message to be returned in HTTP fallback mode.
// Otherwise, it will be ignored. In which case, status from // Format: HTTP status code and its corresponding message + content-type error message.
// a later trailer, that has StreamEnded flag set, is propagated. func (d *decodeState) constructHTTPErrMsg() string {
code := int(codes.Unknown) var errMsgs []string
d.rawStatusCode = &code
return nil if d.data.httpStatus == nil {
errMsgs = append(errMsgs, "malformed header: missing HTTP status")
} else {
errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
}
if d.data.contentTypeErr == "" {
errMsgs = append(errMsgs, "transport: missing content-type field")
} else {
errMsgs = append(errMsgs, d.data.contentTypeErr)
}
return strings.Join(errMsgs, "; ")
} }
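constructHTTPErrMsg builds the HTTP-fallback error by joining the HTTP status text (or a "missing HTTP status" note) with the content-type error. A standalone sketch of the same joining logic, using plain parameters instead of the decodeState fields:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// constructHTTPErrMsg mirrors the fallback-message construction: HTTP status
// text plus the content-type error, joined with "; ".
func constructHTTPErrMsg(httpStatus *int, contentTypeErr string) string {
	var errMsgs []string
	if httpStatus == nil {
		errMsgs = append(errMsgs, "malformed header: missing HTTP status")
	} else {
		errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*httpStatus), *httpStatus))
	}
	if contentTypeErr == "" {
		errMsgs = append(errMsgs, "transport: missing content-type field")
	} else {
		errMsgs = append(errMsgs, contentTypeErr)
	}
	return strings.Join(errMsgs, "; ")
}

func main() {
	code := http.StatusNotFound
	fmt.Println(constructHTTPErrMsg(&code, ""))
	// Not Found: HTTP status code 404; transport: missing content-type field
}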
func (d *decodeState) addMetadata(k, v string) { func (d *decodeState) addMetadata(k, v string) {
if d.mdata == nil { if d.data.mdata == nil {
d.mdata = make(map[string][]string) d.data.mdata = make(map[string][]string)
} }
d.mdata[k] = append(d.mdata[k], v) d.data.mdata[k] = append(d.data.mdata[k], v)
} }
func (d *decodeState) processHeaderField(f hpack.HeaderField) error { func (d *decodeState) processHeaderField(f hpack.HeaderField) {
switch f.Name { switch f.Name {
case "content-type": case "content-type":
contentSubtype, validContentType := contentSubtype(f.Value) contentSubtype, validContentType := contentSubtype(f.Value)
if !validContentType { if !validContentType {
return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value) d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
return
} }
d.contentSubtype = contentSubtype d.data.contentSubtype = contentSubtype
// TODO: do we want to propagate the whole content-type in the metadata, // TODO: do we want to propagate the whole content-type in the metadata,
// or come up with a way to just propagate the content-subtype if it was set? // or come up with a way to just propagate the content-subtype if it was set?
// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
// in the metadata? // in the metadata?
d.addMetadata(f.Name, f.Value) d.addMetadata(f.Name, f.Value)
d.data.isGRPC = true
case "grpc-encoding": case "grpc-encoding":
d.encoding = f.Value d.data.encoding = f.Value
case "grpc-status": case "grpc-status":
code, err := strconv.Atoi(f.Value) code, err := strconv.Atoi(f.Value)
if err != nil { if err != nil {
return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
return
} }
d.rawStatusCode = &code d.data.rawStatusCode = &code
case "grpc-message": case "grpc-message":
d.rawStatusMsg = decodeGrpcMessage(f.Value) d.data.rawStatusMsg = decodeGrpcMessage(f.Value)
case "grpc-status-details-bin": case "grpc-status-details-bin":
v, err := decodeBinHeader(f.Value) v, err := decodeBinHeader(f.Value)
if err != nil { if err != nil {
return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
return
} }
s := &spb.Status{} s := &spb.Status{}
if err := proto.Unmarshal(v, s); err != nil { if err := proto.Unmarshal(v, s); err != nil {
return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
return
} }
d.statusGen = status.FromProto(s) d.data.statusGen = status.FromProto(s)
case "grpc-timeout": case "grpc-timeout":
d.timeoutSet = true d.data.timeoutSet = true
var err error var err error
if d.timeout, err = decodeTimeout(f.Value); err != nil { if d.data.timeout, err = decodeTimeout(f.Value); err != nil {
return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
} }
case ":path": case ":path":
d.method = f.Value d.data.method = f.Value
case ":status": case ":status":
code, err := strconv.Atoi(f.Value) code, err := strconv.Atoi(f.Value)
if err != nil { if err != nil {
return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
return
} }
d.httpStatus = &code d.data.httpStatus = &code
case "grpc-tags-bin": case "grpc-tags-bin":
v, err := decodeBinHeader(f.Value) v, err := decodeBinHeader(f.Value)
if err != nil { if err != nil {
return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
return
} }
d.statsTags = v d.data.statsTags = v
d.addMetadata(f.Name, string(v)) d.addMetadata(f.Name, string(v))
case "grpc-trace-bin": case "grpc-trace-bin":
v, err := decodeBinHeader(f.Value) v, err := decodeBinHeader(f.Value)
if err != nil { if err != nil {
return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
return
} }
d.statsTrace = v d.data.statsTrace = v
d.addMetadata(f.Name, string(v)) d.addMetadata(f.Name, string(v))
default: default:
if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
@ -359,11 +413,10 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
v, err := decodeMetadataHeader(f.Name, f.Value) v, err := decodeMetadataHeader(f.Name, f.Value)
if err != nil { if err != nil {
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
return nil return
} }
d.addMetadata(f.Name, v) d.addMetadata(f.Name, v)
} }
return nil
} }
type timeoutUnit uint8 type timeoutUnit uint8

View File

@ -22,6 +22,7 @@
package transport package transport
import ( import (
"bytes"
"context" "context"
"errors" "errors"
"fmt" "fmt"
@ -39,10 +40,32 @@ import (
"google.golang.org/grpc/tap" "google.golang.org/grpc/tap"
) )
type bufferPool struct {
pool sync.Pool
}
func newBufferPool() *bufferPool {
return &bufferPool{
pool: sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
},
}
}
func (p *bufferPool) get() *bytes.Buffer {
return p.pool.Get().(*bytes.Buffer)
}
func (p *bufferPool) put(b *bytes.Buffer) {
p.pool.Put(b)
}
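bufferPool is a thin wrapper around sync.Pool that recycles *bytes.Buffer values, so each inbound DATA frame no longer allocates a fresh []byte. A standalone sketch of the get/reset/write/put cycle, assuming nothing beyond the standard library:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

type bufferPool struct{ pool sync.Pool }

func newBufferPool() *bufferPool {
	return &bufferPool{pool: sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}}
}

func (p *bufferPool) get() *bytes.Buffer  { return p.pool.Get().(*bytes.Buffer) }
func (p *bufferPool) put(b *bytes.Buffer) { p.pool.Put(b) }

func main() {
	pool := newBufferPool()

	// Receive path: copy the frame payload into a pooled buffer.
	buf := pool.get()
	buf.Reset() // a recycled buffer may still hold old data
	buf.Write([]byte("frame payload"))

	// Consumer drains the buffer, then returns it to the pool.
	out := make([]byte, 5)
	n, _ := buf.Read(out)
	fmt.Println(string(out[:n]), buf.Len())
	buf.Reset()
	pool.put(buf)
}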
// recvMsg represents the received msg from the transport. All transport // recvMsg represents the received msg from the transport. All transport
// protocol specific info has been removed. // protocol specific info has been removed.
type recvMsg struct { type recvMsg struct {
data []byte buffer *bytes.Buffer
// nil: received some data // nil: received some data
// io.EOF: stream is completed. data is nil. // io.EOF: stream is completed. data is nil.
// other non-nil error: transport failure. data is nil. // other non-nil error: transport failure. data is nil.
@ -117,8 +140,9 @@ type recvBufferReader struct {
ctx context.Context ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance). ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer recv *recvBuffer
last []byte // Stores the remaining data in the previous calls. last *bytes.Buffer // Stores the remaining data in the previous calls.
err error err error
freeBuffer func(*bytes.Buffer)
} }
// Read reads the next len(p) bytes from last. If last is drained, it tries to // Read reads the next len(p) bytes from last. If last is drained, it tries to
@ -128,10 +152,13 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) {
if r.err != nil { if r.err != nil {
return 0, r.err return 0, r.err
} }
if r.last != nil && len(r.last) > 0 { if r.last != nil {
// Read remaining data left in last call. // Read remaining data left in last call.
copied := copy(p, r.last) copied, _ := r.last.Read(p)
r.last = r.last[copied:] if r.last.Len() == 0 {
r.freeBuffer(r.last)
r.last = nil
}
return copied, nil return copied, nil
} }
if r.closeStream != nil { if r.closeStream != nil {
@ -157,6 +184,19 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
// r.readAdditional acts on that message and returns the necessary error. // r.readAdditional acts on that message and returns the necessary error.
select { select {
case <-r.ctxDone: case <-r.ctxDone:
// Note that this adds the ctx error to the end of recv buffer, and
// reads from the head. This will delay the error until recv buffer is
// empty, thus will delay ctx cancellation in Recv().
//
// It's done this way to fix a race between ctx cancel and trailer. The
// race was, stream.Recv() may return ctx error if ctxDone wins the
// race, but stream.Trailer() may return a non-nil md because the stream
// was not marked as done when trailer is received. This closeStream
// call will mark stream as done, thus fix the race.
//
// TODO: delaying ctx error seems like an unnecessary side effect. What
// we really want is to mark the stream as done, and return ctx error
// faster.
r.closeStream(ContextErr(r.ctx.Err())) r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get() m := <-r.recv.get()
return r.readAdditional(m, p) return r.readAdditional(m, p)
@ -170,8 +210,13 @@ func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error
if m.err != nil { if m.err != nil {
return 0, m.err return 0, m.err
} }
copied := copy(p, m.data) copied, _ := m.buffer.Read(p)
r.last = m.data[copied:] if m.buffer.Len() == 0 {
r.freeBuffer(m.buffer)
r.last = nil
} else {
r.last = m.buffer
}
return copied, nil return copied, nil
} }
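Read and readAdditional now drain a pooled *bytes.Buffer across calls and hand it back once empty, instead of re-slicing a []byte. A standalone sketch of the drain-then-free pattern; the freeBuffer hook stands in for returning the buffer to the pool:

package main

import (
	"bytes"
	"fmt"
)

// reader keeps the remainder of the previous message in last and frees it
// (returns it to a pool in the real code) once fully drained.
type reader struct {
	last       *bytes.Buffer
	freeBuffer func(*bytes.Buffer)
}

func (r *reader) read(p []byte) int {
	if r.last == nil {
		return 0
	}
	copied, _ := r.last.Read(p)
	if r.last.Len() == 0 {
		r.freeBuffer(r.last)
		r.last = nil
	}
	return copied
}

func main() {
	freed := 0
	r := &reader{
		last:       bytes.NewBufferString("hello world"),
		freeBuffer: func(*bytes.Buffer) { freed++ },
	}
	p := make([]byte, 6)
	for {
		n := r.read(p)
		if n == 0 {
			break
		}
		fmt.Printf("%q ", p[:n])
	}
	fmt.Println("\nbuffers freed:", freed) // 1
}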
@ -204,8 +249,8 @@ type Stream struct {
// is used to adjust flow control, if needed. // is used to adjust flow control, if needed.
requestRead func(int) requestRead func(int)
headerChan chan struct{} // closed to indicate the end of header metadata. headerChan chan struct{} // closed to indicate the end of header metadata.
headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
// hdrMu protects header and trailer metadata on the server-side. // hdrMu protects header and trailer metadata on the server-side.
hdrMu sync.Mutex hdrMu sync.Mutex
@ -266,6 +311,14 @@ func (s *Stream) waitOnHeader() error {
} }
select { select {
case <-s.ctx.Done(): case <-s.ctx.Done():
// We prefer success over failure when reading messages because we delay
// context error in stream.Read(). To keep behavior consistent, we also
// prefer success here.
select {
case <-s.headerChan:
return nil
default:
}
return ContextErr(s.ctx.Err()) return ContextErr(s.ctx.Err())
case <-s.headerChan: case <-s.headerChan:
return nil return nil
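The nested select is the "prefer success" idiom: when the context is already cancelled but headerChan has also closed, the header wins, keeping waitOnHeader consistent with the delayed context error in Read. A standalone sketch of the pattern with illustrative channel names:

package main

import (
	"context"
	"fmt"
)

// waitPreferSuccess returns nil if done is already closed, even when ctx has
// been cancelled; otherwise it waits for whichever happens first.
func waitPreferSuccess(ctx context.Context, done <-chan struct{}) error {
	select {
	case <-ctx.Done():
		// Prefer success: if done is also ready, report success.
		select {
		case <-done:
			return nil
		default:
		}
		return ctx.Err()
	case <-done:
		return nil
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	done := make(chan struct{})
	close(done)

	// Both channels are ready; the nested select prefers done.
	fmt.Println(waitPreferSuccess(ctx, done)) // <nil>
}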
@ -327,8 +380,7 @@ func (s *Stream) TrailersOnly() (bool, error) {
if err != nil { if err != nil {
return false, err return false, err
} }
// if !headerDone, some other connection error occurred. return s.noHeaders, nil
return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil
} }
// Trailer returns the cached trailer metedata. Note that if it is not called // Trailer returns the cached trailer metedata. Note that if it is not called
@ -579,9 +631,12 @@ type ClientTransport interface {
// is called only once. // is called only once.
Close() error Close() error
// GracefulClose starts to tear down the transport. It stops accepting // GracefulClose starts to tear down the transport: the transport will stop
// new RPCs and wait the completion of the pending RPCs. // accepting new RPCs and NewStream will return error. Once all streams are
GracefulClose() error // finished, the transport will close.
//
// It does not block.
GracefulClose()
// Write sends the data for the given stream. A nil stream indicates // Write sends the data for the given stream. A nil stream indicates
// the write is to be performed on the transport as a whole. // the write is to be performed on the transport as a whole.
@ -611,6 +666,9 @@ type ClientTransport interface {
// GetGoAwayReason returns the reason why GoAway frame was received. // GetGoAwayReason returns the reason why GoAway frame was received.
GetGoAwayReason() GoAwayReason GetGoAwayReason() GoAwayReason
// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr
// IncrMsgSent increments the number of message sent through this transport. // IncrMsgSent increments the number of message sent through this transport.
IncrMsgSent() IncrMsgSent()

View File

@ -17,9 +17,8 @@
*/ */
// Package naming defines the naming API and related data structures for gRPC. // Package naming defines the naming API and related data structures for gRPC.
// The interface is EXPERIMENTAL and may be subject to change.
// //
// Deprecated: please use package resolver. // This package is deprecated: please use package resolver instead.
package naming package naming
// Operation defines the corresponding operations for a name resolution change. // Operation defines the corresponding operations for a name resolution change.

View File

@ -120,6 +120,14 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
bp.mu.Unlock() bp.mu.Unlock()
select { select {
case <-ctx.Done(): case <-ctx.Done():
if connectionErr := bp.connectionError(); connectionErr != nil {
switch ctx.Err() {
case context.DeadlineExceeded:
return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
case context.Canceled:
return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
}
}
return nil, nil, ctx.Err() return nil, nil, ctx.Err()
case <-ch: case <-ch:
} }
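When a pick times out or is cancelled, the returned error now carries the most recent connection error so callers can see why no connection became ready. A standalone sketch of that wrapping, using the grpc codes and status packages:

package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// wrapPickError mirrors the idea: translate ctx.Err() into a gRPC status and
// attach the latest connection error, if any, to the message.
func wrapPickError(ctx context.Context, connErr error) error {
	if connErr != nil {
		switch ctx.Err() {
		case context.DeadlineExceeded:
			return status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connErr)
		case context.Canceled:
			return status.Errorf(codes.Canceled, "latest connection error: %v", connErr)
		}
	}
	return ctx.Err()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	err := wrapPickError(ctx, errors.New("connection refused"))
	fmt.Println(status.Code(err), err)
}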
@ -165,6 +173,11 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
} }
return t, done, nil return t, done, nil
} }
if done != nil {
// Calling done with nil error, no bytes sent and no bytes received.
// DoneInfo with default value works.
done(balancer.DoneInfo{})
}
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
// If ok == false, ac.state is not READY. // If ok == false, ac.state is not READY.
// A valid picker always returns READY subConn. This means the state of ac // A valid picker always returns READY subConn. This means the state of ac

View File

@ -51,14 +51,18 @@ type pickfirstBalancer struct {
func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
if err != nil { if err != nil {
grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) if grpclog.V(2) {
grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
}
return return
} }
if b.sc == nil { if b.sc == nil {
b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
if err != nil { if err != nil {
//TODO(yuxuanli): why not change the cc state to Idle? //TODO(yuxuanli): why not change the cc state to Idle?
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) if grpclog.V(2) {
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
}
return return
} }
b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
@ -70,9 +74,13 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er
} }
func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) if grpclog.V(2) {
grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
}
if b.sc != sc { if b.sc != sc {
grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") if grpclog.V(2) {
grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
}
return return
} }
if s == connectivity.Shutdown { if s == connectivity.Shutdown {

vendor/google.golang.org/grpc/preloader.go (new vendored file)
View File

@ -0,0 +1,64 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// PreparedMsg is responsible for creating a Marshalled and Compressed object.
//
// This API is EXPERIMENTAL.
type PreparedMsg struct {
// Fields used to prepare a message before it is sent.
encodedData []byte
hdr []byte
payload []byte
}
// Encode marshals and compresses the message using the codec and compressor for the stream.
func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
ctx := s.Context()
rpcInfo, ok := rpcInfoFromContext(ctx)
if !ok {
return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
}
// check if the context has the relevant information to prepareMsg
if rpcInfo.preloaderInfo == nil {
return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
}
if rpcInfo.preloaderInfo.codec == nil {
return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
}
// prepare the msg
data, err := encode(rpcInfo.preloaderInfo.codec, msg)
if err != nil {
return err
}
p.encodedData = data
compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
if err != nil {
return err
}
p.hdr, p.payload = msgHeader(data, compData)
return nil
}
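PreparedMsg lets a caller marshal and compress a message once and then send the pre-encoded bytes, which helps when the same reply is fanned out to many streams. A hedged usage sketch: it assumes SendMsg recognizes *grpc.PreparedMsg (as this version's stream code does) and that the stream's context carries the codec/compressor info Encode needs; msg would normally be a generated protobuf message.

package preload

import "google.golang.org/grpc"

// prepareAndSend encodes msg once and then sends the pre-encoded bytes on the
// stream. Assumption: stream.SendMsg handles *grpc.PreparedMsg and the stream
// context provides the codec and compressors used by Encode.
func prepareAndSend(stream grpc.ServerStream, msg interface{}) error {
	var prepared grpc.PreparedMsg
	if err := prepared.Encode(stream, msg); err != nil { // marshal + compress once
		return err
	}
	return stream.SendMsg(&prepared) // reuses the already-encoded header and payload
}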

View File

@ -47,6 +47,8 @@ const (
defaultFreq = time.Minute * 30 defaultFreq = time.Minute * 30
defaultDNSSvrPort = "53" defaultDNSSvrPort = "53"
golang = "GO" golang = "GO"
// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
txtPrefix = "_grpc_config."
// In DNS, service config is encoded in a TXT record via the mechanism // In DNS, service config is encoded in a TXT record via the mechanism
// described in RFC-1464 using the attribute name grpc_config. // described in RFC-1464 using the attribute name grpc_config.
txtAttribute = "grpc_config=" txtAttribute = "grpc_config="
@ -64,6 +66,9 @@ var (
var ( var (
defaultResolver netResolver = net.DefaultResolver defaultResolver netResolver = net.DefaultResolver
// To prevent excessive re-resolution, we enforce a rate limit on DNS
// resolution requests.
minDNSResRate = 30 * time.Second
) )
var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
@ -239,7 +244,13 @@ func (d *dnsResolver) watcher() {
return return
case <-d.t.C: case <-d.t.C:
case <-d.rn: case <-d.rn:
if !d.t.Stop() {
// Before resetting a timer, it should be stopped to prevent racing with
// reads on its channel.
<-d.t.C
}
} }
result, sc := d.lookup() result, sc := d.lookup()
// Next lookup should happen within an interval defined by d.freq. It may be // Next lookup should happen within an interval defined by d.freq. It may be
// more often due to exponential retry on empty address list. // more often due to exponential retry on empty address list.
@ -252,6 +263,16 @@ func (d *dnsResolver) watcher() {
} }
d.cc.NewServiceConfig(sc) d.cc.NewServiceConfig(sc)
d.cc.NewAddress(result) d.cc.NewAddress(result)
// Sleep to prevent excessive re-resolutions. Incoming resolution requests
// will be queued in d.rn.
t := time.NewTimer(minDNSResRate)
select {
case <-t.C:
case <-d.ctx.Done():
t.Stop()
return
}
} }
} }
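Two patterns are at work in the watcher: a Timer is stopped and drained before the later Reset so a stale tick cannot race the next wait, and every lookup is followed by a cancellable minimum-interval sleep so requests queued on d.rn cannot trigger a re-resolution storm. A standalone sketch of both, with illustrative durations:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	freq := 50 * time.Millisecond
	minResRate := 20 * time.Millisecond
	t := time.NewTimer(freq)
	rn := make(chan struct{}, 1) // "resolve now" requests
	rn <- struct{}{}

	for i := 0; i < 2; i++ {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
		case <-rn:
			// Drain the timer before the later Reset to avoid racing with
			// a tick that already fired.
			if !t.Stop() {
				<-t.C
			}
		}

		fmt.Println("lookup", i) // stand-in for the actual DNS lookup
		t.Reset(freq)

		// Rate-limit: wait at least minResRate before the next lookup,
		// but stay responsive to cancellation.
		sleep := time.NewTimer(minResRate)
		select {
		case <-sleep.C:
		case <-ctx.Done():
			sleep.Stop()
			return
		}
	}
}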
@ -282,7 +303,7 @@ func (d *dnsResolver) lookupSRV() []resolver.Address {
} }
func (d *dnsResolver) lookupTXT() string { func (d *dnsResolver) lookupTXT() string {
ss, err := d.resolver.LookupTXT(d.ctx, d.host) ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
if err != nil { if err != nil {
grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
return "" return ""

View File

@ -45,7 +45,7 @@ type passthroughResolver struct {
} }
func (r *passthroughResolver) start() { func (r *passthroughResolver) start() {
r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}}) r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
} }
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}

View File

@ -20,6 +20,10 @@
// All APIs in this package are experimental. // All APIs in this package are experimental.
package resolver package resolver
import (
"google.golang.org/grpc/serviceconfig"
)
var ( var (
// m is a map from scheme to resolver builder. // m is a map from scheme to resolver builder.
m = make(map[string]Builder) m = make(map[string]Builder)
@ -98,6 +102,16 @@ type BuildOption struct {
DisableServiceConfig bool DisableServiceConfig bool
} }
// State contains the current Resolver state relevant to the ClientConn.
type State struct {
Addresses []Address // Resolved addresses for the target
// ServiceConfig is the parsed service config; obtained from
// serviceconfig.Parse.
ServiceConfig serviceconfig.Config
// TODO: add Err error
}
// ClientConn contains the callbacks for resolver to notify any updates // ClientConn contains the callbacks for resolver to notify any updates
// to the gRPC ClientConn. // to the gRPC ClientConn.
// //
@ -106,17 +120,38 @@ type BuildOption struct {
// testing, the new implementation should embed this interface. This allows // testing, the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface. // gRPC to add new methods to this interface.
type ClientConn interface { type ClientConn interface {
// UpdateState updates the state of the ClientConn appropriately.
UpdateState(State)
// NewAddress is called by resolver to notify ClientConn a new list // NewAddress is called by resolver to notify ClientConn a new list
// of resolved addresses. // of resolved addresses.
// The address list should be the complete list of resolved addresses. // The address list should be the complete list of resolved addresses.
//
// Deprecated: Use UpdateState instead.
NewAddress(addresses []Address) NewAddress(addresses []Address)
// NewServiceConfig is called by resolver to notify ClientConn a new // NewServiceConfig is called by resolver to notify ClientConn a new
// service config. The service config should be provided as a json string. // service config. The service config should be provided as a json string.
//
// Deprecated: Use UpdateState instead.
NewServiceConfig(serviceConfig string) NewServiceConfig(serviceConfig string)
} }
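UpdateState replaces the NewAddress/NewServiceConfig pair with a single State snapshot. A hedged sketch of a static resolver written against this version of the resolver API (the scheme and addresses are illustrative):

package staticresolver

import "google.golang.org/grpc/resolver"

// builder produces resolvers that report a fixed address list via UpdateState.
type builder struct {
	addrs []string
}

func (b *builder) Scheme() string { return "static" }

func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc, addrs: b.addrs}
	r.resolve()
	return r, nil
}

type staticResolver struct {
	cc    resolver.ClientConn
	addrs []string
}

func (r *staticResolver) resolve() {
	state := resolver.State{}
	for _, a := range r.addrs {
		state.Addresses = append(state.Addresses, resolver.Address{Addr: a})
	}
	// One call carries both addresses and (optionally) service config.
	r.cc.UpdateState(state)
}

func (r *staticResolver) ResolveNow(resolver.ResolveNowOption) { r.resolve() }
func (r *staticResolver) Close()                               {}

func init() {
	resolver.Register(&builder{addrs: []string{"10.0.0.1:8502", "10.0.0.2:8502"}})
}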
// Target represents a target for gRPC, as specified in: // Target represents a target for gRPC, as specified in:
// https://github.com/grpc/grpc/blob/master/doc/naming.md. // https://github.com/grpc/grpc/blob/master/doc/naming.md.
// It is parsed from the target string that gets passed into Dial or DialContext by the user, and
// gRPC passes it to the resolver and the balancer.
//
// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will
// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed
// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
//
// If the target does not contain a scheme, we will apply the default scheme, and set the Target to
// be the full target string. e.g. "foo.bar" will be parsed into
// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}.
//
// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the
// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target
// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into
// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}.
type Target struct { type Target struct {
Scheme string Scheme string
Authority string Authority string

View File

@ -21,6 +21,7 @@ package grpc
import ( import (
"fmt" "fmt"
"strings" "strings"
"sync/atomic"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/channelz"
@ -30,12 +31,12 @@ import (
// ccResolverWrapper is a wrapper on top of cc for resolvers. // ccResolverWrapper is a wrapper on top of cc for resolvers.
// It implements resolver.ClientConnection interface. // It implements resolver.ClientConnection interface.
type ccResolverWrapper struct { type ccResolverWrapper struct {
cc *ClientConn cc *ClientConn
resolver resolver.Resolver resolver resolver.Resolver
addrCh chan []resolver.Address addrCh chan []resolver.Address
scCh chan string scCh chan string
done chan struct{} done uint32 // accessed atomically; set to 1 when closed.
lastAddressesCount int curState resolver.State
} }
// split2 returns the values from strings.SplitN(s, sep, 2). // split2 returns the values from strings.SplitN(s, sep, 2).
@ -82,7 +83,6 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
cc: cc, cc: cc,
addrCh: make(chan []resolver.Address, 1), addrCh: make(chan []resolver.Address, 1),
scCh: make(chan string, 1), scCh: make(chan string, 1),
done: make(chan struct{}),
} }
var err error var err error
@ -99,57 +99,70 @@ func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
func (ccr *ccResolverWrapper) close() { func (ccr *ccResolverWrapper) close() {
ccr.resolver.Close() ccr.resolver.Close()
close(ccr.done) atomic.StoreUint32(&ccr.done, 1)
} }
// NewAddress is called by the resolver implemenetion to send addresses to gRPC. func (ccr *ccResolverWrapper) isDone() bool {
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { return atomic.LoadUint32(&ccr.done) == 1
select { }
case <-ccr.done:
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
if ccr.isDone() {
return
}
grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s)
if channelz.IsOn() {
ccr.addChannelzTraceEvent(s)
}
ccr.cc.updateResolverState(s)
ccr.curState = s
}
// NewAddress is called by the resolver implementation to send addresses to gRPC.
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
if ccr.isDone() {
return return
default:
} }
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
if channelz.IsOn() { if channelz.IsOn() {
ccr.addChannelzTraceEvent(addrs) ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
} }
ccr.cc.handleResolvedAddrs(addrs, nil) ccr.curState.Addresses = addrs
ccr.cc.updateResolverState(ccr.curState)
} }
// NewServiceConfig is called by the resolver implemenetion to send service // NewServiceConfig is called by the resolver implementation to send service
// configs to gRPC. // configs to gRPC.
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
select { if ccr.isDone() {
case <-ccr.done:
return return
default:
} }
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
ccr.cc.handleServiceConfig(sc) c, err := parseServiceConfig(sc)
if err != nil {
return
}
if channelz.IsOn() {
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c})
}
ccr.curState.ServiceConfig = c
ccr.cc.updateResolverState(ccr.curState)
} }
func (ccr *ccResolverWrapper) addChannelzTraceEvent(addrs []resolver.Address) { func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
if len(addrs) == 0 && ccr.lastAddressesCount != 0 { var updates []string
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig)
Desc: "Resolver returns an empty address list", newSC, newOK := s.ServiceConfig.(*ServiceConfig)
Severity: channelz.CtWarning, if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
}) updates = append(updates, "service config updated")
} else if len(addrs) != 0 && ccr.lastAddressesCount == 0 {
var s string
for i, a := range addrs {
if a.ServerName != "" {
s += a.Addr + "(" + a.ServerName + ")"
} else {
s += a.Addr
}
if i != len(addrs)-1 {
s += " "
}
}
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Resolver returns a non-empty address list (previous one was empty) %q", s),
Severity: channelz.CtINFO,
})
} }
ccr.lastAddressesCount = len(addrs) if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
updates = append(updates, "resolver returned an empty address list")
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
updates = append(updates, "resolver returned new addresses")
}
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
Severity: channelz.CtINFO,
})
} }
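The done channel plus select becomes a single atomic flag: close() stores 1 and every callback checks it before touching the ClientConn. A standalone sketch of that close-flag pattern:

package main

import (
	"fmt"
	"sync/atomic"
)

type wrapper struct {
	done uint32 // set to 1 when closed; accessed atomically
}

func (w *wrapper) close() { atomic.StoreUint32(&w.done, 1) }

func (w *wrapper) isDone() bool { return atomic.LoadUint32(&w.done) == 1 }

// update stands in for UpdateState/NewAddress/NewServiceConfig: it becomes a
// no-op once the wrapper has been closed.
func (w *wrapper) update(v string) {
	if w.isDone() {
		return
	}
	fmt.Println("forwarding update:", v)
}

func main() {
	w := &wrapper{}
	w.update("addrs=1") // forwarded
	w.close()
	w.update("addrs=2") // dropped
}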

View File

@ -694,14 +694,34 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf
return nil return nil
} }
// Information about RPC
type rpcInfo struct { type rpcInfo struct {
failfast bool failfast bool
preloaderInfo *compressorInfo
}
// compressorInfo stores the codec and compressors for a call.
// If a stream's context carries an rpcInfo with non-nil codec and compressor
// pointers, PreparedMsg can prepare (marshal and compress) messages
// asynchronously and reuse the marshalled bytes.
type compressorInfo struct {
codec baseCodec
cp Compressor
comp encoding.Compressor
} }
type rpcInfoContextKey struct{} type rpcInfoContextKey struct{}
func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
failfast: failfast,
preloaderInfo: &compressorInfo{
codec: codec,
cp: cp,
comp: comp,
},
})
} }
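newContextWithRPCInfo now also stashes the codec and compressors so PreparedMsg.Encode can find them later. A standalone sketch of the unexported-context-key pattern it relies on; the codec field is reduced to a string for brevity:

package main

import (
	"context"
	"fmt"
)

// rpcInfo carries per-call settings; the codec/compressor fields added in this
// change are represented here by a single string.
type rpcInfo struct {
	failfast bool
	codec    string
}

// An unexported key type keeps the value private to this package.
type rpcInfoContextKey struct{}

func newContextWithRPCInfo(ctx context.Context, failfast bool, codec string) context.Context {
	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast, codec: codec})
}

func rpcInfoFromContext(ctx context.Context) (*rpcInfo, bool) {
	s, ok := ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
	return s, ok
}

func main() {
	ctx := newContextWithRPCInfo(context.Background(), true, "proto")
	if info, ok := rpcInfoFromContext(ctx); ok {
		fmt.Println(info.failfast, info.codec) // true proto
	}
}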
func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {

View File

@ -42,6 +42,7 @@ import (
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/transport" "google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive" "google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
@ -56,6 +57,8 @@ const (
defaultServerMaxSendMessageSize = math.MaxInt32 defaultServerMaxSendMessageSize = math.MaxInt32
) )
var statusOK = status.New(codes.OK, "")
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
// MethodDesc represents an RPC service's method specification. // MethodDesc represents an RPC service's method specification.
@ -86,21 +89,19 @@ type service struct {
// Server is a gRPC server to serve RPC requests. // Server is a gRPC server to serve RPC requests.
type Server struct { type Server struct {
opts options opts serverOptions
mu sync.Mutex // guards following mu sync.Mutex // guards following
lis map[net.Listener]bool lis map[net.Listener]bool
conns map[io.Closer]bool conns map[transport.ServerTransport]bool
serve bool serve bool
drain bool drain bool
cv *sync.Cond // signaled when connections close for GracefulStop cv *sync.Cond // signaled when connections close for GracefulStop
m map[string]*service // service name -> service info m map[string]*service // service name -> service info
events trace.EventLog events trace.EventLog
quit chan struct{} quit *grpcsync.Event
done chan struct{} done *grpcsync.Event
quitOnce sync.Once
doneOnce sync.Once
channelzRemoveOnce sync.Once channelzRemoveOnce sync.Once
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
@ -108,7 +109,7 @@ type Server struct {
czData *channelzData czData *channelzData
} }
type options struct { type serverOptions struct {
creds credentials.TransportCredentials creds credentials.TransportCredentials
codec baseCodec codec baseCodec
cp Compressor cp Compressor
@ -131,7 +132,7 @@ type options struct {
maxHeaderListSize *uint32 maxHeaderListSize *uint32
} }
var defaultServerOptions = options{ var defaultServerOptions = serverOptions{
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
maxSendMessageSize: defaultServerMaxSendMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize,
connectionTimeout: 120 * time.Second, connectionTimeout: 120 * time.Second,
@ -140,7 +141,33 @@ var defaultServerOptions = options{
} }
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. // A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption func(*options) type ServerOption interface {
apply(*serverOptions)
}
// EmptyServerOption does not alter the server configuration. It can be embedded
// in another structure to build custom server options.
//
// This API is EXPERIMENTAL.
type EmptyServerOption struct{}
func (EmptyServerOption) apply(*serverOptions) {}
// funcServerOption wraps a function that modifies serverOptions into an
// implementation of the ServerOption interface.
type funcServerOption struct {
f func(*serverOptions)
}
func (fdo *funcServerOption) apply(do *serverOptions) {
fdo.f(do)
}
func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
return &funcServerOption{
f: f,
}
}
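ServerOption changes from a bare func type to an interface, so new kinds of options can be added without breaking callers; funcServerOption adapts closures, and EmptyServerOption can be embedded to build custom no-op options. A standalone sketch of the same pattern with generic names (not the grpc types):

package main

import "fmt"

type serverOptions struct {
	writeBufferSize int
	maxStreams      uint32
}

// Option mirrors the ServerOption interface: anything with apply(*serverOptions).
type Option interface {
	apply(*serverOptions)
}

// funcOption adapts a plain function into an Option.
type funcOption struct {
	f func(*serverOptions)
}

func (fo *funcOption) apply(o *serverOptions) { fo.f(o) }

func newFuncOption(f func(*serverOptions)) Option { return &funcOption{f: f} }

func WriteBufferSize(s int) Option {
	return newFuncOption(func(o *serverOptions) { o.writeBufferSize = s })
}

// EmptyOption changes nothing; embedding it lets other structs satisfy Option.
type EmptyOption struct{}

func (EmptyOption) apply(*serverOptions) {}

func newServer(opts ...Option) *serverOptions {
	o := &serverOptions{writeBufferSize: 32 * 1024, maxStreams: 100}
	for _, opt := range opts {
		opt.apply(o)
	}
	return o
}

func main() {
	fmt.Println(*newServer(WriteBufferSize(64 * 1024))) // {65536 100}
}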
// WriteBufferSize determines how much data can be batched before doing a write on the wire. // WriteBufferSize determines how much data can be batched before doing a write on the wire.
// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. // The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
@ -148,9 +175,9 @@ type ServerOption func(*options)
// Zero will disable the write buffer such that each write will be on underlying connection. // Zero will disable the write buffer such that each write will be on underlying connection.
// Note: A Send call may not directly translate to a write. // Note: A Send call may not directly translate to a write.
func WriteBufferSize(s int) ServerOption { func WriteBufferSize(s int) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.writeBufferSize = s o.writeBufferSize = s
} })
} }
// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most // ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
@ -159,25 +186,25 @@ func WriteBufferSize(s int) ServerOption {
// Zero will disable read buffer for a connection so data framer can access the underlying // Zero will disable read buffer for a connection so data framer can access the underlying
// conn directly. // conn directly.
func ReadBufferSize(s int) ServerOption { func ReadBufferSize(s int) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.readBufferSize = s o.readBufferSize = s
} })
} }
// InitialWindowSize returns a ServerOption that sets window size for stream. // InitialWindowSize returns a ServerOption that sets window size for stream.
// The lower bound for window size is 64K and any value smaller than that will be ignored. // The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialWindowSize(s int32) ServerOption { func InitialWindowSize(s int32) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.initialWindowSize = s o.initialWindowSize = s
} })
} }
// InitialConnWindowSize returns a ServerOption that sets window size for a connection. // InitialConnWindowSize returns a ServerOption that sets window size for a connection.
// The lower bound for window size is 64K and any value smaller than that will be ignored. // The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialConnWindowSize(s int32) ServerOption { func InitialConnWindowSize(s int32) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.initialConnWindowSize = s o.initialConnWindowSize = s
} })
} }
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
@ -187,25 +214,25 @@ func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
kp.Time = time.Second kp.Time = time.Second
} }
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.keepaliveParams = kp o.keepaliveParams = kp
} })
} }
// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. // KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.keepalivePolicy = kep o.keepalivePolicy = kep
} })
} }
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
// //
// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. // This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
func CustomCodec(codec Codec) ServerOption { func CustomCodec(codec Codec) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.codec = codec o.codec = codec
} })
} }
// RPCCompressor returns a ServerOption that sets a compressor for outbound // RPCCompressor returns a ServerOption that sets a compressor for outbound
@ -216,9 +243,9 @@ func CustomCodec(codec Codec) ServerOption {
// //
// Deprecated: use encoding.RegisterCompressor instead. // Deprecated: use encoding.RegisterCompressor instead.
func RPCCompressor(cp Compressor) ServerOption { func RPCCompressor(cp Compressor) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.cp = cp o.cp = cp
} })
} }
// RPCDecompressor returns a ServerOption that sets a decompressor for inbound // RPCDecompressor returns a ServerOption that sets a decompressor for inbound
@ -227,9 +254,9 @@ func RPCCompressor(cp Compressor) ServerOption {
// //
// Deprecated: use encoding.RegisterCompressor instead. // Deprecated: use encoding.RegisterCompressor instead.
func RPCDecompressor(dc Decompressor) ServerOption { func RPCDecompressor(dc Decompressor) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.dc = dc o.dc = dc
} })
} }
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
@ -243,73 +270,73 @@ func MaxMsgSize(m int) ServerOption {
// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. // MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default 4MB. // If this is not set, gRPC uses the default 4MB.
func MaxRecvMsgSize(m int) ServerOption { func MaxRecvMsgSize(m int) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.maxReceiveMessageSize = m o.maxReceiveMessageSize = m
} })
} }
// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. // MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
// If this is not set, gRPC uses the default `math.MaxInt32`. // If this is not set, gRPC uses the default `math.MaxInt32`.
func MaxSendMsgSize(m int) ServerOption { func MaxSendMsgSize(m int) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.maxSendMessageSize = m o.maxSendMessageSize = m
} })
} }
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
// of concurrent streams to each ServerTransport. // of concurrent streams to each ServerTransport.
func MaxConcurrentStreams(n uint32) ServerOption { func MaxConcurrentStreams(n uint32) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.maxConcurrentStreams = n o.maxConcurrentStreams = n
} })
} }
// Creds returns a ServerOption that sets credentials for server connections. // Creds returns a ServerOption that sets credentials for server connections.
func Creds(c credentials.TransportCredentials) ServerOption { func Creds(c credentials.TransportCredentials) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.creds = c o.creds = c
} })
} }
// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the // UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
// server. Only one unary interceptor can be installed. The construction of multiple // server. Only one unary interceptor can be installed. The construction of multiple
// interceptors (e.g., chaining) can be implemented at the caller. // interceptors (e.g., chaining) can be implemented at the caller.
func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
if o.unaryInt != nil { if o.unaryInt != nil {
panic("The unary server interceptor was already set and may not be reset.") panic("The unary server interceptor was already set and may not be reset.")
} }
o.unaryInt = i o.unaryInt = i
} })
} }
// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
// server. Only one stream interceptor can be installed. // server. Only one stream interceptor can be installed.
func StreamInterceptor(i StreamServerInterceptor) ServerOption { func StreamInterceptor(i StreamServerInterceptor) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
if o.streamInt != nil { if o.streamInt != nil {
panic("The stream server interceptor was already set and may not be reset.") panic("The stream server interceptor was already set and may not be reset.")
} }
o.streamInt = i o.streamInt = i
} })
} }
// InTapHandle returns a ServerOption that sets the tap handle for all the server // InTapHandle returns a ServerOption that sets the tap handle for all the server
// transport to be created. Only one can be installed. // transport to be created. Only one can be installed.
func InTapHandle(h tap.ServerInHandle) ServerOption { func InTapHandle(h tap.ServerInHandle) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
if o.inTapHandle != nil { if o.inTapHandle != nil {
panic("The tap handle was already set and may not be reset.") panic("The tap handle was already set and may not be reset.")
} }
o.inTapHandle = h o.inTapHandle = h
} })
} }
// StatsHandler returns a ServerOption that sets the stats handler for the server. // StatsHandler returns a ServerOption that sets the stats handler for the server.
func StatsHandler(h stats.Handler) ServerOption { func StatsHandler(h stats.Handler) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.statsHandler = h o.statsHandler = h
} })
} }
// UnknownServiceHandler returns a ServerOption that allows for adding a custom // UnknownServiceHandler returns a ServerOption that allows for adding a custom
@ -319,7 +346,7 @@ func StatsHandler(h stats.Handler) ServerOption {
// The handling function has full access to the Context of the request and the // The handling function has full access to the Context of the request and the
// stream, and the invocation bypasses interceptors. // stream, and the invocation bypasses interceptors.
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.unknownStreamDesc = &StreamDesc{ o.unknownStreamDesc = &StreamDesc{
StreamName: "unknown_service_handler", StreamName: "unknown_service_handler",
Handler: streamHandler, Handler: streamHandler,
@ -327,7 +354,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
ClientStreams: true, ClientStreams: true,
ServerStreams: true, ServerStreams: true,
} }
} })
} }
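
A hedged sketch of a fallback StreamHandler that could be installed through UnknownServiceHandler; the handler name and response below are hypothetical:

// fallbackHandler is invoked for any method whose service is not registered.
// Here it simply reports Unimplemented with the requested method name; a proxy
// could instead forward the raw stream elsewhere.
// Assumes: import ("google.golang.org/grpc"; "google.golang.org/grpc/codes"; "google.golang.org/grpc/status").
func fallbackHandler(srv interface{}, stream grpc.ServerStream) error {
	method, _ := grpc.MethodFromServerStream(stream)
	return status.Errorf(codes.Unimplemented, "no handler registered for %q", method)
}

// Installed as: grpc.NewServer(grpc.UnknownServiceHandler(fallbackHandler))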
// ConnectionTimeout returns a ServerOption that sets the timeout for // ConnectionTimeout returns a ServerOption that sets the timeout for
@ -337,17 +364,17 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
// //
// This API is EXPERIMENTAL. // This API is EXPERIMENTAL.
func ConnectionTimeout(d time.Duration) ServerOption { func ConnectionTimeout(d time.Duration) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.connectionTimeout = d o.connectionTimeout = d
} })
} }
// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
// of header list that the server is prepared to accept. // of header list that the server is prepared to accept.
func MaxHeaderListSize(s uint32) ServerOption { func MaxHeaderListSize(s uint32) ServerOption {
return func(o *options) { return newFuncServerOption(func(o *serverOptions) {
o.maxHeaderListSize = &s o.maxHeaderListSize = &s
} })
} }
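
Taken together, these functional options are applied when the server is constructed; a small usage sketch with arbitrary values (not taken from this diff):

// newTunedServer shows the options above being applied at construction time.
// Assumes: import ("time"; "google.golang.org/grpc").
func newTunedServer() *grpc.Server {
	return grpc.NewServer(
		grpc.MaxRecvMsgSize(8*1024*1024), // raise the 4MB receive default
		grpc.MaxSendMsgSize(8*1024*1024),
		grpc.MaxConcurrentStreams(128),
		grpc.ConnectionTimeout(20*time.Second), // EXPERIMENTAL, per the comment above
		grpc.MaxHeaderListSize(1<<20),
	)
}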
// NewServer creates a gRPC server which has no service registered and has not // NewServer creates a gRPC server which has no service registered and has not
@ -355,15 +382,15 @@ func MaxHeaderListSize(s uint32) ServerOption {
func NewServer(opt ...ServerOption) *Server { func NewServer(opt ...ServerOption) *Server {
opts := defaultServerOptions opts := defaultServerOptions
for _, o := range opt { for _, o := range opt {
o(&opts) o.apply(&opts)
} }
s := &Server{ s := &Server{
lis: make(map[net.Listener]bool), lis: make(map[net.Listener]bool),
opts: opts, opts: opts,
conns: make(map[io.Closer]bool), conns: make(map[transport.ServerTransport]bool),
m: make(map[string]*service), m: make(map[string]*service),
quit: make(chan struct{}), quit: grpcsync.NewEvent(),
done: make(chan struct{}), done: grpcsync.NewEvent(),
czData: new(channelzData), czData: new(channelzData),
} }
s.cv = sync.NewCond(&s.mu) s.cv = sync.NewCond(&s.mu)
@ -530,11 +557,9 @@ func (s *Server) Serve(lis net.Listener) error {
s.serveWG.Add(1) s.serveWG.Add(1)
defer func() { defer func() {
s.serveWG.Done() s.serveWG.Done()
select { if s.quit.HasFired() {
// Stop or GracefulStop called; block until done and return nil. // Stop or GracefulStop called; block until done and return nil.
case <-s.quit: <-s.done.Done()
<-s.done
default:
} }
}() }()
@ -577,7 +602,7 @@ func (s *Server) Serve(lis net.Listener) error {
timer := time.NewTimer(tempDelay) timer := time.NewTimer(tempDelay)
select { select {
case <-timer.C: case <-timer.C:
case <-s.quit: case <-s.quit.Done():
timer.Stop() timer.Stop()
return nil return nil
} }
@ -587,10 +612,8 @@ func (s *Server) Serve(lis net.Listener) error {
s.printf("done serving; Accept = %v", err) s.printf("done serving; Accept = %v", err)
s.mu.Unlock() s.mu.Unlock()
select { if s.quit.HasFired() {
case <-s.quit:
return nil return nil
default:
} }
return err return err
} }
@ -611,29 +634,26 @@ func (s *Server) Serve(lis net.Listener) error {
// handleRawConn forks a goroutine to handle a just-accepted connection that // handleRawConn forks a goroutine to handle a just-accepted connection that
// has not had any I/O performed on it yet. // has not had any I/O performed on it yet.
func (s *Server) handleRawConn(rawConn net.Conn) { func (s *Server) handleRawConn(rawConn net.Conn) {
if s.quit.HasFired() {
rawConn.Close()
return
}
rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
conn, authInfo, err := s.useTransportAuthenticator(rawConn) conn, authInfo, err := s.useTransportAuthenticator(rawConn)
if err != nil { if err != nil {
s.mu.Lock() // ErrConnDispatched means that the connection was dispatched away from
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) // gRPC; those connections should be left open.
s.mu.Unlock()
grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
// If serverHandshake returns ErrConnDispatched, keep rawConn open.
if err != credentials.ErrConnDispatched { if err != credentials.ErrConnDispatched {
s.mu.Lock()
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
s.mu.Unlock()
grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
rawConn.Close() rawConn.Close()
} }
rawConn.SetDeadline(time.Time{}) rawConn.SetDeadline(time.Time{})
return return
} }
s.mu.Lock()
if s.conns == nil {
s.mu.Unlock()
conn.Close()
return
}
s.mu.Unlock()
// Finish handshaking (HTTP2) // Finish handshaking (HTTP2)
st := s.newHTTP2Transport(conn, authInfo) st := s.newHTTP2Transport(conn, authInfo)
if st == nil { if st == nil {
@ -741,6 +761,9 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. // traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
// If tracing is not enabled, it returns nil. // If tracing is not enabled, it returns nil.
func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
if !EnableTracing {
return nil
}
tr, ok := trace.FromContext(stream.Context()) tr, ok := trace.FromContext(stream.Context())
if !ok { if !ok {
return nil return nil
@ -748,37 +771,38 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
trInfo = &traceInfo{ trInfo = &traceInfo{
tr: tr, tr: tr,
firstLine: firstLine{
client: false,
remoteAddr: st.RemoteAddr(),
},
} }
trInfo.firstLine.client = false
trInfo.firstLine.remoteAddr = st.RemoteAddr()
if dl, ok := stream.Context().Deadline(); ok { if dl, ok := stream.Context().Deadline(); ok {
trInfo.firstLine.deadline = time.Until(dl) trInfo.firstLine.deadline = time.Until(dl)
} }
return trInfo return trInfo
} }
func (s *Server) addConn(c io.Closer) bool { func (s *Server) addConn(st transport.ServerTransport) bool {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
if s.conns == nil { if s.conns == nil {
c.Close() st.Close()
return false return false
} }
if s.drain { if s.drain {
// Transport added after we drained our existing conns: drain it // Transport added after we drained our existing conns: drain it
// immediately. // immediately.
c.(transport.ServerTransport).Drain() st.Drain()
} }
s.conns[c] = true s.conns[st] = true
return true return true
} }
func (s *Server) removeConn(c io.Closer) { func (s *Server) removeConn(st transport.ServerTransport) {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
if s.conns != nil { if s.conns != nil {
delete(s.conns, c) delete(s.conns, st)
s.cv.Broadcast() s.cv.Broadcast()
} }
} }
@ -859,7 +883,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
} }
if trInfo != nil { if trInfo != nil {
defer trInfo.tr.Finish() defer trInfo.tr.Finish()
trInfo.firstLine.client = false
trInfo.tr.LazyLog(&trInfo.firstLine, false) trInfo.tr.LazyLog(&trInfo.firstLine, false)
defer func() { defer func() {
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
@ -951,10 +974,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
} }
if sh != nil { if sh != nil {
sh.HandleRPC(stream.Context(), &stats.InPayload{ sh.HandleRPC(stream.Context(), &stats.InPayload{
RecvTime: time.Now(), RecvTime: time.Now(),
Payload: v, Payload: v,
Data: d, WireLength: payInfo.wireLength,
Length: len(d), Data: d,
Length: len(d),
}) })
} }
if binlog != nil { if binlog != nil {
@ -1050,7 +1074,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
// TODO: Should we be logging if writing status failed here, like above? // TODO: Should we be logging if writing status failed here, like above?
// Should the logging be in WriteStatus? Should we ignore the WriteStatus // Should the logging be in WriteStatus? Should we ignore the WriteStatus
// error or allow the stats handler to see it? // error or allow the stats handler to see it?
err = t.WriteStatus(stream, status.New(codes.OK, "")) err = t.WriteStatus(stream, statusOK)
if binlog != nil { if binlog != nil {
binlog.Log(&binarylog.ServerTrailer{ binlog.Log(&binarylog.ServerTrailer{
Trailer: stream.Trailer(), Trailer: stream.Trailer(),
@ -1208,7 +1232,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.trInfo.tr.LazyLog(stringer("OK"), false)
ss.mu.Unlock() ss.mu.Unlock()
} }
err = t.WriteStatus(ss.s, status.New(codes.OK, "")) err = t.WriteStatus(ss.s, statusOK)
if ss.binlog != nil { if ss.binlog != nil {
ss.binlog.Log(&binarylog.ServerTrailer{ ss.binlog.Log(&binarylog.ServerTrailer{
Trailer: ss.s.Trailer(), Trailer: ss.s.Trailer(),
@ -1245,7 +1269,8 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
service := sm[:pos] service := sm[:pos]
method := sm[pos+1:] method := sm[pos+1:]
if srv, ok := s.m[service]; ok { srv, knownService := s.m[service]
if knownService {
if md, ok := srv.md[method]; ok { if md, ok := srv.md[method]; ok {
s.processUnaryRPC(t, stream, srv, md, trInfo) s.processUnaryRPC(t, stream, srv, md, trInfo)
return return
@ -1260,11 +1285,16 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
return return
} }
var errDesc string
if !knownService {
errDesc = fmt.Sprintf("unknown service %v", service)
} else {
errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
}
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) trInfo.tr.LazyPrintf("%s", errDesc)
trInfo.tr.SetError() trInfo.tr.SetError()
} }
errDesc := fmt.Sprintf("unknown service %v", service)
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
@ -1319,15 +1349,11 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream
// pending RPCs on the client side will get notified by connection // pending RPCs on the client side will get notified by connection
// errors. // errors.
func (s *Server) Stop() { func (s *Server) Stop() {
s.quitOnce.Do(func() { s.quit.Fire()
close(s.quit)
})
defer func() { defer func() {
s.serveWG.Wait() s.serveWG.Wait()
s.doneOnce.Do(func() { s.done.Fire()
close(s.done)
})
}() }()
s.channelzRemoveOnce.Do(func() { s.channelzRemoveOnce.Do(func() {
@ -1364,15 +1390,8 @@ func (s *Server) Stop() {
// accepting new connections and RPCs and blocks until all the pending RPCs are // accepting new connections and RPCs and blocks until all the pending RPCs are
// finished. // finished.
func (s *Server) GracefulStop() { func (s *Server) GracefulStop() {
s.quitOnce.Do(func() { s.quit.Fire()
close(s.quit) defer s.done.Fire()
})
defer func() {
s.doneOnce.Do(func() {
close(s.done)
})
}()
s.channelzRemoveOnce.Do(func() { s.channelzRemoveOnce.Do(func() {
if channelz.IsOn() { if channelz.IsOn() {
@ -1390,8 +1409,8 @@ func (s *Server) GracefulStop() {
} }
s.lis = nil s.lis = nil
if !s.drain { if !s.drain {
for c := range s.conns { for st := range s.conns {
c.(transport.ServerTransport).Drain() st.Drain()
} }
s.drain = true s.drain = true
} }
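
The quit/done channels plus sync.Once pairs above are replaced by internal grpcsync.Event values. A minimal sketch of the semantics the server code now relies on (an assumption-level illustration, not the vendored implementation):

// event fires at most once: Fire is idempotent, HasFired reports whether it has
// happened, and Done exposes a channel that is closed on the first Fire.
// Assumes: import "sync".
type event struct {
	once sync.Once
	c    chan struct{}
}

func newEvent() *event                 { return &event{c: make(chan struct{})} }
func (e *event) Fire()                 { e.once.Do(func() { close(e.c) }) }
func (e *event) Done() <-chan struct{} { return e.c }
func (e *event) HasFired() bool {
	select {
	case <-e.c:
		return true
	default:
		return false
	}
}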

View File

@ -25,8 +25,11 @@ import (
"strings" "strings"
"time" "time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/serviceconfig"
) )
const maxInt = int(^uint(0) >> 1) const maxInt = int(^uint(0) >> 1)
@ -61,6 +64,11 @@ type MethodConfig struct {
retryPolicy *retryPolicy retryPolicy *retryPolicy
} }
type lbConfig struct {
name string
cfg serviceconfig.LoadBalancingConfig
}
// ServiceConfig is provided by the service provider and contains parameters for how // ServiceConfig is provided by the service provider and contains parameters for how
// clients that connect to the service should behave. // clients that connect to the service should behave.
// //
@ -68,10 +76,18 @@ type MethodConfig struct {
// through name resolver, as specified here // through name resolver, as specified here
// https://github.com/grpc/grpc/blob/master/doc/service_config.md // https://github.com/grpc/grpc/blob/master/doc/service_config.md
type ServiceConfig struct { type ServiceConfig struct {
// LB is the load balancer the service providers recommends. The balancer specified serviceconfig.Config
// via grpc.WithBalancer will override this.
// LB is the load balancer the service providers recommends. The balancer
// specified via grpc.WithBalancer will override this. This is deprecated;
// lbConfigs is preferred. If lbConfig and LB are both present, lbConfig
// will be used.
LB *string LB *string
// lbConfig is the service config's load balancing configuration. If
// lbConfig and LB are both present, lbConfig will be used.
lbConfig *lbConfig
// Methods contains a map for the methods in this service. If there is an // Methods contains a map for the methods in this service. If there is an
// exact match for a method (i.e. /service/method) in the map, use the // exact match for a method (i.e. /service/method) in the map, use the
// corresponding MethodConfig. If there's no exact match, look for the // corresponding MethodConfig. If there's no exact match, look for the
@ -99,6 +115,9 @@ type ServiceConfig struct {
// healthCheckConfig must be set as one of the requirement to enable LB channel // healthCheckConfig must be set as one of the requirement to enable LB channel
// health check. // health check.
healthCheckConfig *healthCheckConfig healthCheckConfig *healthCheckConfig
// rawJSONString stores service config json string that get parsed into
// this service config struct.
rawJSONString string
} }
// healthCheckConfig defines the go-native version of the LB channel health check config. // healthCheckConfig defines the go-native version of the LB channel health check config.
@ -230,34 +249,72 @@ type jsonMC struct {
RetryPolicy *jsonRetryPolicy RetryPolicy *jsonRetryPolicy
} }
type loadBalancingConfig map[string]json.RawMessage
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. // TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
type jsonSC struct { type jsonSC struct {
LoadBalancingPolicy *string LoadBalancingPolicy *string
LoadBalancingConfig *[]loadBalancingConfig
MethodConfig *[]jsonMC MethodConfig *[]jsonMC
RetryThrottling *retryThrottlingPolicy RetryThrottling *retryThrottlingPolicy
HealthCheckConfig *healthCheckConfig HealthCheckConfig *healthCheckConfig
} }
func parseServiceConfig(js string) (ServiceConfig, error) { func init() {
internal.ParseServiceConfig = func(sc string) (interface{}, error) {
return parseServiceConfig(sc)
}
}
func parseServiceConfig(js string) (*ServiceConfig, error) {
if len(js) == 0 { if len(js) == 0 {
return ServiceConfig{}, fmt.Errorf("no JSON service config provided") return nil, fmt.Errorf("no JSON service config provided")
} }
var rsc jsonSC var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc) err := json.Unmarshal([]byte(js), &rsc)
if err != nil { if err != nil {
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return ServiceConfig{}, err return nil, err
} }
sc := ServiceConfig{ sc := ServiceConfig{
LB: rsc.LoadBalancingPolicy, LB: rsc.LoadBalancingPolicy,
Methods: make(map[string]MethodConfig), Methods: make(map[string]MethodConfig),
retryThrottling: rsc.RetryThrottling, retryThrottling: rsc.RetryThrottling,
healthCheckConfig: rsc.HealthCheckConfig, healthCheckConfig: rsc.HealthCheckConfig,
rawJSONString: js,
} }
if rsc.MethodConfig == nil { if rsc.LoadBalancingConfig != nil {
return sc, nil for i, lbcfg := range *rsc.LoadBalancingConfig {
if len(lbcfg) != 1 {
err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
grpclog.Warningf(err.Error())
return nil, err
}
var name string
var jsonCfg json.RawMessage
for name, jsonCfg = range lbcfg {
}
builder := balancer.Get(name)
if builder == nil {
continue
}
sc.lbConfig = &lbConfig{name: name}
if parser, ok := builder.(balancer.ConfigParser); ok {
var err error
sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
if err != nil {
return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
}
} else if string(jsonCfg) != "{}" {
grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
}
break
}
} }
if rsc.MethodConfig == nil {
return &sc, nil
}
for _, m := range *rsc.MethodConfig { for _, m := range *rsc.MethodConfig {
if m.Name == nil { if m.Name == nil {
continue continue
@ -265,7 +322,7 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
d, err := parseDuration(m.Timeout) d, err := parseDuration(m.Timeout)
if err != nil { if err != nil {
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return ServiceConfig{}, err return nil, err
} }
mc := MethodConfig{ mc := MethodConfig{
@ -274,7 +331,7 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
} }
if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
return ServiceConfig{}, err return nil, err
} }
if m.MaxRequestMessageBytes != nil { if m.MaxRequestMessageBytes != nil {
if *m.MaxRequestMessageBytes > int64(maxInt) { if *m.MaxRequestMessageBytes > int64(maxInt) {
@ -298,14 +355,14 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
} }
if sc.retryThrottling != nil { if sc.retryThrottling != nil {
if sc.retryThrottling.MaxTokens <= 0 || if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
sc.retryThrottling.MaxTokens >= 1000 || return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)
sc.retryThrottling.TokenRatio <= 0 { }
// Illegal throttling config; disable throttling. if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
sc.retryThrottling = nil return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)
} }
} }
return sc, nil return &sc, nil
} }
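
For reference, a hypothetical service config document of the shape parseServiceConfig now accepts, with the new loadBalancingConfig list alongside the existing method and retry-throttling sections (service and method names are made up):

// Each loadBalancingConfig entry is a single {policy: config} object; the first
// entry whose policy has a registered balancer builder is the one that is used.
const exampleServiceConfig = `{
  "loadBalancingConfig": [ { "round_robin": {} } ],
  "methodConfig": [{
    "name": [{ "service": "example.Echo", "method": "UnaryEcho" }],
    "waitForReady": true,
    "timeout": "1.0s"
  }],
  "retryThrottling": { "maxTokens": 10, "tokenRatio": 0.5 }
}`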
func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {

View File

@ -0,0 +1,48 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package serviceconfig defines types and methods for operating on gRPC
// service configs.
//
// This package is EXPERIMENTAL.
package serviceconfig
import (
"google.golang.org/grpc/internal"
)
// Config represents an opaque data structure holding a service config.
type Config interface {
isConfig()
}
// LoadBalancingConfig represents an opaque data structure holding a load
// balancer config.
type LoadBalancingConfig interface {
isLoadBalancingConfig()
}
// Parse parses the JSON service config provided into an internal form or
// returns an error if the config is invalid.
func Parse(ServiceConfigJSON string) (Config, error) {
c, err := internal.ParseServiceConfig(ServiceConfigJSON)
if err != nil {
return nil, err
}
return c.(Config), err
}
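
A hedged usage sketch of the new experimental package; mustParse is a hypothetical helper, and the grpc package is imported for side effects so its init hook registers the parser:

// Assumes: import ("log"; _ "google.golang.org/grpc"; "google.golang.org/grpc/serviceconfig").
func mustParse(js string) serviceconfig.Config {
	cfg, err := serviceconfig.Parse(js)
	if err != nil {
		log.Fatalf("invalid service config: %v", err)
	}
	return cfg
}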

View File

@ -27,6 +27,8 @@ import (
"context" "context"
"net" "net"
"time" "time"
"google.golang.org/grpc/metadata"
) )
// RPCStats contains stats information about RPCs. // RPCStats contains stats information about RPCs.
@ -172,6 +174,9 @@ type End struct {
BeginTime time.Time BeginTime time.Time
// EndTime is the time when the RPC ends. // EndTime is the time when the RPC ends.
EndTime time.Time EndTime time.Time
// Trailer contains the trailer metadata received from the server. This
// field is only valid if this End is from the client side.
Trailer metadata.MD
// Error is the error the RPC ended with. It is an error generated from // Error is the error the RPC ended with. It is an error generated from
// status.Status and can be converted back to status.Status using // status.Status and can be converted back to status.Status using
// status.FromError if non-nil. // status.FromError if non-nil.
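
A hypothetical client-side stats.Handler that consumes the new Trailer field (the handler name is made up; only End carries the trailer, and only on the client side):

// Assumes: import ("context"; "log"; "google.golang.org/grpc/stats").
type trailerLogger struct{}

func (trailerLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (trailerLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (trailerLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func (trailerLogger) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if end, ok := s.(*stats.End); ok && end.Client {
		log.Printf("rpc finished: trailer=%v err=%v", end.Trailer, end.Error)
	}
}

// Installed on the client as: grpc.Dial(target, grpc.WithStatsHandler(trailerLogger{}))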

View File

@ -36,8 +36,15 @@ import (
"github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes"
spb "google.golang.org/genproto/googleapis/rpc/status" spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/internal"
) )
func init() {
internal.StatusRawProto = statusRawProto
}
func statusRawProto(s *Status) *spb.Status { return s.s }
// statusError is an alias of a status proto. It implements error and Status, // statusError is an alias of a status proto. It implements error and Status,
// and a nil statusError should never be returned by this package. // and a nil statusError should never be returned by this package.
type statusError spb.Status type statusError spb.Status
@ -51,6 +58,17 @@ func (se *statusError) GRPCStatus() *Status {
return &Status{s: (*spb.Status)(se)} return &Status{s: (*spb.Status)(se)}
} }
// Is implements future error.Is functionality.
// A statusError is equivalent if the code and message are identical.
func (se *statusError) Is(target error) bool {
tse, ok := target.(*statusError)
if !ok {
return false
}
return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
}
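
With Go 1.13's errors.Is (the "future" functionality the comment refers to), two status errors compare equal when their code and message match; a small sketch:

// Assumes: import ("errors"; "fmt"; "google.golang.org/grpc/codes"; "google.golang.org/grpc/status").
func demoStatusIs() {
	a := status.Error(codes.NotFound, "key missing")
	b := status.Error(codes.NotFound, "key missing")
	fmt.Println(errors.Is(a, b)) // true: same code and message
}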
// Status represents an RPC status code, message, and details. It is immutable // Status represents an RPC status code, message, and details. It is immutable
// and should be created with New, Newf, or FromProto. // and should be created with New, Newf, or FromProto.
type Status struct { type Status struct {
@ -125,7 +143,7 @@ func FromProto(s *spb.Status) *Status {
// Status is returned with codes.Unknown and the original error message. // Status is returned with codes.Unknown and the original error message.
func FromError(err error) (s *Status, ok bool) { func FromError(err error) (s *Status, ok bool) {
if err == nil { if err == nil {
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true return nil, true
} }
if se, ok := err.(interface { if se, ok := err.(interface {
GRPCStatus() *Status GRPCStatus() *Status
@ -199,7 +217,7 @@ func Code(err error) codes.Code {
func FromContextError(err error) *Status { func FromContextError(err error) *Status {
switch err { switch err {
case nil: case nil:
return New(codes.OK, "") return nil
case context.DeadlineExceeded: case context.DeadlineExceeded:
return New(codes.DeadlineExceeded, err.Error()) return New(codes.DeadlineExceeded, err.Error())
case context.Canceled: case context.Canceled:
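
The behavioral change in the two hunks above: a nil error now yields a nil *Status instead of an OK-coded one. A short sketch:

// Assumes: import ("fmt"; "google.golang.org/grpc/status").
func demoNilStatus() {
	s, ok := status.FromError(nil)
	fmt.Println(s == nil, ok)                        // true true (previously an OK status)
	fmt.Println(status.FromContextError(nil) == nil) // true
}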

View File

@ -30,9 +30,9 @@ import (
"golang.org/x/net/trace" "golang.org/x/net/trace"
"google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/balancerload"
"google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcrand"
@ -230,17 +230,21 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
if c.creds != nil { if c.creds != nil {
callHdr.Creds = c.creds callHdr.Creds = c.creds
} }
var trInfo traceInfo var trInfo *traceInfo
if EnableTracing { if EnableTracing {
trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) trInfo = &traceInfo{
trInfo.firstLine.client = true tr: trace.New("grpc.Sent."+methodFamily(method), method),
firstLine: firstLine{
client: true,
},
}
if deadline, ok := ctx.Deadline(); ok { if deadline, ok := ctx.Deadline(); ok {
trInfo.firstLine.deadline = time.Until(deadline) trInfo.firstLine.deadline = time.Until(deadline)
} }
trInfo.tr.LazyLog(&trInfo.firstLine, false) trInfo.tr.LazyLog(&trInfo.firstLine, false)
ctx = trace.NewContext(ctx, trInfo.tr) ctx = trace.NewContext(ctx, trInfo.tr)
} }
ctx = newContextWithRPCInfo(ctx, c.failFast) ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp)
sh := cc.dopts.copts.StatsHandler sh := cc.dopts.copts.StatsHandler
var beginTime time.Time var beginTime time.Time
if sh != nil { if sh != nil {
@ -323,13 +327,23 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return cs, nil return cs, nil
} }
func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) error { // newAttemptLocked creates a new attempt with a transport.
cs.attempt = &csAttempt{ // If it succeeds, then it replaces clientStream's attempt with this new attempt.
func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
newAttempt := &csAttempt{
cs: cs, cs: cs,
dc: cs.cc.dopts.dc, dc: cs.cc.dopts.dc,
statsHandler: sh, statsHandler: sh,
trInfo: trInfo, trInfo: trInfo,
} }
defer func() {
if retErr != nil {
// This attempt is not set in the clientStream, so its finish won't
// be called. Call it here for stats and trace in case they are not
// nil.
newAttempt.finish(retErr)
}
}()
if err := cs.ctx.Err(); err != nil { if err := cs.ctx.Err(); err != nil {
return toRPCErr(err) return toRPCErr(err)
@ -338,8 +352,12 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) err
if err != nil { if err != nil {
return err return err
} }
cs.attempt.t = t if trInfo != nil {
cs.attempt.done = done trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
}
newAttempt.t = t
newAttempt.done = done
cs.attempt = newAttempt
return nil return nil
} }
@ -388,11 +406,18 @@ type clientStream struct {
serverHeaderBinlogged bool serverHeaderBinlogged bool
mu sync.Mutex mu sync.Mutex
firstAttempt bool // if true, transparent retry is valid firstAttempt bool // if true, transparent retry is valid
numRetries int // exclusive of transparent retry attempt(s) numRetries int // exclusive of transparent retry attempt(s)
numRetriesSincePushback int // retries since pushback; to reset backoff numRetriesSincePushback int // retries since pushback; to reset backoff
finished bool // TODO: replace with atomic cmpxchg or sync.Once? finished bool // TODO: replace with atomic cmpxchg or sync.Once?
attempt *csAttempt // the active client stream attempt // attempt is the active client stream attempt.
// The only place where it is written is the newAttemptLocked method and this method never writes nil.
// So, attempt can be nil only inside the newClientStream function when clientStream is first created.
// One of the first things done after clientStream's creation is to call newAttemptLocked, which either
// assigns a non-nil value to the attempt or returns an error. If an error is returned from newAttemptLocked,
// then newClientStream calls finish on the clientStream and returns. So, the finish method is the only
// place where we need to check if the attempt is nil.
attempt *csAttempt
// TODO(hedging): hedging will have multiple attempts simultaneously. // TODO(hedging): hedging will have multiple attempts simultaneously.
committed bool // active attempt committed for retry? committed bool // active attempt committed for retry?
buffer []func(a *csAttempt) error // operations to replay on retry buffer []func(a *csAttempt) error // operations to replay on retry
@ -414,9 +439,10 @@ type csAttempt struct {
decompSet bool decompSet bool
mu sync.Mutex // guards trInfo.tr mu sync.Mutex // guards trInfo.tr
// trInfo may be nil (if EnableTracing is false).
// trInfo.tr is set when created (if EnableTracing is true), // trInfo.tr is set when created (if EnableTracing is true),
// and cleared when the finish method is called. // and cleared when the finish method is called.
trInfo traceInfo trInfo *traceInfo
statsHandler stats.Handler statsHandler stats.Handler
} }
@ -449,8 +475,8 @@ func (cs *clientStream) shouldRetry(err error) error {
if cs.attempt.s != nil { if cs.attempt.s != nil {
<-cs.attempt.s.Done() <-cs.attempt.s.Done()
} }
if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
// First attempt, wait-for-ready, stream unprocessed: transparently retry. // First attempt, stream unprocessed: transparently retry.
cs.firstAttempt = false cs.firstAttempt = false
return nil return nil
} }
@ -540,7 +566,7 @@ func (cs *clientStream) retryLocked(lastErr error) error {
cs.commitAttemptLocked() cs.commitAttemptLocked()
return err return err
} }
if err := cs.newAttemptLocked(nil, traceInfo{}); err != nil { if err := cs.newAttemptLocked(nil, nil); err != nil {
return err return err
} }
if lastErr = cs.replayBufferLocked(); lastErr == nil { if lastErr = cs.replayBufferLocked(); lastErr == nil {
@ -668,15 +694,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
if !cs.desc.ClientStreams { if !cs.desc.ClientStreams {
cs.sentLast = true cs.sentLast = true
} }
data, err := encode(cs.codec, m)
// load hdr, payload, data
hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
if err != nil { if err != nil {
return err return err
} }
compData, err := compress(data, cs.cp, cs.comp)
if err != nil {
return err
}
hdr, payload := msgHeader(data, compData)
// TODO(dfawley): should we be checking len(data) instead? // TODO(dfawley): should we be checking len(data) instead?
if len(payload) > *cs.callInfo.maxSendMessageSize { if len(payload) > *cs.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
@ -799,11 +823,11 @@ func (cs *clientStream) finish(err error) {
} }
if cs.attempt != nil { if cs.attempt != nil {
cs.attempt.finish(err) cs.attempt.finish(err)
} // after functions all rely upon having a stream.
// after functions all rely upon having a stream. if cs.attempt.s != nil {
if cs.attempt.s != nil { for _, o := range cs.opts {
for _, o := range cs.opts { o.after(cs.callInfo)
o.after(cs.callInfo) }
} }
} }
cs.cancel() cs.cancel()
@ -811,7 +835,7 @@ func (cs *clientStream) finish(err error) {
func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
cs := a.cs cs := a.cs
if EnableTracing { if a.trInfo != nil {
a.mu.Lock() a.mu.Lock()
if a.trInfo.tr != nil { if a.trInfo.tr != nil {
a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
@ -868,7 +892,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
} }
return toRPCErr(err) return toRPCErr(err)
} }
if EnableTracing { if a.trInfo != nil {
a.mu.Lock() a.mu.Lock()
if a.trInfo.tr != nil { if a.trInfo.tr != nil {
a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
@ -881,8 +905,9 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
RecvTime: time.Now(), RecvTime: time.Now(),
Payload: m, Payload: m,
// TODO truncate large payload. // TODO truncate large payload.
Data: payInfo.uncompressedBytes, Data: payInfo.uncompressedBytes,
Length: len(payInfo.uncompressedBytes), WireLength: payInfo.wireLength,
Length: len(payInfo.uncompressedBytes),
}) })
} }
if channelz.IsOn() { if channelz.IsOn() {
@ -915,22 +940,23 @@ func (a *csAttempt) finish(err error) {
// Ending a stream with EOF indicates a success. // Ending a stream with EOF indicates a success.
err = nil err = nil
} }
var tr metadata.MD
if a.s != nil { if a.s != nil {
a.t.CloseStream(a.s, err) a.t.CloseStream(a.s, err)
tr = a.s.Trailer()
} }
if a.done != nil { if a.done != nil {
br := false br := false
var tr metadata.MD
if a.s != nil { if a.s != nil {
br = a.s.BytesReceived() br = a.s.BytesReceived()
tr = a.s.Trailer()
} }
a.done(balancer.DoneInfo{ a.done(balancer.DoneInfo{
Err: err, Err: err,
Trailer: tr, Trailer: tr,
BytesSent: a.s != nil, BytesSent: a.s != nil,
BytesReceived: br, BytesReceived: br,
ServerLoad: balancerload.Parse(tr),
}) })
} }
if a.statsHandler != nil { if a.statsHandler != nil {
@ -938,11 +964,12 @@ func (a *csAttempt) finish(err error) {
Client: true, Client: true,
BeginTime: a.cs.beginTime, BeginTime: a.cs.beginTime,
EndTime: time.Now(), EndTime: time.Now(),
Trailer: tr,
Error: err, Error: err,
} }
a.statsHandler.HandleRPC(a.cs.ctx, end) a.statsHandler.HandleRPC(a.cs.ctx, end)
} }
if a.trInfo.tr != nil { if a.trInfo != nil && a.trInfo.tr != nil {
if err == nil { if err == nil {
a.trInfo.tr.LazyPrintf("RPC: [OK]") a.trInfo.tr.LazyPrintf("RPC: [OK]")
} else { } else {
@ -955,19 +982,18 @@ func (a *csAttempt) finish(err error) {
a.mu.Unlock() a.mu.Unlock()
} }
func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, opts ...CallOption) (_ ClientStream, err error) { // newClientStream creates a ClientStream with the specified transport, on the
ac.mu.Lock() // given addrConn.
if ac.transport != t { //
ac.mu.Unlock() // It's expected that the given transport is either the same one in addrConn, or
return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") // is already closed. To avoid race, transport is specified separately, instead
} // of using ac.transport.
// transition to CONNECTING state when an attempt starts //
if ac.state != connectivity.Connecting { // Main difference between this and ClientConn.NewStream:
ac.updateConnectivityState(connectivity.Connecting) // - no retry
ac.cc.handleSubConnStateChange(ac.acbw, ac.state) // - no service config (or wait for service config)
} // - no tracing or stats
ac.mu.Unlock() func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
if t == nil { if t == nil {
// TODO: return RPC error here? // TODO: return RPC error here?
return nil, errors.New("transport provided is nil") return nil, errors.New("transport provided is nil")
@ -975,14 +1001,6 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho
// defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct. // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
c := &callInfo{} c := &callInfo{}
for _, o := range opts {
if err := o.before(c); err != nil {
return nil, toRPCErr(err)
}
}
c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
// Possible context leak: // Possible context leak:
// The cancel function for the child context we create will only be called // The cancel function for the child context we create will only be called
// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
@ -995,6 +1013,13 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho
} }
}() }()
for _, o := range opts {
if err := o.before(c); err != nil {
return nil, toRPCErr(err)
}
}
c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
if err := setCallInfoCodec(c); err != nil { if err := setCallInfoCodec(c); err != nil {
return nil, err return nil, err
} }
@ -1027,6 +1052,7 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho
callHdr.Creds = c.creds callHdr.Creds = c.creds
} }
// Use a special addrConnStream to avoid retry.
as := &addrConnStream{ as := &addrConnStream{
callHdr: callHdr, callHdr: callHdr,
ac: ac, ac: ac,
@ -1138,15 +1164,13 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) {
if !as.desc.ClientStreams { if !as.desc.ClientStreams {
as.sentLast = true as.sentLast = true
} }
data, err := encode(as.codec, m)
// load hdr, payload, data
hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
if err != nil { if err != nil {
return err return err
} }
compData, err := compress(data, as.cp, as.comp)
if err != nil {
return err
}
hdr, payld := msgHeader(data, compData)
// TODO(dfawley): should we be checking len(data) instead? // TODO(dfawley): should we be checking len(data) instead?
if len(payld) > *as.callInfo.maxSendMessageSize { if len(payld) > *as.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
@ -1383,15 +1407,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
ss.t.IncrMsgSent() ss.t.IncrMsgSent()
} }
}() }()
data, err := encode(ss.codec, m)
// load hdr, payload, data
hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
if err != nil { if err != nil {
return err return err
} }
compData, err := compress(data, ss.cp, ss.comp)
if err != nil {
return err
}
hdr, payload := msgHeader(data, compData)
// TODO(dfawley): should we be checking len(data) instead? // TODO(dfawley): should we be checking len(data) instead?
if len(payload) > ss.maxSendMessageSize { if len(payload) > ss.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
@ -1466,8 +1488,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
RecvTime: time.Now(), RecvTime: time.Now(),
Payload: m, Payload: m,
// TODO truncate large payload. // TODO truncate large payload.
Data: payInfo.uncompressedBytes, Data: payInfo.uncompressedBytes,
Length: len(payInfo.uncompressedBytes), WireLength: payInfo.wireLength,
Length: len(payInfo.uncompressedBytes),
}) })
} }
if ss.binlog != nil { if ss.binlog != nil {
@ -1483,3 +1506,24 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
func MethodFromServerStream(stream ServerStream) (string, bool) { func MethodFromServerStream(stream ServerStream) (string, bool) {
return Method(stream.Context()) return Method(stream.Context())
} }
// prepareMsg returns the hdr, payload and data
// using the compressors passed, or using the
// passed PreparedMsg
func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
if preparedMsg, ok := m.(*PreparedMsg); ok {
return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
}
// The input interface is not a prepared msg.
// Marshal and Compress the data at this point
data, err = encode(codec, m)
if err != nil {
return nil, nil, nil, err
}
compData, err := compress(data, cp, comp)
if err != nil {
return nil, nil, nil, err
}
hdr, payload = msgHeader(data, compData)
return hdr, payload, data, nil
}
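
prepareMsg short-circuits when the caller hands it an already-encoded *PreparedMsg. A hedged client-side sketch of that fast path, assuming the grpc.PreparedMsg API with its Encode(stream, msg) method:

// sendPrepared marshals and compresses req once via PreparedMsg, then sends the
// prepared bytes on an existing stream; SendMsg detects *PreparedMsg and skips
// re-encoding. Assumes: import "google.golang.org/grpc".
func sendPrepared(stream grpc.ClientStream, req interface{}) error {
	var pm grpc.PreparedMsg
	if err := pm.Encode(stream, req); err != nil {
		return err
	}
	return stream.SendMsg(&pm)
}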

View File

@ -24,6 +24,7 @@ import (
"io" "io"
"net" "net"
"strings" "strings"
"sync"
"time" "time"
"golang.org/x/net/trace" "golang.org/x/net/trace"
@ -53,13 +54,25 @@ type traceInfo struct {
} }
// firstLine is the first line of an RPC trace. // firstLine is the first line of an RPC trace.
// It may be mutated after construction; remoteAddr specifically may change
// during client-side use.
type firstLine struct { type firstLine struct {
mu sync.Mutex
client bool // whether this is a client (outgoing) RPC client bool // whether this is a client (outgoing) RPC
remoteAddr net.Addr remoteAddr net.Addr
deadline time.Duration // may be zero deadline time.Duration // may be zero
} }
func (f *firstLine) SetRemoteAddr(addr net.Addr) {
f.mu.Lock()
f.remoteAddr = addr
f.mu.Unlock()
}
func (f *firstLine) String() string { func (f *firstLine) String() string {
f.mu.Lock()
defer f.mu.Unlock()
var line bytes.Buffer var line bytes.Buffer
io.WriteString(&line, "RPC: ") io.WriteString(&line, "RPC: ")
if f.client { if f.client {

View File

@ -19,4 +19,4 @@
package grpc package grpc
// Version is the current grpc version. // Version is the current grpc version.
const Version = "1.19.1" const Version = "1.23.0"

23
vendor/google.golang.org/grpc/vet.sh generated vendored
View File

@ -75,7 +75,7 @@ git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO
# - Do not import math/rand for real library code. Use internal/grpcrand for # - Do not import math/rand for real library code. Use internal/grpcrand for
# thread safety. # thread safety.
git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test')
# - Ensure all ptypes proto packages are renamed when importing. # - Ensure all ptypes proto packages are renamed when importing.
git ls-files "*.go" | (! xargs grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/") git ls-files "*.go" | (! xargs grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/")
@ -86,9 +86,9 @@ go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go
# - gofmt, goimports, golint (with exceptions for generated code), go vet. # - gofmt, goimports, golint (with exceptions for generated code), go vet.
gofmt -s -d -l . 2>&1 | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output
goimports -l . 2>&1 | fail_on_output goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") | fail_on_output
golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:")
go tool vet -all . go vet -all .
# - Check that generated proto files are up to date. # - Check that generated proto files are up to date.
if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ -z "${VET_SKIP_PROTO}" ]]; then
@ -105,17 +105,28 @@ if go help mod >& /dev/null; then
fi fi
# - Collection of static analysis checks # - Collection of static analysis checks
# TODO(menghanl): fix errors in transport_test. # TODO(dfawley): don't use deprecated functions in examples.
staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore ' staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore '
google.golang.org/grpc/balancer.go:SA1019 google.golang.org/grpc/balancer.go:SA1019
google.golang.org/grpc/balancer_test.go:SA1019 google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go:SA1019
google.golang.org/grpc/clientconn_test.go:SA1019
google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019 google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019
google.golang.org/grpc/xds/internal/balancer/edsbalancer/balancergroup.go:SA1019
google.golang.org/grpc/xds/internal/balancer/xds.go:SA1019
google.golang.org/grpc/xds/internal/balancer/xds_client.go:SA1019
google.golang.org/grpc/balancer_conn_wrappers.go:SA1019
google.golang.org/grpc/balancer_test.go:SA1019
google.golang.org/grpc/benchmark/benchmain/main.go:SA1019 google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019 google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019
google.golang.org/grpc/clientconn.go:S1024
google.golang.org/grpc/clientconn_state_transition_test.go:SA1019
google.golang.org/grpc/clientconn_test.go:SA1019
google.golang.org/grpc/examples/features/debugging/client/main.go:SA1019
google.golang.org/grpc/examples/features/load_balancing/client/main.go:SA1019
google.golang.org/grpc/internal/transport/handler_server.go:SA1019 google.golang.org/grpc/internal/transport/handler_server.go:SA1019
google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019 google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019
google.golang.org/grpc/resolver/dns/dns_resolver.go:SA1019
google.golang.org/grpc/stats/stats_test.go:SA1019 google.golang.org/grpc/stats/stats_test.go:SA1019
google.golang.org/grpc/test/balancer_test.go:SA1019
google.golang.org/grpc/test/channelz_test.go:SA1019 google.golang.org/grpc/test/channelz_test.go:SA1019
google.golang.org/grpc/test/end2end_test.go:SA1019 google.golang.org/grpc/test/end2end_test.go:SA1019
google.golang.org/grpc/test/healthcheck_test.go:SA1019 google.golang.org/grpc/test/healthcheck_test.go:SA1019

4
vendor/modules.txt vendored
View File

@ -555,7 +555,7 @@ google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/cloudsql google.golang.org/appengine/cloudsql
# google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 # google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.19.1 # google.golang.org/grpc v1.23.0
google.golang.org/grpc google.golang.org/grpc
google.golang.org/grpc/credentials google.golang.org/grpc/credentials
google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/health/grpc_health_v1
@ -570,6 +570,7 @@ google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/proto google.golang.org/grpc/encoding/proto
google.golang.org/grpc/internal google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/backoff
google.golang.org/grpc/internal/balancerload
google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/binarylog
google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/channelz
google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/envconfig
@ -582,6 +583,7 @@ google.golang.org/grpc/peer
google.golang.org/grpc/resolver google.golang.org/grpc/resolver
google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/dns
google.golang.org/grpc/resolver/passthrough google.golang.org/grpc/resolver/passthrough
google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats google.golang.org/grpc/stats
google.golang.org/grpc/tap google.golang.org/grpc/tap
google.golang.org/grpc/credentials/internal google.golang.org/grpc/credentials/internal