Clustering enhancements (#1747)
parent: 87c42a796b
commit: bdcfe05517
@@ -6,7 +6,7 @@ services:
- docker

go:
- 1.7rc6
- 1.7

matrix:
  allow_failures:
Makefile (4 changed lines)
@@ -20,7 +20,7 @@ dev-dynamic: generate

# test runs the unit tests and vets the code
test: generate
CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=120s -parallel=4
CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=10m -parallel=4

# testacc runs acceptance tests
testacc: generate

@@ -32,7 +32,7 @@ testacc: generate

# testrace runs the race checker
testrace: generate
CGO_ENABLED=1 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS)
CGO_ENABLED=1 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=20m -parallel=4

cover:
./scripts/coverage.sh --html
@@ -18,6 +18,8 @@ import (
"syscall"
"time"

"google.golang.org/grpc/grpclog"

"github.com/armon/go-metrics"
"github.com/armon/go-metrics/circonus"
"github.com/hashicorp/errwrap"
@@ -154,6 +156,7 @@ func (c *ServerCommand) Run(args []string) int {
MinLevel: logutils.LogLevel(strings.ToUpper(logLevel)),
Writer: logGate,
}, "", log.LstdFlags)
grpclog.SetLogger(c.logger)

if err := c.setupTelemetry(config); err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing telemetry: %s", err))
@@ -344,7 +347,7 @@ func (c *ServerCommand) Run(args []string) int {
}
}

clusterAddrs := []string{}
clusterAddrs := []*net.TCPAddr{}

// Initialize the listeners
lns := make([]net.Listener, 0, len(config.Listeners))
@@ -369,19 +372,24 @@ func (c *ServerCommand) Run(args []string) int {
var addr string
var ok bool
if addr, ok = lnConfig.Config["cluster_address"]; ok {
clusterAddrs = append(clusterAddrs, addr)
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error resolving cluster_address: %s",
err))
return 1
}
clusterAddrs = append(clusterAddrs, tcpAddr)
} else {
tcpAddr, ok := ln.Addr().(*net.TCPAddr)
if !ok {
c.Ui.Error("Failed to parse tcp listener")
return 1
}
ipStr := tcpAddr.IP.String()
if len(tcpAddr.IP) == net.IPv6len {
ipStr = fmt.Sprintf("[%s]", ipStr)
}
addr = fmt.Sprintf("%s:%d", ipStr, tcpAddr.Port+1)
clusterAddrs = append(clusterAddrs, addr)
clusterAddrs = append(clusterAddrs, &net.TCPAddr{
IP: tcpAddr.IP,
Port: tcpAddr.Port + 1,
})
}
props["cluster address"] = addr
}
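As the hunk above shows, when a listener block does not set cluster_address the server now derives one from the listener's own TCP address by reusing its IP and adding one to its port, while an explicit cluster_address is resolved with net.ResolveTCPAddr. A minimal standalone sketch of that fallback (this is illustrative, not the commit's code):

package main

import (
	"fmt"
	"net"
)

// defaultClusterAddr mirrors the fallback in the hunk above: same IP as the
// API listener, port incremented by one.
func defaultClusterAddr(ln net.Addr) (*net.TCPAddr, error) {
	tcpAddr, ok := ln.(*net.TCPAddr)
	if !ok {
		return nil, fmt.Errorf("listener is not TCP")
	}
	return &net.TCPAddr{IP: tcpAddr.IP, Port: tcpAddr.Port + 1}, nil
}

func main() {
	// A listener on 127.0.0.1:8200 would advertise 127.0.0.1:8201 for cluster traffic.
	addr, _ := defaultClusterAddr(&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8200})
	fmt.Println(addr) // 127.0.0.1:8201
}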
@@ -465,7 +473,8 @@ func (c *ServerCommand) Run(args []string) int {

// This needs to happen before we first unseal, so before we trigger dev
// mode if it's set
core.SetClusterListenerSetupFunc(vault.WrapListenersForClustering(clusterAddrs, handler, c.logger))
core.SetClusterListenerAddrs(clusterAddrs)
core.SetClusterSetupFuncs(vault.WrapHandlerForClustering(handler, c.logger))

// If we're in dev mode, then initialize the core
if dev {
@ -0,0 +1,145 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: types.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package forwarding is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
types.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Request
|
||||
URL
|
||||
HeaderEntry
|
||||
Response
|
||||
*/
|
||||
package forwarding
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type Request struct {
|
||||
// Not used right now but reserving in case it turns out that streaming
|
||||
// makes things more economical on the gRPC side
|
||||
// uint64 id = 1;
|
||||
Method string `protobuf:"bytes,2,opt,name=method" json:"method,omitempty"`
|
||||
Url *URL `protobuf:"bytes,3,opt,name=url" json:"url,omitempty"`
|
||||
HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
Body []byte `protobuf:"bytes,5,opt,name=body,proto3" json:"body,omitempty"`
|
||||
Host string `protobuf:"bytes,6,opt,name=host" json:"host,omitempty"`
|
||||
RemoteAddr string `protobuf:"bytes,7,opt,name=remote_addr,json=remoteAddr" json:"remote_addr,omitempty"`
|
||||
PeerCertificates [][]byte `protobuf:"bytes,8,rep,name=peer_certificates,json=peerCertificates,proto3" json:"peer_certificates,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Request) Reset() { *m = Request{} }
|
||||
func (m *Request) String() string { return proto.CompactTextString(m) }
|
||||
func (*Request) ProtoMessage() {}
|
||||
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *Request) GetUrl() *URL {
|
||||
if m != nil {
|
||||
return m.Url
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Request) GetHeaderEntries() map[string]*HeaderEntry {
|
||||
if m != nil {
|
||||
return m.HeaderEntries
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type URL struct {
|
||||
Scheme string `protobuf:"bytes,1,opt,name=scheme" json:"scheme,omitempty"`
|
||||
Opaque string `protobuf:"bytes,2,opt,name=opaque" json:"opaque,omitempty"`
|
||||
// This isn't needed now but might be in the future, so we'll skip the
|
||||
// number to keep the ordering in net/url
|
||||
// UserInfo user = 3;
|
||||
Host string `protobuf:"bytes,4,opt,name=host" json:"host,omitempty"`
|
||||
Path string `protobuf:"bytes,5,opt,name=path" json:"path,omitempty"`
|
||||
RawPath string `protobuf:"bytes,6,opt,name=raw_path,json=rawPath" json:"raw_path,omitempty"`
|
||||
// This also isn't needed right now, but we'll reserve the number
|
||||
// bool force_query = 7;
|
||||
RawQuery string `protobuf:"bytes,8,opt,name=raw_query,json=rawQuery" json:"raw_query,omitempty"`
|
||||
Fragment string `protobuf:"bytes,9,opt,name=fragment" json:"fragment,omitempty"`
|
||||
}
|
||||
|
||||
func (m *URL) Reset() { *m = URL{} }
|
||||
func (m *URL) String() string { return proto.CompactTextString(m) }
|
||||
func (*URL) ProtoMessage() {}
|
||||
func (*URL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
type HeaderEntry struct {
|
||||
Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
|
||||
}
|
||||
|
||||
func (m *HeaderEntry) Reset() { *m = HeaderEntry{} }
|
||||
func (m *HeaderEntry) String() string { return proto.CompactTextString(m) }
|
||||
func (*HeaderEntry) ProtoMessage() {}
|
||||
func (*HeaderEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
type Response struct {
|
||||
// Not used right now but reserving in case it turns out that streaming
|
||||
// makes things more economical on the gRPC side
|
||||
// uint64 id = 1;
|
||||
StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode" json:"status_code,omitempty"`
|
||||
Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Response) Reset() { *m = Response{} }
|
||||
func (m *Response) String() string { return proto.CompactTextString(m) }
|
||||
func (*Response) ProtoMessage() {}
|
||||
func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Request)(nil), "forwarding.Request")
|
||||
proto.RegisterType((*URL)(nil), "forwarding.URL")
|
||||
proto.RegisterType((*HeaderEntry)(nil), "forwarding.HeaderEntry")
|
||||
proto.RegisterType((*Response)(nil), "forwarding.Response")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("types.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 416 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x52, 0xcd, 0x6e, 0xd4, 0x30,
|
||||
0x10, 0x56, 0xd6, 0xdb, 0xdd, 0x64, 0xd2, 0x42, 0xf1, 0x01, 0x4c, 0x39, 0x10, 0x56, 0x02, 0x45,
|
||||
0x42, 0xe4, 0xb0, 0x5c, 0x10, 0x17, 0x84, 0x2a, 0x24, 0x0e, 0x45, 0x02, 0x4b, 0x3d, 0x70, 0x8a,
|
||||
0xdc, 0x78, 0xb6, 0x89, 0x68, 0xe2, 0xac, 0xed, 0xb0, 0xca, 0x63, 0xf1, 0x4e, 0x3c, 0x08, 0xb2,
|
||||
0x1d, 0xba, 0x91, 0x7a, 0xca, 0x7c, 0x3f, 0x99, 0xcc, 0x37, 0x13, 0x48, 0xed, 0xd8, 0xa3, 0x29,
|
||||
0x7a, 0xad, 0xac, 0xa2, 0xb0, 0x53, 0xfa, 0x20, 0xb4, 0x6c, 0xba, 0xdb, 0xcd, 0xdf, 0x05, 0xac,
|
||||
0x39, 0xee, 0x07, 0x34, 0x96, 0x3e, 0x85, 0x55, 0x8b, 0xb6, 0x56, 0x92, 0x2d, 0xb2, 0x28, 0x4f,
|
||||
0xf8, 0x84, 0xe8, 0x2b, 0x20, 0x83, 0xbe, 0x63, 0x24, 0x8b, 0xf2, 0x74, 0xfb, 0xb8, 0x38, 0xbe,
|
||||
0x5d, 0x5c, 0xf3, 0x2b, 0xee, 0x34, 0xfa, 0x0d, 0x1e, 0xd5, 0x28, 0x24, 0xea, 0x12, 0x3b, 0xab,
|
||||
0x1b, 0x34, 0x6c, 0x99, 0x91, 0x3c, 0xdd, 0xbe, 0x99, 0xbb, 0xa7, 0xef, 0x14, 0x5f, 0xbd, 0xf3,
|
||||
0x4b, 0x30, 0xba, 0xc7, 0xc8, 0xcf, 0xea, 0x39, 0x47, 0x29, 0x2c, 0x6f, 0x94, 0x1c, 0xd9, 0x49,
|
||||
0x16, 0xe5, 0xa7, 0xdc, 0xd7, 0x8e, 0xab, 0x95, 0xb1, 0x6c, 0xe5, 0x67, 0xf3, 0x35, 0x7d, 0x09,
|
||||
0xa9, 0xc6, 0x56, 0x59, 0x2c, 0x85, 0x94, 0x9a, 0xad, 0xbd, 0x04, 0x81, 0xfa, 0x2c, 0xa5, 0xa6,
|
||||
0x6f, 0xe1, 0x49, 0x8f, 0xa8, 0xcb, 0x0a, 0xb5, 0x6d, 0x76, 0x4d, 0x25, 0x2c, 0x1a, 0x16, 0x67,
|
||||
0x24, 0x3f, 0xe5, 0xe7, 0x4e, 0xb8, 0x9c, 0xf1, 0x17, 0x3f, 0x81, 0x3e, 0x1c, 0x8d, 0x9e, 0x03,
|
||||
0xf9, 0x85, 0x23, 0x8b, 0x7c, 0x6f, 0x57, 0xd2, 0x77, 0x70, 0xf2, 0x5b, 0xdc, 0x0d, 0xe8, 0xd7,
|
||||
0x94, 0x6e, 0x9f, 0xcd, 0x33, 0x1e, 0x1b, 0x8c, 0x3c, 0xb8, 0x3e, 0x2e, 0x3e, 0x44, 0x9b, 0x3f,
|
||||
0x11, 0x90, 0x6b, 0x7e, 0xe5, 0x56, 0x6c, 0xaa, 0x1a, 0x5b, 0x9c, 0xfa, 0x4d, 0xc8, 0xf1, 0xaa,
|
||||
0x17, 0xfb, 0xa9, 0x67, 0xc2, 0x27, 0x74, 0x1f, 0x7a, 0x39, 0x0b, 0x4d, 0x61, 0xd9, 0x0b, 0x5b,
|
||||
0xfb, 0xe5, 0x24, 0xdc, 0xd7, 0xf4, 0x39, 0xc4, 0x5a, 0x1c, 0x4a, 0xcf, 0x87, 0x05, 0xad, 0xb5,
|
||||
0x38, 0x7c, 0x77, 0xd2, 0x0b, 0x48, 0x9c, 0xb4, 0x1f, 0x50, 0x8f, 0x2c, 0xf6, 0x9a, 0xf3, 0xfe,
|
||||
0x70, 0x98, 0x5e, 0x40, 0xbc, 0xd3, 0xe2, 0xb6, 0xc5, 0xce, 0xb2, 0x24, 0x68, 0xff, 0xf1, 0xe6,
|
||||
0x35, 0xa4, 0xb3, 0x34, 0x6e, 0x44, 0x9f, 0xc7, 0xb0, 0x28, 0x23, 0x6e, 0xc4, 0x80, 0x36, 0x9f,
|
||||
0x20, 0xe6, 0x68, 0x7a, 0xd5, 0x19, 0x74, 0xf7, 0x30, 0x56, 0xd8, 0xc1, 0x94, 0x95, 0x92, 0x21,
|
||||
0xcb, 0x19, 0x87, 0x40, 0x5d, 0x2a, 0x89, 0xf7, 0x87, 0x25, 0xc7, 0xc3, 0xde, 0xac, 0xfc, 0x5f,
|
||||
0xf9, 0xfe, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x58, 0xe7, 0xdc, 0xa4, 0x02, 0x00, 0x00,
|
||||
}
|
|
@@ -0,0 +1,43 @@
syntax = "proto3";

package forwarding;

message Request {
  // Not used right now but reserving in case it turns out that streaming
  // makes things more economical on the gRPC side
  //uint64 id = 1;
  string method = 2;
  URL url = 3;
  map<string, HeaderEntry> header_entries = 4;
  bytes body = 5;
  string host = 6;
  string remote_addr = 7;
  repeated bytes peer_certificates = 8;
}

message URL {
  string scheme = 1;
  string opaque = 2;
  // This isn't needed now but might be in the future, so we'll skip the
  // number to keep the ordering in net/url
  //UserInfo user = 3;
  string host = 4;
  string path = 5;
  string raw_path = 6;
  // This also isn't needed right now, but we'll reserve the number
  //bool force_query = 7;
  string raw_query = 8;
  string fragment = 9;
}

message HeaderEntry {
  repeated string values = 1;
}

message Response {
  // Not used right now but reserving in case it turns out that streaming
  // makes things more economical on the gRPC side
  //uint64 id = 1;
  uint32 status_code = 2;
  bytes body = 3;
}
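The Request message above mirrors the parts of an http.Request that must survive forwarding: method, URL, headers, body, host, remote address, and the client's TLS certificates. A rough sketch of how such a message could be round-tripped with the helpers this commit adds (GenerateForwardedRequest and ParseForwardedRequest, shown later in the diff); the token and URL are illustrative:

package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/hashicorp/vault/helper/forwarding"
)

func main() {
	// Build an ordinary request on a standby node...
	orig, _ := http.NewRequest("PUT", "https://vault.example.com:8200/v1/secret/foo",
		strings.NewReader(`{"value":"bar"}`))
	orig.Header.Set("X-Vault-Token", "some-token")

	// ...capture it as a forwarding.Request message...
	msg, err := forwarding.GenerateForwardedRequest(orig)
	if err != nil {
		panic(err)
	}

	// ...and reconstruct an equivalent http.Request on the active node.
	rebuilt, err := forwarding.ParseForwardedRequest(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(rebuilt.Method) // PUT
}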
|
@@ -1,4 +1,4 @@
package requestutil
package forwarding

import (
"bytes"
@@ -24,8 +24,39 @@ func (b bufCloser) Close() error {

// GenerateForwardedRequest generates a new http.Request that contains the
// original requests's information in the new request's body.
func GenerateForwardedRequest(req *http.Request, addr string) (*http.Request, error) {
fq := ForwardedRequest{
func GenerateForwardedHTTPRequest(req *http.Request, addr string) (*http.Request, error) {
fq, err := GenerateForwardedRequest(req)
if err != nil {
return nil, err
}

var newBody []byte
switch os.Getenv("VAULT_MESSAGE_TYPE") {
case "json":
newBody, err = jsonutil.EncodeJSON(fq)
case "json_compress":
newBody, err = jsonutil.EncodeJSONAndCompress(fq, &compressutil.CompressionConfig{
Type: compressutil.CompressionTypeLzw,
})
case "proto3":
fallthrough
default:
newBody, err = proto.Marshal(fq)
}
if err != nil {
return nil, err
}

ret, err := http.NewRequest("POST", addr, bytes.NewBuffer(newBody))
if err != nil {
return nil, err
}

return ret, nil
}

func GenerateForwardedRequest(req *http.Request) (*Request, error) {
fq := Request{
Method: req.Method,
HeaderEntries: make(map[string]*HeaderEntry, len(req.Header)),
Host: req.Host,
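GenerateForwardedHTTPRequest above chooses the wire encoding from the VAULT_MESSAGE_TYPE environment variable: "json", "json_compress" (LZW-compressed JSON), or proto3 by default. A hedged usage sketch of that switch; the cluster address in the URL is illustrative, the path is the forwarding endpoint registered later in the diff:

package example

import (
	"net/http"
	"os"

	"github.com/hashicorp/vault/helper/forwarding"
)

// forwardWithJSON wraps an existing request for the active node's
// /cluster/local/forwarded-request endpoint using compressed JSON instead of
// the default proto3 encoding.
func forwardWithJSON(client *http.Client, orig *http.Request) (*http.Response, error) {
	os.Setenv("VAULT_MESSAGE_TYPE", "json_compress")
	fwd, err := forwarding.GenerateForwardedHTTPRequest(orig,
		"https://10.0.0.5:8201/cluster/local/forwarded-request")
	if err != nil {
		return nil, err
	}
	return client.Do(fwd)
}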
@ -49,13 +80,6 @@ func GenerateForwardedRequest(req *http.Request, addr string) (*http.Request, er
|
|||
}
|
||||
}
|
||||
|
||||
if req.TLS != nil && req.TLS.PeerCertificates != nil && len(req.TLS.PeerCertificates) > 0 {
|
||||
fq.PeerCertificates = make([][]byte, len(req.TLS.PeerCertificates))
|
||||
for i, cert := range req.TLS.PeerCertificates {
|
||||
fq.PeerCertificates[i] = cert.Raw
|
||||
}
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
_, err := buf.ReadFrom(req.Body)
|
||||
if err != nil {
|
||||
|
@ -63,44 +87,27 @@ func GenerateForwardedRequest(req *http.Request, addr string) (*http.Request, er
|
|||
}
|
||||
fq.Body = buf.Bytes()
|
||||
|
||||
var newBody []byte
|
||||
switch os.Getenv("VAULT_MESSAGE_TYPE") {
|
||||
case "json":
|
||||
newBody, err = jsonutil.EncodeJSON(&fq)
|
||||
case "json_compress":
|
||||
newBody, err = jsonutil.EncodeJSONAndCompress(&fq, &compressutil.CompressionConfig{
|
||||
Type: compressutil.CompressionTypeLzw,
|
||||
})
|
||||
case "proto3":
|
||||
fallthrough
|
||||
default:
|
||||
newBody, err = proto.Marshal(&fq)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if req.TLS != nil && req.TLS.PeerCertificates != nil && len(req.TLS.PeerCertificates) > 0 {
|
||||
fq.PeerCertificates = make([][]byte, len(req.TLS.PeerCertificates))
|
||||
for i, cert := range req.TLS.PeerCertificates {
|
||||
fq.PeerCertificates[i] = cert.Raw
|
||||
}
|
||||
}
|
||||
|
||||
ret, err := http.NewRequest("POST", addr, bytes.NewBuffer(newBody))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
return &fq, nil
|
||||
}
|
||||
|
||||
// ParseForwardedRequest generates a new http.Request that is comprised of the
|
||||
// values in the given request's body, assuming it correctly parses into a
|
||||
// ForwardedRequest.
|
||||
func ParseForwardedRequest(req *http.Request) (*http.Request, error) {
|
||||
buf := bufCloser{
|
||||
Buffer: bytes.NewBuffer(nil),
|
||||
}
|
||||
func ParseForwardedHTTPRequest(req *http.Request) (*http.Request, error) {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
_, err := buf.ReadFrom(req.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fq := new(ForwardedRequest)
|
||||
fq := new(Request)
|
||||
switch os.Getenv("VAULT_MESSAGE_TYPE") {
|
||||
case "json", "json_compress":
|
||||
err = jsonutil.DecodeJSON(buf.Bytes(), fq)
|
||||
|
@ -111,10 +118,12 @@ func ParseForwardedRequest(req *http.Request) (*http.Request, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
_, err = buf.Write(fq.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return ParseForwardedRequest(fq)
|
||||
}
|
||||
|
||||
func ParseForwardedRequest(fq *Request) (*http.Request, error) {
|
||||
buf := bufCloser{
|
||||
Buffer: bytes.NewBuffer(fq.Body),
|
||||
}
|
||||
|
||||
ret := &http.Request{
|
||||
|
@@ -154,3 +163,41 @@ func ParseForwardedRequest(req *http.Request) (*http.Request, error) {

return ret, nil
}

type RPCResponseWriter struct {
statusCode int
header http.Header
body *bytes.Buffer
}

// NewRPCResponseWriter returns an initialized RPCResponseWriter
func NewRPCResponseWriter() *RPCResponseWriter {
w := &RPCResponseWriter{
header: make(http.Header),
body: new(bytes.Buffer),
statusCode: 200,
}
//w.header.Set("Content-Type", "application/octet-stream")
return w
}

func (w *RPCResponseWriter) Header() http.Header {
return w.header
}

func (w *RPCResponseWriter) Write(buf []byte) (int, error) {
w.body.Write(buf)
return len(buf), nil
}

func (w *RPCResponseWriter) WriteHeader(code int) {
w.statusCode = code
}

func (w *RPCResponseWriter) StatusCode() int {
return w.statusCode
}

func (w *RPCResponseWriter) Body() *bytes.Buffer {
return w.body
}
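RPCResponseWriter above is a minimal http.ResponseWriter that buffers status, headers, and body in memory, so a forwarded request can be run through the normal handler and the result shipped back over the cluster connection instead of a real socket. A small illustrative capture step, using only the methods shown in the diff:

package example

import (
	"net/http"

	"github.com/hashicorp/vault/helper/forwarding"
)

// serveCaptured runs a request through an ordinary http.Handler but captures
// the response in memory instead of writing it to a network connection.
func serveCaptured(handler http.Handler, req *http.Request) (statusCode int, body []byte) {
	w := forwarding.NewRPCResponseWriter()
	handler.ServeHTTP(w, req)
	return w.StatusCode(), w.Body().Bytes()
}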
|
@ -1,4 +1,4 @@
|
|||
package requestutil
|
||||
package forwarding
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
|
@ -69,7 +69,7 @@ func testForwardedRequestGenerateParse(t testing.TB) int64 {
|
|||
}
|
||||
|
||||
// Generate the request with the forwarded request in the body
|
||||
req, err = GenerateForwardedRequest(initialReq, "https://bloopety.bloop:8201")
|
||||
req, err = GenerateForwardedHTTPRequest(initialReq, "https://bloopety.bloop:8201")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -88,7 +88,7 @@ func testForwardedRequestGenerateParse(t testing.TB) int64 {
|
|||
}
|
||||
|
||||
// Now extract the forwarded request to generate a final request for processing
|
||||
finalReq, err := ParseForwardedRequest(intreq)
|
||||
finalReq, err := ParseForwardedHTTPRequest(intreq)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
|
@ -1,126 +0,0 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: forwarding_types.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package requestutil is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
forwarding_types.proto
|
||||
|
||||
It has these top-level messages:
|
||||
ForwardedRequest
|
||||
URL
|
||||
HeaderEntry
|
||||
*/
|
||||
package requestutil
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type ForwardedRequest struct {
|
||||
Method string `protobuf:"bytes,1,opt,name=method" json:"method,omitempty"`
|
||||
Url *URL `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
|
||||
HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,3,rep,name=header_entries,json=headerEntries" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
Body []byte `protobuf:"bytes,4,opt,name=body,proto3" json:"body,omitempty"`
|
||||
Host string `protobuf:"bytes,5,opt,name=host" json:"host,omitempty"`
|
||||
RemoteAddr string `protobuf:"bytes,6,opt,name=remote_addr,json=remoteAddr" json:"remote_addr,omitempty"`
|
||||
PeerCertificates [][]byte `protobuf:"bytes,7,rep,name=peer_certificates,json=peerCertificates,proto3" json:"peer_certificates,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ForwardedRequest) Reset() { *m = ForwardedRequest{} }
|
||||
func (m *ForwardedRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ForwardedRequest) ProtoMessage() {}
|
||||
func (*ForwardedRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *ForwardedRequest) GetUrl() *URL {
|
||||
if m != nil {
|
||||
return m.Url
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ForwardedRequest) GetHeaderEntries() map[string]*HeaderEntry {
|
||||
if m != nil {
|
||||
return m.HeaderEntries
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type URL struct {
|
||||
Scheme string `protobuf:"bytes,1,opt,name=scheme" json:"scheme,omitempty"`
|
||||
Opaque string `protobuf:"bytes,2,opt,name=opaque" json:"opaque,omitempty"`
|
||||
// This isn't needed now but might be in the future, so we'll skip the
|
||||
// number to keep the ordering in net/url
|
||||
// UserInfo user = 3;
|
||||
Host string `protobuf:"bytes,4,opt,name=host" json:"host,omitempty"`
|
||||
Path string `protobuf:"bytes,5,opt,name=path" json:"path,omitempty"`
|
||||
RawPath string `protobuf:"bytes,6,opt,name=raw_path,json=rawPath" json:"raw_path,omitempty"`
|
||||
// This also isn't needed right now, but we'll reserve the number
|
||||
// bool force_query = 7;
|
||||
RawQuery string `protobuf:"bytes,8,opt,name=raw_query,json=rawQuery" json:"raw_query,omitempty"`
|
||||
Fragment string `protobuf:"bytes,9,opt,name=fragment" json:"fragment,omitempty"`
|
||||
}
|
||||
|
||||
func (m *URL) Reset() { *m = URL{} }
|
||||
func (m *URL) String() string { return proto.CompactTextString(m) }
|
||||
func (*URL) ProtoMessage() {}
|
||||
func (*URL) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
type HeaderEntry struct {
|
||||
Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
|
||||
}
|
||||
|
||||
func (m *HeaderEntry) Reset() { *m = HeaderEntry{} }
|
||||
func (m *HeaderEntry) String() string { return proto.CompactTextString(m) }
|
||||
func (*HeaderEntry) ProtoMessage() {}
|
||||
func (*HeaderEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ForwardedRequest)(nil), "requestutil.ForwardedRequest")
|
||||
proto.RegisterType((*URL)(nil), "requestutil.URL")
|
||||
proto.RegisterType((*HeaderEntry)(nil), "requestutil.HeaderEntry")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("forwarding_types.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 390 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x52, 0xc1, 0x8a, 0xd4, 0x40,
|
||||
0x10, 0x25, 0x93, 0xec, 0xec, 0xa4, 0xb2, 0x4a, 0xec, 0xc3, 0xd2, 0xae, 0x07, 0x43, 0x40, 0x08,
|
||||
0x08, 0x41, 0xd6, 0x8b, 0x78, 0x13, 0x51, 0x3c, 0xec, 0x41, 0x1b, 0x16, 0xc1, 0x4b, 0xe8, 0x9d,
|
||||
0xae, 0x99, 0x04, 0x27, 0xd3, 0x99, 0x4a, 0xc7, 0x21, 0x9f, 0xe5, 0xe7, 0x79, 0x93, 0xee, 0xce,
|
||||
0x68, 0x46, 0x4f, 0xa9, 0xf7, 0x5e, 0x55, 0xa5, 0xde, 0xa3, 0xe1, 0x7a, 0xa3, 0xe9, 0x28, 0x49,
|
||||
0x35, 0xfb, 0x6d, 0x65, 0xc6, 0x0e, 0xfb, 0xb2, 0x23, 0x6d, 0x34, 0x4b, 0x08, 0x0f, 0x03, 0xf6,
|
||||
0x66, 0x30, 0xcd, 0x2e, 0xff, 0xb5, 0x80, 0xf4, 0xa3, 0xef, 0x43, 0x25, 0xbc, 0xc0, 0xae, 0x61,
|
||||
0xd9, 0xa2, 0xa9, 0xb5, 0xe2, 0x41, 0x16, 0x14, 0xb1, 0x98, 0x10, 0xcb, 0x21, 0x1c, 0x68, 0xc7,
|
||||
0x17, 0x59, 0x50, 0x24, 0xb7, 0x69, 0x39, 0xdb, 0x53, 0xde, 0x8b, 0x3b, 0x61, 0x45, 0xf6, 0x15,
|
||||
0x1e, 0xd7, 0x28, 0x15, 0x52, 0x85, 0x7b, 0x43, 0x0d, 0xf6, 0x3c, 0xcc, 0xc2, 0x22, 0xb9, 0x7d,
|
||||
0x75, 0xd6, 0xfe, 0xef, 0x2f, 0xcb, 0x4f, 0x6e, 0xe6, 0x83, 0x1f, 0xb1, 0x9f, 0x51, 0x3c, 0xaa,
|
||||
0xe7, 0x1c, 0x63, 0x10, 0x3d, 0x68, 0x35, 0xf2, 0x28, 0x0b, 0x8a, 0x2b, 0xe1, 0x6a, 0xcb, 0xd5,
|
||||
0xba, 0x37, 0xfc, 0xc2, 0x9d, 0xe9, 0x6a, 0xf6, 0x1c, 0x12, 0xc2, 0x56, 0x1b, 0xac, 0xa4, 0x52,
|
||||
0xc4, 0x97, 0x4e, 0x02, 0x4f, 0xbd, 0x53, 0x8a, 0xd8, 0x4b, 0x78, 0xd2, 0x21, 0x52, 0xb5, 0x46,
|
||||
0x32, 0xcd, 0xa6, 0x59, 0x4b, 0x83, 0x3d, 0xbf, 0xcc, 0xc2, 0xe2, 0x4a, 0xa4, 0x56, 0x78, 0x3f,
|
||||
0xe3, 0x6f, 0xbe, 0x01, 0xfb, 0xff, 0x34, 0x96, 0x42, 0xf8, 0x1d, 0xc7, 0x29, 0x1d, 0x5b, 0xb2,
|
||||
0x12, 0x2e, 0x7e, 0xc8, 0xdd, 0x80, 0x53, 0x38, 0xfc, 0xcc, 0xed, 0xdf, 0x0d, 0xa3, 0xf0, 0x6d,
|
||||
0x6f, 0x17, 0x6f, 0x82, 0xfc, 0x67, 0x00, 0xe1, 0xbd, 0xb8, 0xb3, 0x71, 0xf7, 0xeb, 0x1a, 0x5b,
|
||||
0x3c, 0xc5, 0xed, 0x91, 0xe5, 0x75, 0x27, 0x0f, 0xd3, 0xd2, 0x58, 0x4c, 0xe8, 0x8f, 0xeb, 0x68,
|
||||
0xe6, 0x9a, 0x41, 0xd4, 0x49, 0x53, 0x9f, 0x92, 0xb0, 0x35, 0x7b, 0x0a, 0x2b, 0x92, 0xc7, 0xca,
|
||||
0xf1, 0x3e, 0x86, 0x4b, 0x92, 0xc7, 0xcf, 0x56, 0x7a, 0x06, 0xb1, 0x95, 0x0e, 0x03, 0xd2, 0xc8,
|
||||
0x57, 0x4e, 0xb3, 0xbd, 0x5f, 0x2c, 0x66, 0x37, 0xb0, 0xda, 0x90, 0xdc, 0xb6, 0xb8, 0x37, 0x3c,
|
||||
0xf6, 0xda, 0x09, 0xe7, 0x2f, 0x20, 0x99, 0xb9, 0xb1, 0x27, 0x3a, 0x3f, 0x3d, 0x0f, 0xb2, 0xd0,
|
||||
0x9e, 0xe8, 0xd1, 0xc3, 0xd2, 0x3d, 0xb5, 0xd7, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xb1,
|
||||
0x66, 0x16, 0x84, 0x02, 0x00, 0x00,
|
||||
}
|
|
@ -1,32 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package requestutil;
|
||||
|
||||
message ForwardedRequest {
|
||||
string method = 1;
|
||||
URL url = 2;
|
||||
map<string, HeaderEntry> header_entries = 3;
|
||||
bytes body = 4;
|
||||
string host = 5;
|
||||
string remote_addr = 6;
|
||||
repeated bytes peer_certificates = 7;
|
||||
}
|
||||
|
||||
message URL {
|
||||
string scheme = 1;
|
||||
string opaque = 2;
|
||||
// This isn't needed now but might be in the future, so we'll skip the
|
||||
// number to keep the ordering in net/url
|
||||
//UserInfo user = 3;
|
||||
string host = 4;
|
||||
string path = 5;
|
||||
string raw_path = 6;
|
||||
// This also isn't needed right now, but we'll reserve the number
|
||||
//bool force_query = 7;
|
||||
string raw_query = 8;
|
||||
string fragment = 9;
|
||||
}
|
||||
|
||||
message HeaderEntry {
|
||||
repeated string values = 1;
|
||||
}
|
|
@ -8,12 +8,15 @@ import (
|
|||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/vault/api"
|
||||
credCert "github.com/hashicorp/vault/builtin/credential/cert"
|
||||
|
@ -139,9 +142,23 @@ func TestHTTP_Fallback_Disabled(t *testing.T) {
|
|||
// This function recreates the fuzzy testing from transit to pipe a large
|
||||
// number of requests from the standbys to the active node.
|
||||
func TestHTTP_Forwarding_Stress(t *testing.T) {
|
||||
testHTTP_Forwarding_Stress_Common(t, false, false, 50)
|
||||
testHTTP_Forwarding_Stress_Common(t, false, true, 50)
|
||||
testHTTP_Forwarding_Stress_Common(t, true, false, 50)
|
||||
testHTTP_Forwarding_Stress_Common(t, true, true, 50)
|
||||
os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "")
|
||||
}
|
||||
|
||||
func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uint64) {
|
||||
testPlaintext := "the quick brown fox"
|
||||
testPlaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA=="
|
||||
|
||||
if rpc {
|
||||
os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "1")
|
||||
} else {
|
||||
os.Setenv("VAULT_USE_GRPC_REQUEST_FORWARDING", "")
|
||||
}
|
||||
|
||||
handler1 := http.NewServeMux()
|
||||
handler2 := http.NewServeMux()
|
||||
handler3 := http.NewServeMux()
|
||||
|
@@ -178,8 +195,10 @@ func TestHTTP_Forwarding_Stress(t *testing.T) {
fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[2].Listeners[0].Address.Port),
}

transport := cleanhttp.DefaultPooledTransport()
transport.TLSClientConfig = cores[0].TLSConfig
transport := &http.Transport{
TLSClientConfig: cores[0].TLSConfig,
}
http2.ConfigureTransport(transport)

client := &http.Client{
Transport: transport,
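The stress test now builds its client transport by hand and runs it through http2.ConfigureTransport, so the same TLS client configuration can speak HTTP/2 to the cluster listeners. A minimal sketch of that pattern, assuming a *tls.Config is already available:

package example

import (
	"crypto/tls"
	"net/http"

	"golang.org/x/net/http2"
)

// newH2Client returns an HTTP client whose transport has been upgraded to
// negotiate HTTP/2 over TLS.
func newH2Client(tlsConfig *tls.Config) (*http.Client, error) {
	transport := &http.Transport{TLSClientConfig: tlsConfig}
	if err := http2.ConfigureTransport(transport); err != nil {
		return nil, err
	}
	return &http.Client{Transport: transport}, nil
}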
@ -201,33 +220,43 @@ func TestHTTP_Forwarding_Stress(t *testing.T) {
|
|||
}
|
||||
//core.Logger().Printf("[TRACE] done mounting transit")
|
||||
|
||||
var totalOps int64
|
||||
var successfulOps int64
|
||||
var totalOps uint64
|
||||
var successfulOps uint64
|
||||
var key1ver int64 = 1
|
||||
var key2ver int64 = 1
|
||||
var key3ver int64 = 1
|
||||
var numWorkers uint64 = 50
|
||||
var numWorkersStarted uint64
|
||||
var waitLock sync.Mutex
|
||||
waitCond := sync.NewCond(&waitLock)
|
||||
|
||||
// This is the goroutine loop
|
||||
doFuzzy := func(id int) {
|
||||
doFuzzy := func(id int, parallel bool) {
|
||||
var myTotalOps uint64
|
||||
var mySuccessfulOps uint64
|
||||
var keyVer int64 = 1
|
||||
// Check for panics, otherwise notify we're done
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
core.Logger().Printf("[ERR] got a panic: %v", err)
|
||||
t.Fail()
|
||||
}
|
||||
atomic.AddUint64(&totalOps, myTotalOps)
|
||||
atomic.AddUint64(&successfulOps, mySuccessfulOps)
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// Holds the latest encrypted value for each key
|
||||
latestEncryptedText := map[string]string{}
|
||||
|
||||
startTime := time.Now()
|
||||
client := &http.Client{
|
||||
Transport: transport,
|
||||
}
|
||||
|
||||
var chosenFunc, chosenKey, chosenHost string
|
||||
|
||||
myRand := rand.New(rand.NewSource(int64(id) * 400))
|
||||
|
||||
doReq := func(method, url string, body io.Reader) (*http.Response, error) {
|
||||
req, err := http.NewRequest(method, url, body)
|
||||
if err != nil {
|
||||
|
@ -253,7 +282,7 @@ func TestHTTP_Forwarding_Stress(t *testing.T) {
|
|||
}
|
||||
|
||||
result := &api.Response{Response: resp}
|
||||
err = result.Error()
|
||||
err := result.Error()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -269,26 +298,49 @@ func TestHTTP_Forwarding_Stress(t *testing.T) {
|
|||
for _, chosenHost := range hosts {
|
||||
for _, chosenKey := range keys {
|
||||
// Try to write the key to make sure it exists
|
||||
_, err := doReq("POST", chosenHost+"keys/"+chosenKey, bytes.NewBuffer([]byte("{}")))
|
||||
_, err := doReq("POST", chosenHost+"keys/"+fmt.Sprintf("%s-%t", chosenKey, parallel), bytes.NewBuffer([]byte("{}")))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//core.Logger().Printf("[TRACE] Starting %d", id)
|
||||
if !parallel {
|
||||
chosenHost = hosts[id%len(hosts)]
|
||||
chosenKey = fmt.Sprintf("key-%t-%d", parallel, id)
|
||||
|
||||
_, err := doReq("POST", chosenHost+"keys/"+chosenKey, bytes.NewBuffer([]byte("{}")))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
atomic.AddUint64(&numWorkersStarted, 1)
|
||||
|
||||
waitCond.L.Lock()
|
||||
for atomic.LoadUint64(&numWorkersStarted) != numWorkers {
|
||||
waitCond.Wait()
|
||||
}
|
||||
waitCond.L.Unlock()
|
||||
waitCond.Broadcast()
|
||||
|
||||
core.Logger().Printf("[TRACE] Starting %d", id)
|
||||
|
||||
startTime := time.Now()
|
||||
for {
|
||||
// Stop after 10 seconds
|
||||
if time.Now().Sub(startTime) > 10*time.Second {
|
||||
return
|
||||
}
|
||||
|
||||
atomic.AddInt64(&totalOps, 1)
|
||||
myTotalOps++
|
||||
|
||||
// Pick a function and a key
|
||||
chosenFunc = funcs[rand.Int()%len(funcs)]
|
||||
chosenKey = keys[rand.Int()%len(keys)]
|
||||
chosenHost = hosts[rand.Int()%len(hosts)]
|
||||
chosenFunc = funcs[myRand.Int()%len(funcs)]
|
||||
if parallel {
|
||||
chosenKey = fmt.Sprintf("%s-%t", keys[myRand.Int()%len(keys)], parallel)
|
||||
chosenHost = hosts[myRand.Int()%len(hosts)]
|
||||
}
|
||||
|
||||
switch chosenFunc {
|
||||
// Encrypt our plaintext and store the result
|
||||
|
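The hunk above gates every stress-test worker on a sync.Cond: each goroutine increments an atomic counter when its setup is done, waits until the counter reaches the expected worker count, and broadcasts as it proceeds, so no worker begins the timed loop early. A standalone sketch of that start-gate pattern (the worker count and body are illustrative):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	const numWorkers uint64 = 4
	var started uint64
	var wg sync.WaitGroup

	var mu sync.Mutex
	gate := sync.NewCond(&mu)

	for i := 0; i < int(numWorkers); i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()

			// Signal that this worker is ready, then wait until all are.
			atomic.AddUint64(&started, 1)
			gate.L.Lock()
			for atomic.LoadUint64(&started) != numWorkers {
				gate.Wait()
			}
			gate.L.Unlock()
			gate.Broadcast()

			fmt.Printf("worker %d running\n", id)
		}(i)
	}
	wg.Wait()
}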
@ -310,13 +362,13 @@ func TestHTTP_Forwarding_Stress(t *testing.T) {
|
|||
}
|
||||
latestEncryptedText[chosenKey] = secret.Data["ciphertext"].(string)
|
||||
|
||||
atomic.AddInt64(&successfulOps, 1)
|
||||
mySuccessfulOps++
|
||||
|
||||
// Decrypt the ciphertext and compare the result
|
||||
case "decrypt":
|
||||
ct := latestEncryptedText[chosenKey]
|
||||
if ct == "" {
|
||||
atomic.AddInt64(&successfulOps, 1)
|
||||
mySuccessfulOps++
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -330,7 +382,7 @@ func TestHTTP_Forwarding_Stress(t *testing.T) {
|
|||
if err != nil {
|
||||
// This could well happen since the min version is jumping around
|
||||
if strings.Contains(err.Error(), transit.ErrTooOld) {
|
||||
atomic.AddInt64(&successfulOps, 1)
|
||||
mySuccessfulOps++
|
||||
continue
|
||||
}
|
||||
panic(err)
|
||||
|
@ -345,7 +397,7 @@ func TestHTTP_Forwarding_Stress(t *testing.T) {
|
|||
panic(fmt.Errorf("got bad plaintext back: %s", pt))
|
||||
}
|
||||
|
||||
atomic.AddInt64(&successfulOps, 1)
|
||||
mySuccessfulOps++
|
||||
|
||||
// Rotate to a new key version
|
||||
case "rotate":
|
||||
|
@ -354,29 +406,36 @@ func TestHTTP_Forwarding_Stress(t *testing.T) {
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
switch chosenKey {
|
||||
case "test1":
|
||||
atomic.AddInt64(&key1ver, 1)
|
||||
case "test2":
|
||||
atomic.AddInt64(&key2ver, 1)
|
||||
case "test3":
|
||||
atomic.AddInt64(&key3ver, 1)
|
||||
if parallel {
|
||||
switch chosenKey {
|
||||
case "test1":
|
||||
atomic.AddInt64(&key1ver, 1)
|
||||
case "test2":
|
||||
atomic.AddInt64(&key2ver, 1)
|
||||
case "test3":
|
||||
atomic.AddInt64(&key3ver, 1)
|
||||
}
|
||||
} else {
|
||||
keyVer++
|
||||
}
|
||||
atomic.AddInt64(&successfulOps, 1)
|
||||
|
||||
mySuccessfulOps++
|
||||
|
||||
// Change the min version, which also tests the archive functionality
|
||||
case "change_min_version":
|
||||
var latestVersion int64
|
||||
switch chosenKey {
|
||||
case "test1":
|
||||
latestVersion = atomic.LoadInt64(&key1ver)
|
||||
case "test2":
|
||||
latestVersion = atomic.LoadInt64(&key2ver)
|
||||
case "test3":
|
||||
latestVersion = atomic.LoadInt64(&key3ver)
|
||||
var latestVersion int64 = keyVer
|
||||
if parallel {
|
||||
switch chosenKey {
|
||||
case "test1":
|
||||
latestVersion = atomic.LoadInt64(&key1ver)
|
||||
case "test2":
|
||||
latestVersion = atomic.LoadInt64(&key2ver)
|
||||
case "test3":
|
||||
latestVersion = atomic.LoadInt64(&key3ver)
|
||||
}
|
||||
}
|
||||
|
||||
setVersion := (rand.Int63() % latestVersion) + 1
|
||||
setVersion := (myRand.Int63() % latestVersion) + 1
|
||||
|
||||
//core.Logger().Printf("[TRACE] %s, %s, %d, new min version %d", chosenFunc, chosenKey, id, setVersion)
|
||||
|
||||
|
@ -385,25 +444,27 @@ func TestHTTP_Forwarding_Stress(t *testing.T) {
|
|||
panic(err)
|
||||
}
|
||||
|
||||
atomic.AddInt64(&successfulOps, 1)
|
||||
mySuccessfulOps++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Spawn 20 of these workers for 10 seconds
|
||||
for i := 0; i < 20; i++ {
|
||||
atomic.StoreUint64(&numWorkers, num)
|
||||
|
||||
// Spawn some of these workers for 10 seconds
|
||||
for i := 0; i < int(atomic.LoadUint64(&numWorkers)); i++ {
|
||||
wg.Add(1)
|
||||
//core.Logger().Printf("[TRACE] spawning %d", i)
|
||||
go doFuzzy(i)
|
||||
go doFuzzy(i+1, parallel)
|
||||
}
|
||||
|
||||
// Wait for them all to finish
|
||||
wg.Wait()
|
||||
|
||||
core.Logger().Printf("[TRACE] total operations tried: %d, total successful: %d", totalOps, successfulOps)
|
||||
if totalOps != successfulOps {
|
||||
t.Fatalf("total/successful ops mismatch: %d/%d", totalOps, successfulOps)
|
||||
if totalOps == 0 || totalOps != successfulOps {
|
||||
t.Fatalf("total/successful ops zero or mismatch: %d/%d; rpc: %t, parallel: %t, num %d", totalOps, successfulOps, rpc, parallel, num)
|
||||
}
|
||||
t.Logf("total operations tried: %d, total successful: %d; rpc: %t, parallel: %t, num %d", totalOps, successfulOps, rpc, parallel, num)
|
||||
}
|
||||
|
||||
// This tests TLS connection state forwarding by ensuring that we can use a
|
||||
|
|
|
@@ -1,7 +1,6 @@
package http

import (
"bytes"
"encoding/json"
"fmt"
"io"

@@ -137,7 +136,7 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle
// Attempt forwarding the request. If we cannot forward -- perhaps it's
// been disabled on the active node -- this will return with an
// ErrCannotForward and we simply fall back
resp, err := core.ForwardRequest(r)
statusCode, retBytes, err := core.ForwardRequest(r)
if err != nil {
if err == vault.ErrCannotForward {
core.Logger().Printf("[TRACE] http/handleRequestForwarding: cannot forward (possibly disabled on active node), falling back")

@@ -149,20 +148,9 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle
handler.ServeHTTP(w, r)
return
}
defer resp.Body.Close()

// Read the body into a buffer so we can write it back out to the
// original requestor
buf := bytes.NewBuffer(nil)
_, err = buf.ReadFrom(resp.Body)
if err != nil {
core.Logger().Printf("[ERR] http/handleRequestForwarding: error reading response body: %v", err)
respondError(w, http.StatusInternalServerError, err)
return
}

w.WriteHeader(resp.StatusCode)
w.Write(buf.Bytes())
w.WriteHeader(statusCode)
w.Write(retBytes)
return
})
}
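With the change above, Core.ForwardRequest no longer hands back an *http.Response to drain; it returns the status code and body bytes directly, so a standby can relay the active node's answer without any extra buffering. A condensed, illustrative view of the resulting handler flow (the helper name here is hypothetical):

package example

import (
	"net/http"

	"github.com/hashicorp/vault/vault"
)

// forwardOrFallback mirrors the new handler flow: forward when possible,
// otherwise serve the request locally.
func forwardOrFallback(core *vault.Core, local http.Handler, w http.ResponseWriter, r *http.Request) {
	statusCode, body, err := core.ForwardRequest(r)
	if err != nil {
		if err == vault.ErrCannotForward {
			// Forwarding disabled on the active node; fall back to local handling.
			local.ServeHTTP(w, r)
			return
		}
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(statusCode)
	w.Write(body)
}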
vault/cluster.go (363 changed lines)
@@ -20,8 +20,8 @@ import (
"golang.org/x/net/http2"

"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/forwarding"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/requestutil"
)

const (

@@ -46,7 +46,7 @@ type clusterKeyParams struct {
}

type activeConnection struct {
*http.Client
transport *http2.Transport
clusterAddr string
}
@ -86,21 +86,26 @@ func (c *Core) Cluster() (*Cluster, error) {
|
|||
return &cluster, nil
|
||||
}
|
||||
|
||||
// This is idempotent, so we return nil if there is no entry yet (say, because
|
||||
// the active node has not yet generated this)
|
||||
// This sets our local cluster cert and private key based on the advertisement.
|
||||
// It also ensures the cert is in our local cluster cert pool.
|
||||
func (c *Core) loadClusterTLS(adv activeAdvertisement) error {
|
||||
c.clusterParamsLock.Lock()
|
||||
defer c.clusterParamsLock.Unlock()
|
||||
|
||||
switch {
|
||||
case adv.ClusterKeyParams.X == nil, adv.ClusterKeyParams.Y == nil, adv.ClusterKeyParams.D == nil:
|
||||
c.logger.Printf("[ERR] core/loadClusterPrivateKey: failed to parse local cluster key due to missing params")
|
||||
return fmt.Errorf("failed to parse local cluster key")
|
||||
|
||||
case adv.ClusterKeyParams.Type == corePrivateKeyTypeP521:
|
||||
// Nothing, this is what we want
|
||||
|
||||
default:
|
||||
c.logger.Printf("[ERR] core/loadClusterPrivateKey: unknown local cluster key type %v", adv.ClusterKeyParams.Type)
|
||||
return fmt.Errorf("failed to find valid local cluster key type")
|
||||
}
|
||||
|
||||
// Prevent data races with the TLS parameters
|
||||
c.clusterParamsLock.Lock()
|
||||
defer c.clusterParamsLock.Unlock()
|
||||
|
||||
c.localClusterPrivateKey = &ecdsa.PrivateKey{
|
||||
PublicKey: ecdsa.PublicKey{
|
||||
Curve: elliptic.P521(),
|
||||
|
@ -127,9 +132,6 @@ func (c *Core) loadClusterTLS(adv activeAdvertisement) error {
|
|||
// Entries will be created only if they are not already present. If clusterName
|
||||
// is not supplied, this method will auto-generate it.
|
||||
func (c *Core) setupCluster() error {
|
||||
c.clusterParamsLock.Lock()
|
||||
defer c.clusterParamsLock.Unlock()
|
||||
|
||||
// Check if storage index is already present or not
|
||||
cluster, err := c.Cluster()
|
||||
if err != nil {
|
||||
|
@ -173,58 +175,65 @@ func (c *Core) setupCluster() error {
|
|||
modified = true
|
||||
}
|
||||
|
||||
// Create a private key
|
||||
{
|
||||
c.logger.Printf("[TRACE] core: generating cluster private key")
|
||||
key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to generate local cluster key: %v", err)
|
||||
return err
|
||||
// If we're using HA, generate server-to-server parameters
|
||||
if c.ha != nil {
|
||||
// Prevent data races with the TLS parameters
|
||||
c.clusterParamsLock.Lock()
|
||||
defer c.clusterParamsLock.Unlock()
|
||||
|
||||
// Create a private key
|
||||
{
|
||||
c.logger.Printf("[TRACE] core: generating cluster private key")
|
||||
key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to generate local cluster key: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
c.localClusterPrivateKey = key
|
||||
}
|
||||
|
||||
c.localClusterPrivateKey = key
|
||||
}
|
||||
// Create a certificate
|
||||
{
|
||||
c.logger.Printf("[TRACE] core: generating local cluster certificate")
|
||||
|
||||
// Create a certificate
|
||||
{
|
||||
c.logger.Printf("[TRACE] core: generating local cluster certificate")
|
||||
host, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return err
|
||||
template := &x509.Certificate{
|
||||
Subject: pkix.Name{
|
||||
CommonName: host,
|
||||
},
|
||||
DNSNames: []string{host},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{
|
||||
x509.ExtKeyUsageServerAuth,
|
||||
x509.ExtKeyUsageClientAuth,
|
||||
},
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
|
||||
SerialNumber: big.NewInt(mathrand.Int63()),
|
||||
NotBefore: time.Now().Add(-30 * time.Second),
|
||||
// 30 years of single-active uptime ought to be enough for anybody
|
||||
NotAfter: time.Now().Add(262980 * time.Hour),
|
||||
BasicConstraintsValid: true,
|
||||
IsCA: true,
|
||||
}
|
||||
|
||||
certBytes, err := x509.CreateCertificate(rand.Reader, template, template, c.localClusterPrivateKey.Public(), c.localClusterPrivateKey)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: error generating self-signed cert: %v", err)
|
||||
return fmt.Errorf("unable to generate local cluster certificate: %v", err)
|
||||
}
|
||||
|
||||
_, err = x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: error parsing self-signed cert: %v", err)
|
||||
return fmt.Errorf("error parsing generated certificate: %v", err)
|
||||
}
|
||||
|
||||
c.localClusterCert = certBytes
|
||||
}
|
||||
|
||||
template := &x509.Certificate{
|
||||
Subject: pkix.Name{
|
||||
CommonName: host,
|
||||
},
|
||||
DNSNames: []string{host},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{
|
||||
x509.ExtKeyUsageServerAuth,
|
||||
x509.ExtKeyUsageClientAuth,
|
||||
},
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
|
||||
SerialNumber: big.NewInt(mathrand.Int63()),
|
||||
NotBefore: time.Now().Add(-30 * time.Second),
|
||||
// 30 years of single-active uptime ought to be enough for anybody
|
||||
NotAfter: time.Now().Add(262980 * time.Hour),
|
||||
BasicConstraintsValid: true,
|
||||
IsCA: true,
|
||||
}
|
||||
|
||||
certBytes, err := x509.CreateCertificate(rand.Reader, template, template, c.localClusterPrivateKey.Public(), c.localClusterPrivateKey)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: error generating self-signed cert: %v", err)
|
||||
return fmt.Errorf("unable to generate local cluster certificate: %v", err)
|
||||
}
|
||||
|
||||
_, err = x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: error parsing self-signed cert: %v", err)
|
||||
return fmt.Errorf("error parsing generated certificate: %v", err)
|
||||
}
|
||||
|
||||
c.localClusterCert = certBytes
|
||||
}
|
||||
|
||||
if modified {
|
||||
|
@ -249,96 +258,66 @@ func (c *Core) setupCluster() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// SetClusterListenerSetupFunc sets the listener setup func, which is used to
|
||||
// know which ports to listen on and a handler to use.
|
||||
func (c *Core) SetClusterListenerSetupFunc(setupFunc func() ([]net.Listener, http.Handler, error)) {
|
||||
c.clusterListenerSetupFunc = setupFunc
|
||||
// SetClusterSetupFuncs sets the handler setup func
|
||||
func (c *Core) SetClusterSetupFuncs(handler func() (http.Handler, http.Handler)) {
|
||||
c.clusterHandlerSetupFunc = handler
|
||||
}
|
||||
|
||||
// startClusterListener starts cluster request listeners during postunseal. It
|
||||
// is assumed that the state lock is held while this is run.
|
||||
// is assumed that the state lock is held while this is run. Right now this
|
||||
// only starts forwarding listeners; it's TBD whether other request types will
|
||||
// be built in the same mechanism or started independently.
|
||||
func (c *Core) startClusterListener() error {
|
||||
if c.clusterListenerShutdownCh != nil {
|
||||
c.logger.Printf("[ERR] core/startClusterListener: attempt to set up cluster listeners when already set up")
|
||||
return fmt.Errorf("cluster listeners already setup")
|
||||
}
|
||||
|
||||
if c.clusterListenerSetupFunc == nil {
|
||||
c.logger.Printf("[ERR] core/startClusterListener: cluster listener setup function has not been set")
|
||||
return fmt.Errorf("cluster listener setup function has not been set")
|
||||
if c.clusterHandlerSetupFunc == nil {
|
||||
c.logger.Printf("[ERR] core/startClusterListener: cluster handler setup function has not been set")
|
||||
return fmt.Errorf("cluster handler setup function has not been set")
|
||||
}
|
||||
|
||||
if c.clusterAddr == "" {
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: clustering disabled, starting listeners")
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: clustering disabled, not starting listeners")
|
||||
return nil
|
||||
}
|
||||
|
||||
if c.clusterListenerAddrs == nil || len(c.clusterListenerAddrs) == 0 {
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: clustering not disabled but no addresses to listen on")
|
||||
return fmt.Errorf("cluster addresses not found")
|
||||
}
|
||||
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: starting listeners")
|
||||
|
||||
lns, handler, err := c.clusterListenerSetupFunc()
|
||||
err := c.startForwarding()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tlsConfig, err := c.ClusterTLSConfig()
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/startClusterListener: failed to get tls configuration: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
tlsLns := make([]net.Listener, 0, len(lns))
|
||||
for _, ln := range lns {
|
||||
tlsLn := tls.NewListener(ln, tlsConfig)
|
||||
tlsLns = append(tlsLns, tlsLn)
|
||||
server := &http.Server{
|
||||
Handler: handler,
|
||||
}
|
||||
http2.ConfigureServer(server, nil)
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: serving cluster requests on %s", tlsLn.Addr())
|
||||
go server.Serve(tlsLn)
|
||||
}
|
||||
|
||||
c.clusterListenerShutdownCh = make(chan struct{})
|
||||
c.clusterListenerShutdownSuccessCh = make(chan struct{})
|
||||
|
||||
go func() {
|
||||
<-c.clusterListenerShutdownCh
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: shutting down listeners")
|
||||
for _, tlsLn := range tlsLns {
|
||||
tlsLn.Close()
|
||||
}
|
||||
close(c.clusterListenerShutdownSuccessCh)
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// stopClusterListener stops any existing listeners during preseal. It is
|
||||
// assumed that the state lock is held while this is run.
|
||||
func (c *Core) stopClusterListener() {
|
||||
c.logger.Printf("[TRACE] core/stopClusterListener: stopping listeners")
|
||||
if c.clusterListenerShutdownCh != nil {
|
||||
close(c.clusterListenerShutdownCh)
|
||||
defer func() { c.clusterListenerShutdownCh = nil }()
|
||||
if c.clusterAddr == "" {
|
||||
c.logger.Printf("[TRACE] core/stopClusterListener: clustering disabled, nothing to do")
|
||||
return
|
||||
}
|
||||
|
||||
c.logger.Printf("[TRACE] core/stopClusterListener: stopping listeners")
|
||||
|
||||
// Tell the goroutine managing the listeners to perform the shutdown
|
||||
// process
|
||||
c.clusterListenerShutdownCh <- struct{}{}
|
||||
|
||||
// The reason for this loop-de-loop is that we may be unsealing again
|
||||
// quickly, and if the listeners are not yet closed, we will get socket
|
||||
// bind errors. This ensures proper ordering.
|
||||
if c.clusterListenerShutdownSuccessCh == nil {
|
||||
return
|
||||
}
|
||||
c.logger.Printf("[TRACE] core/stopClusterListener: waiting for success notification")
|
||||
<-c.clusterListenerShutdownSuccessCh
|
||||
defer func() { c.clusterListenerShutdownSuccessCh = nil }()
|
||||
c.logger.Printf("[TRACE] core/stopClusterListener: success")
|
||||
}
|
||||
|
||||
// ClusterTLSConfig generates a TLS configuration based on the local cluster
|
||||
// key and cert. This isn't called often and we lock because the CertPool is
|
||||
// not concurrency-safe.
|
||||
// key and cert.
|
||||
func (c *Core) ClusterTLSConfig() (*tls.Config, error) {
|
||||
c.clusterParamsLock.Lock()
|
||||
defer c.clusterParamsLock.Unlock()
|
||||
|
||||
cluster, err := c.Cluster()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -346,6 +325,11 @@ func (c *Core) ClusterTLSConfig() (*tls.Config, error) {
|
|||
if cluster == nil {
|
||||
return nil, fmt.Errorf("cluster information is nil")
|
||||
}
|
||||
|
||||
// Prevent data races with the TLS parameters
|
||||
c.clusterParamsLock.Lock()
|
||||
defer c.clusterParamsLock.Unlock()
|
||||
|
||||
if c.localClusterCert == nil || len(c.localClusterCert) == 0 {
|
||||
return nil, fmt.Errorf("cluster certificate is nil")
|
||||
}
|
||||
|
@ -369,131 +353,70 @@ func (c *Core) ClusterTLSConfig() (*tls.Config, error) {
|
|||
ServerName: parsedCert.Subject.CommonName,
|
||||
ClientAuth: tls.RequireAndVerifyClientCert,
|
||||
ClientCAs: c.localClusterCertPool,
|
||||
NextProtos: []string{
|
||||
"h2",
|
||||
},
|
||||
}
|
||||
|
||||
return tlsConfig, nil
|
||||
}
|
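ClusterTLSConfig above returns a mutual-TLS configuration built from the local cluster certificate and key: client certificates are required and "h2" is advertised in NextProtos so forwarding connections negotiate HTTP/2. An illustrative sketch of a standby dialing another node's cluster port with such a config (the address and helper name are assumptions):

package example

import (
	"crypto/tls"
	"fmt"
)

// dialCluster opens a mutual-TLS connection to a cluster port using a config
// like the one ClusterTLSConfig returns, and verifies HTTP/2 was negotiated.
func dialCluster(addr string, tlsConfig *tls.Config) (*tls.Conn, error) {
	conn, err := tls.Dial("tcp", addr, tlsConfig)
	if err != nil {
		return nil, err
	}
	if err := conn.Handshake(); err != nil {
		conn.Close()
		return nil, err
	}
	if proto := conn.ConnectionState().NegotiatedProtocol; proto != "h2" {
		conn.Close()
		return nil, fmt.Errorf("expected ALPN protocol h2, got %q", proto)
	}
	return conn, nil
}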
||||
|
||||
// refreshRequestForwardingConnection ensures that the client/transport are
|
||||
// alive and that the current active address value matches the most
|
||||
// recently-known address.
|
||||
func (c *Core) refreshRequestForwardingConnection(clusterAddr string) error {
|
||||
c.requestForwardingConnectionLock.Lock()
|
||||
defer c.requestForwardingConnectionLock.Unlock()
|
||||
|
||||
// It's nil but we don't have an address anyways, so exit
|
||||
if c.requestForwardingConnection == nil && clusterAddr == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NOTE: We don't fast path the case where we have a connection because the
|
||||
// address is the same, because the cert/key could have changed if the
|
||||
// active node ended up being the same node. Before we hit this function in
|
||||
// Leader() we'll have done a hash on the advertised info to ensure that we
|
||||
// won't hit this function unnecessarily anyways.
|
||||
|
||||
// Disabled, potentially
|
||||
if clusterAddr == "" {
|
||||
c.requestForwardingConnection = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
tlsConfig, err := c.ClusterTLSConfig()
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/refreshRequestForwardingConnection: error fetching cluster tls configuration: %v", err)
|
||||
return err
|
||||
}
|
||||
tp := &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
}
|
||||
err = http2.ConfigureTransport(tp)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/refreshRequestForwardingConnection: error configuring transport: %v", err)
|
||||
return err
|
||||
}
|
||||
c.requestForwardingConnection = &activeConnection{
|
||||
Client: &http.Client{
|
||||
Transport: tp,
|
||||
},
|
||||
clusterAddr: clusterAddr,
|
||||
}
|
||||
|
||||
return nil
|
||||
func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) {
|
||||
c.clusterListenerAddrs = addrs
|
||||
}
|
||||
|
||||
// ForwardRequest forwards a given request to the active node and returns the
|
||||
// response.
|
||||
func (c *Core) ForwardRequest(req *http.Request) (*http.Response, error) {
|
||||
c.requestForwardingConnectionLock.RLock()
|
||||
defer c.requestForwardingConnectionLock.RUnlock()
|
||||
if c.requestForwardingConnection == nil {
|
||||
return nil, ErrCannotForward
|
||||
}
|
||||
// WrapHandlerForClustering takes in Vault's HTTP handler and returns a setup
|
||||
// function that returns both the original handler and one wrapped with cluster
|
||||
// methods
|
||||
func WrapHandlerForClustering(handler http.Handler, logger *log.Logger) func() (http.Handler, http.Handler) {
|
||||
return func() (http.Handler, http.Handler) {
|
||||
// This mux handles cluster functions (right now, only forwarded requests)
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/cluster/local/forwarded-request", func(w http.ResponseWriter, req *http.Request) {
|
||||
freq, err := forwarding.ParseForwardedHTTPRequest(req)
|
||||
if err != nil {
|
||||
if logger != nil {
|
||||
logger.Printf("[ERR] http/ForwardedRequestHandler: error parsing forwarded request: %v", err)
|
||||
}
|
||||
|
||||
if c.requestForwardingConnection.clusterAddr == "" {
|
||||
return nil, ErrCannotForward
|
||||
}
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
|
||||
freq, err := requestutil.GenerateForwardedRequest(req, c.requestForwardingConnection.clusterAddr+"/cluster/local/forwarded-request")
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/ForwardRequest: error creating forwarded request: %v", err)
|
||||
return nil, fmt.Errorf("error creating forwarding request")
|
||||
}
|
||||
type errorResponse struct {
|
||||
Errors []string
|
||||
}
|
||||
resp := &errorResponse{
|
||||
Errors: []string{
|
||||
err.Error(),
|
||||
},
|
||||
}
|
||||
|
||||
return c.requestForwardingConnection.Do(freq)
|
||||
enc := json.NewEncoder(w)
|
||||
enc.Encode(resp)
|
||||
return
|
||||
}
|
||||
|
||||
// To avoid the risk of a forward loop in some pathological condition,
|
||||
// set the no-forward header
|
||||
freq.Header.Set(IntNoForwardingHeaderName, "true")
|
||||
handler.ServeHTTP(w, freq)
|
||||
})
|
||||
|
||||
return handler, mux
|
||||
}
|
||||
}
|
||||
|
||||
// WrapListenersForClustering takes in Vault's listeners and original HTTP
// handler, creates a new handler that handles forwarded requests, and returns
// the cluster setup function that creates the new listeners and assigns to the
// new handler
func WrapListenersForClustering(addrs []string, handler http.Handler, logger *log.Logger) func() ([]net.Listener, http.Handler, error) {
    // This mux handles cluster functions (right now, only forwarded requests)
    mux := http.NewServeMux()
    mux.HandleFunc("/cluster/local/forwarded-request", func(w http.ResponseWriter, req *http.Request) {
        freq, err := requestutil.ParseForwardedRequest(req)
        if err != nil {
            if logger != nil {
                logger.Printf("[ERR] http/ForwardedRequestHandler: error parsing forwarded request: %v", err)
            }

            w.Header().Add("Content-Type", "application/json")
            w.WriteHeader(http.StatusInternalServerError)

            type errorResponse struct {
                Errors []string
            }
            resp := &errorResponse{
                Errors: []string{
                    err.Error(),
                },
            }

            enc := json.NewEncoder(w)
            enc.Encode(resp)
            return
        }

        // To avoid the risk of a forward loop in some pathological condition,
        // set the no-forward header
        freq.Header.Set(IntNoForwardingHeaderName, "true")
        handler.ServeHTTP(w, freq)
    })

    return func() ([]net.Listener, http.Handler, error) {
        ret := make([]net.Listener, 0, len(addrs))
        // Loop over the existing listeners and start listeners on appropriate ports
        for _, addr := range addrs {
            ln, err := net.Listen("tcp", addr)
            if err != nil {
                return nil, nil, err
            }
            ret = append(ret, ln)
        }

        return ret, mux, nil
    }
}

// WrapListenersForClustering takes in Vault's cluster addresses and returns a
// setup function that creates the new listeners
func WrapListenersForClustering(addrs []string, logger *log.Logger) func() ([]net.Listener, error) {
    return func() ([]net.Listener, error) {
        ret := make([]net.Listener, 0, len(addrs))
        // Loop over the existing listeners and start listeners on appropriate ports
        for _, addr := range addrs {
            ln, err := net.Listen("tcp", addr)
            if err != nil {
                return nil, err
            }
            ret = append(ret, ln)
        }

        return ret, nil
    }
}
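
For orientation, a minimal sketch of how the pieces above are wired into a core; it mirrors the test-helper usage further down, and the core, handler, logger, and address values are assumptions made up for illustration:

    // Tell the core which TCP addresses its cluster listeners should bind to,
    // then hand it the setup function that produces the cluster handlers.
    core.SetClusterListenerAddrs([]*net.TCPAddr{
        {IP: net.ParseIP("127.0.0.1"), Port: 8201},
    })
    core.SetClusterSetupFuncs(WrapHandlerForClustering(handler, logger))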
@@ -87,10 +87,18 @@ func TestCluster_ListenForRequests(t *testing.T) {
    // Wait for core to become active
    TestWaitActive(t, cores[0].Core)

    // Use this to have a valid config after sealing since ClusterTLSConfig returns nil
    var lastTLSConfig *tls.Config
    checkListenersFunc := func(expectFail bool) {
        tlsConfig, err := cores[0].ClusterTLSConfig()
        if err != nil {
            if err.Error() != ErrSealed.Error() {
                t.Fatal(err)
            }
            tlsConfig = lastTLSConfig
        } else {
            tlsConfig.NextProtos = []string{"h2"}
            lastTLSConfig = tlsConfig
        }

        for _, ln := range cores[0].Listeners {
@ -125,6 +133,7 @@ func TestCluster_ListenForRequests(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
checkListenersFunc(false)
|
||||
|
||||
err := cores[0].StepDown(&logical.Request{
|
||||
|
@ -138,7 +147,7 @@ func TestCluster_ListenForRequests(t *testing.T) {
|
|||
|
||||
// StepDown doesn't wait during actual preSeal so give time for listeners
|
||||
// to close
|
||||
time.Sleep(2 * time.Second)
|
||||
checkListenersFunc(true)
|
||||
|
||||
// After this period it should be active again
|
||||
|
@ -149,7 +158,7 @@ func TestCluster_ListenForRequests(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(2 * time.Second)
|
||||
// After sealing it should be inactive again
|
||||
checkListenersFunc(true)
|
||||
}
|
||||
|
@@ -321,32 +330,26 @@ func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID
    }
    req.Header.Add("X-Vault-Token", c.Root)

    statusCode, respBytes, err := c.ForwardRequest(req)
    if err != nil {
        t.Fatalf("err: %v", err)
    }
    body := string(respBytes)

    if body != remoteCoreID {
        t.Fatalf("expected %s, got %s", remoteCoreID, body)
    }
    switch body {
    case "core1":
        if statusCode != 201 {
            t.Fatal("bad response")
        }
    case "core2":
        if statusCode != 202 {
            t.Fatal("bad response")
        }
    case "core3":
        if statusCode != 203 {
            t.Fatal("bad response")
        }
    }
|
||||
|
|
|
@ -16,6 +16,9 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
|
@@ -252,9 +255,10 @@ type Core struct {
    localClusterCert []byte
    // The cert pool containing the self-signed CA as a trusted CA
    localClusterCertPool *x509.CertPool
    // The TCP addresses we should use for clustering
    clusterListenerAddrs []*net.TCPAddr
    // The setup function that gives us the handler to use
    clusterHandlerSetupFunc func() (http.Handler, http.Handler)
    // Shutdown channel for the cluster listeners
    clusterListenerShutdownCh chan struct{}
    // Shutdown success channel. We need this to be done serially to ensure
|
||||
|
@ -271,6 +275,14 @@ type Core struct {
|
|||
// Cache of most recently known active advertisement information, used to
|
||||
// return values when the hash matches
|
||||
clusterActiveAdvertisement activeAdvertisement
|
||||
// The grpc Server that handles server RPC calls
|
||||
rpcServer *grpc.Server
|
||||
// The function for canceling the client connection
|
||||
rpcClientConnCancelFunc context.CancelFunc
|
||||
// The grpc ClientConn for RPC calls
|
||||
rpcClientConn *grpc.ClientConn
|
||||
// The grpc forwarding client
|
||||
rpcForwardingClient ForwardedRequestHandlerClient
|
||||
}
|
||||
|
||||
// CoreConfig is used to parameterize a core
|
||||
|
@ -381,20 +393,22 @@ func NewCore(conf *CoreConfig) (*Core, error) {
|
|||
|
||||
// Setup the core
|
||||
c := &Core{
|
||||
redirectAddr: conf.RedirectAddr,
|
||||
clusterAddr: conf.ClusterAddr,
|
||||
physical: conf.Physical,
|
||||
seal: conf.Seal,
|
||||
barrier: barrier,
|
||||
router: NewRouter(),
|
||||
sealed: true,
|
||||
standby: true,
|
||||
logger: conf.Logger,
|
||||
defaultLeaseTTL: conf.DefaultLeaseTTL,
|
||||
maxLeaseTTL: conf.MaxLeaseTTL,
|
||||
cachingDisabled: conf.DisableCache,
|
||||
clusterName: conf.ClusterName,
|
||||
localClusterCertPool: x509.NewCertPool(),
|
||||
clusterListenerShutdownCh: make(chan struct{}),
|
||||
clusterListenerShutdownSuccessCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
|
||||
|
@ -597,7 +611,7 @@ func (c *Core) Leader() (isLeader bool, leaderAddr string, err error) {
|
|||
c.requestForwardingConnectionLock.Lock()
|
||||
// Verify that the condition hasn't changed
|
||||
if c.requestForwardingConnection != nil {
|
||||
c.requestForwardingConnection.transport.CloseIdleConnections()
|
||||
}
|
||||
c.requestForwardingConnection = nil
|
||||
c.requestForwardingConnectionLock.Unlock()
|
||||
|
|
|
@ -0,0 +1,122 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: forwarding_service.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package vault is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
forwarding_service.proto
|
||||
|
||||
It has these top-level messages:
|
||||
*/
|
||||
package vault
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import forwarding "github.com/hashicorp/vault/helper/forwarding"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion3
|
||||
|
||||
// Client API for ForwardedRequestHandler service
|
||||
|
||||
type ForwardedRequestHandlerClient interface {
|
||||
HandleRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error)
|
||||
}
|
||||
|
||||
type forwardedRequestHandlerClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewForwardedRequestHandlerClient(cc *grpc.ClientConn) ForwardedRequestHandlerClient {
|
||||
return &forwardedRequestHandlerClient{cc}
|
||||
}
|
||||
|
||||
func (c *forwardedRequestHandlerClient) HandleRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error) {
|
||||
out := new(forwarding.Response)
|
||||
err := grpc.Invoke(ctx, "/vault.ForwardedRequestHandler/HandleRequest", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for ForwardedRequestHandler service
|
||||
|
||||
type ForwardedRequestHandlerServer interface {
|
||||
HandleRequest(context.Context, *forwarding.Request) (*forwarding.Response, error)
|
||||
}
|
||||
|
||||
func RegisterForwardedRequestHandlerServer(s *grpc.Server, srv ForwardedRequestHandlerServer) {
|
||||
s.RegisterService(&_ForwardedRequestHandler_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _ForwardedRequestHandler_HandleRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(forwarding.Request)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ForwardedRequestHandlerServer).HandleRequest(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/vault.ForwardedRequestHandler/HandleRequest",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ForwardedRequestHandlerServer).HandleRequest(ctx, req.(*forwarding.Request))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _ForwardedRequestHandler_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "vault.ForwardedRequestHandler",
|
||||
HandlerType: (*ForwardedRequestHandlerServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "HandleRequest",
|
||||
Handler: _ForwardedRequestHandler_HandleRequest_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: fileDescriptor0,
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("forwarding_service.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 156 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0x48, 0xcb, 0x2f, 0x2a,
|
||||
0x4f, 0x2c, 0x4a, 0xc9, 0xcc, 0x4b, 0x8f, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b,
|
||||
0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4b, 0x2c, 0xcd, 0x29, 0x91, 0xb2, 0x48, 0xcf, 0x2c,
|
||||
0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0x48, 0x2c, 0xce, 0xc8, 0x4c, 0xce, 0x2f,
|
||||
0x2a, 0xd0, 0x07, 0xcb, 0xe9, 0x67, 0xa4, 0xe6, 0x14, 0xa4, 0x16, 0xe9, 0x23, 0x8c, 0xd0, 0x2f,
|
||||
0xa9, 0x2c, 0x48, 0x2d, 0x86, 0x18, 0x60, 0x14, 0xce, 0x25, 0xee, 0x06, 0x91, 0x49, 0x4d, 0x09,
|
||||
0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0xf1, 0x48, 0xcc, 0x4b, 0xc9, 0x49, 0x2d, 0x12, 0xb2, 0xe1,
|
||||
0xe2, 0x85, 0x30, 0xa1, 0xe2, 0x42, 0xc2, 0x7a, 0x08, 0x43, 0xf4, 0xa0, 0x82, 0x52, 0x22, 0xa8,
|
||||
0x82, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x4a, 0x0c, 0x49, 0x6c, 0x60, 0xf3, 0x8d, 0x01, 0x01,
|
||||
0x00, 0x00, 0xff, 0xff, 0x4e, 0x32, 0x79, 0x01, 0xbc, 0x00, 0x00, 0x00,
|
||||
}
|
|
@@ -0,0 +1,9 @@
syntax = "proto3";

import "github.com/hashicorp/vault/helper/forwarding/types.proto";

package vault;

service ForwardedRequestHandler {
    rpc HandleRequest(forwarding.Request) returns (forwarding.Response) {}
}
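
The generated Go bindings above come from this service definition. One plausible way to regenerate them is a go:generate directive such as the one below; the include paths are an assumption and depend on where the Vault sources and protoc-gen-go are installed locally:

    //go:generate protoc -I. -I$GOPATH/src --go_out=plugins=grpc:. forwarding_service.proto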
@ -0,0 +1,349 @@
|
|||
package vault
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/helper/forwarding"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
const (
|
||||
clusterListenerAcceptDeadline = 500 * time.Millisecond
|
||||
)
|
||||
|
||||
// Starts the listeners and servers necessary to handle forwarded requests
|
||||
func (c *Core) startForwarding() error {
|
||||
// Clean up in case we have transitioned from a client to a server
|
||||
c.requestForwardingConnection = nil
|
||||
c.rpcForwardingClient = nil
|
||||
if c.rpcClientConnCancelFunc != nil {
|
||||
c.rpcClientConnCancelFunc()
|
||||
c.rpcClientConnCancelFunc = nil
|
||||
}
|
||||
if c.rpcClientConn != nil {
|
||||
c.rpcClientConn.Close()
|
||||
c.rpcClientConn = nil
|
||||
}
|
||||
|
||||
// Get our base handler (for our RPC server) and our wrapped handler (for
|
||||
// straight HTTP/2 forwarding)
|
||||
baseHandler, wrappedHandler := c.clusterHandlerSetupFunc()
|
||||
|
||||
// Get our TLS config
|
||||
tlsConfig, err := c.ClusterTLSConfig()
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/startClusterListener: failed to get tls configuration: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// The server supports all of the possible protos
|
||||
tlsConfig.NextProtos = []string{"h2", "req_fw_sb-act_v1"}
|
||||
|
||||
// Create our RPC server and register the request handler server
|
||||
c.rpcServer = grpc.NewServer()
|
||||
RegisterForwardedRequestHandlerServer(c.rpcServer, &forwardedRequestRPCServer{
|
||||
core: c,
|
||||
handler: baseHandler,
|
||||
})
|
||||
|
||||
// Create the HTTP/2 server that will be shared by both RPC and regular
|
||||
// duties. Doing it this way instead of listening via the server and gRPC
|
||||
// allows us to re-use the same port via ALPN. We can just tell the server
|
||||
// to serve a given conn and which handler to use.
|
||||
fws := &http2.Server{}
|
||||
|
||||
// Shutdown coordination logic
|
||||
var shutdown uint32
|
||||
shutdownWg := &sync.WaitGroup{}
|
||||
|
||||
for _, addr := range c.clusterListenerAddrs {
|
||||
shutdownWg.Add(1)
|
||||
|
||||
// Force a local resolution to avoid data races
|
||||
laddr := addr
|
||||
|
||||
// Start our listening loop
|
||||
go func() {
|
||||
defer shutdownWg.Done()
|
||||
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: starting listener")
|
||||
|
||||
// Create a TCP listener. We do this separately and specifically
|
||||
// with TCP so that we can set deadlines.
|
||||
tcpLn, err := net.ListenTCP("tcp", laddr)
|
||||
if err != nil {
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: error starting listener: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Wrap the listener with TLS
|
||||
tlsLn := tls.NewListener(tcpLn, tlsConfig)
|
||||
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: serving cluster requests on %s", tlsLn.Addr())
|
||||
|
||||
for {
|
||||
if atomic.LoadUint32(&shutdown) > 0 {
|
||||
tlsLn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
// Set the deadline for the accept call. If it passes we'll get
|
||||
// an error, causing us to check the condition at the top
|
||||
// again.
|
||||
tcpLn.SetDeadline(time.Now().Add(clusterListenerAcceptDeadline))
|
||||
|
||||
// Accept the connection
|
||||
conn, err := tlsLn.Accept()
|
||||
if err != nil {
|
||||
if conn != nil {
|
||||
conn.Close()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Type assert to TLS connection and handshake to populate the
|
||||
// connection state
|
||||
tlsConn := conn.(*tls.Conn)
|
||||
err = tlsConn.Handshake()
|
||||
if err != nil {
|
||||
c.logger.Printf("[TRACE] core/startClusterListener/Accept: error handshaking: %v", err)
|
||||
if conn != nil {
|
||||
conn.Close()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch tlsConn.ConnectionState().NegotiatedProtocol {
|
||||
case "h2":
|
||||
c.logger.Printf("[TRACE] core/startClusterListener/Accept: got h2 connection")
|
||||
go fws.ServeConn(conn, &http2.ServeConnOpts{
|
||||
Handler: wrappedHandler,
|
||||
})
|
||||
|
||||
case "req_fw_sb-act_v1":
|
||||
c.logger.Printf("[TRACE] core/startClusterListener/Accept: got req_fw_sb-act_v1 connection")
|
||||
go fws.ServeConn(conn, &http2.ServeConnOpts{
|
||||
Handler: c.rpcServer,
|
||||
})
|
||||
|
||||
default:
|
||||
c.logger.Printf("[TRACE] core/startClusterListener/Accept: unknown negotiated protocol")
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// This is in its own goroutine so that we don't block the main thread, and
|
||||
// thus we use atomic and channels to coordinate
|
||||
go func() {
|
||||
// If we get told to shut down...
|
||||
<-c.clusterListenerShutdownCh
|
||||
|
||||
// Stop the RPC server
|
||||
c.rpcServer.Stop()
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: shutting down listeners")
|
||||
|
||||
// Set the shutdown flag. This will cause the listeners to shut down
|
||||
// within the deadline in clusterListenerAcceptDeadline
|
||||
atomic.StoreUint32(&shutdown, 1)
|
||||
|
||||
// Wait for them all to shut down
|
||||
shutdownWg.Wait()
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: listeners successfully shut down")
|
||||
|
||||
// Tell the main thread that shutdown is done.
|
||||
c.clusterListenerShutdownSuccessCh <- struct{}{}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// refreshRequestForwardingConnection ensures that the client/transport are
|
||||
// alive and that the current active address value matches the most
|
||||
// recently-known address.
|
||||
func (c *Core) refreshRequestForwardingConnection(clusterAddr string) error {
|
||||
c.requestForwardingConnectionLock.Lock()
|
||||
defer c.requestForwardingConnectionLock.Unlock()
|
||||
|
||||
// It's nil but we don't have an address anyways, so exit
|
||||
if c.requestForwardingConnection == nil && clusterAddr == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NOTE: We don't fast path the case where we have a connection because the
|
||||
// address is the same, because the cert/key could have changed if the
|
||||
// active node ended up being the same node. Before we hit this function in
|
||||
// Leader() we'll have done a hash on the advertised info to ensure that we
|
||||
// won't hit this function unnecessarily anyways.
|
||||
|
||||
// Disabled, potentially, so clean up anything that might be around.
|
||||
if clusterAddr == "" {
|
||||
c.requestForwardingConnection = nil
|
||||
c.rpcForwardingClient = nil
|
||||
if c.rpcClientConnCancelFunc != nil {
|
||||
c.rpcClientConnCancelFunc()
|
||||
c.rpcClientConnCancelFunc = nil
|
||||
}
|
||||
if c.rpcClientConn != nil {
|
||||
c.rpcClientConn.Close()
|
||||
c.rpcClientConn = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
clusterURL, err := url.Parse(clusterAddr)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/refreshRequestForwardingConnection: error parsing cluster address: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
switch os.Getenv("VAULT_USE_GRPC_REQUEST_FORWARDING") {
|
||||
case "":
|
||||
// Set up normal HTTP forwarding handling
|
||||
tlsConfig, err := c.ClusterTLSConfig()
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/refreshRequestForwardingConnection: error fetching cluster tls configuration: %v", err)
|
||||
return err
|
||||
}
|
||||
tp := &http2.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
}
|
||||
c.requestForwardingConnection = &activeConnection{
|
||||
transport: tp,
|
||||
clusterAddr: clusterAddr,
|
||||
}
|
||||
|
||||
default:
|
||||
// Set up grpc forwarding handling
|
||||
// It's not really insecure, but we have to dial manually to get the
|
||||
// ALPN header right. It's just "insecure" because GRPC isn't managing
|
||||
// the TLS state.
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
c.rpcClientConnCancelFunc = cancelFunc
|
||||
c.rpcClientConn, err = grpc.DialContext(ctx, clusterURL.Host, grpc.WithDialer(c.getGRPCDialer()), grpc.WithInsecure())
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/refreshRequestForwardingConnection: err setting up rpc client: %v", err)
|
||||
return err
|
||||
}
|
||||
c.rpcForwardingClient = NewForwardedRequestHandlerClient(c.rpcClientConn)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForwardRequest forwards a given request to the active node and returns the
|
||||
// response.
|
||||
func (c *Core) ForwardRequest(req *http.Request) (int, []byte, error) {
|
||||
c.requestForwardingConnectionLock.RLock()
|
||||
defer c.requestForwardingConnectionLock.RUnlock()
|
||||
|
||||
switch os.Getenv("VAULT_USE_GRPC_REQUEST_FORWARDING") {
|
||||
case "":
|
||||
if c.requestForwardingConnection == nil {
|
||||
return 0, nil, ErrCannotForward
|
||||
}
|
||||
|
||||
if c.requestForwardingConnection.clusterAddr == "" {
|
||||
return 0, nil, ErrCannotForward
|
||||
}
|
||||
|
||||
freq, err := forwarding.GenerateForwardedHTTPRequest(req, c.requestForwardingConnection.clusterAddr+"/cluster/local/forwarded-request")
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/ForwardRequest: error creating forwarded request: %v", err)
|
||||
return 0, nil, fmt.Errorf("error creating forwarding request")
|
||||
}
|
||||
|
||||
//resp, err := c.requestForwardingConnection.Do(freq)
|
||||
resp, err := c.requestForwardingConnection.transport.RoundTrip(freq)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Read the body into a buffer so we can write it back out to the
|
||||
// original requestor
|
||||
buf := bytes.NewBuffer(nil)
|
||||
_, err = buf.ReadFrom(resp.Body)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
return resp.StatusCode, buf.Bytes(), nil
|
||||
|
||||
default:
|
||||
if c.rpcForwardingClient == nil {
|
||||
return 0, nil, ErrCannotForward
|
||||
}
|
||||
|
||||
freq, err := forwarding.GenerateForwardedRequest(req)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/ForwardRequest: error creating forwarding RPC request: %v", err)
|
||||
return 0, nil, fmt.Errorf("error creating forwarding RPC request")
|
||||
}
|
||||
if freq == nil {
|
||||
c.logger.Printf("[ERR] core/ForwardRequest: got nil forwarding RPC request")
|
||||
return 0, nil, fmt.Errorf("got nil forwarding RPC request")
|
||||
}
|
||||
resp, err := c.rpcForwardingClient.HandleRequest(context.Background(), freq, grpc.FailFast(true))
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/ForwardRequest: error during forwarded RPC request: %v", err)
|
||||
return 0, nil, fmt.Errorf("error during forwarding RPC request")
|
||||
}
|
||||
return int(resp.StatusCode), resp.Body, nil
|
||||
}
|
||||
}
|
||||
|
||||
// getGRPCDialer is used to return a dialer that has the correct TLS
|
||||
// configuration. Otherwise gRPC tries to be helpful and stomps all over our
|
||||
// NextProtos.
|
||||
func (c *Core) getGRPCDialer() func(string, time.Duration) (net.Conn, error) {
|
||||
return func(addr string, timeout time.Duration) (net.Conn, error) {
|
||||
tlsConfig, err := c.ClusterTLSConfig()
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/getGRPCDialer: failed to get tls configuration: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
tlsConfig.NextProtos = []string{"req_fw_sb-act_v1"}
|
||||
dialer := &net.Dialer{
|
||||
Timeout: timeout,
|
||||
}
|
||||
return tls.DialWithDialer(dialer, "tcp", addr, tlsConfig)
|
||||
}
|
||||
}
|
||||
|
||||
type forwardedRequestRPCServer struct {
|
||||
core *Core
|
||||
handler http.Handler
|
||||
}
|
||||
|
||||
func (s *forwardedRequestRPCServer) HandleRequest(ctx context.Context, freq *forwarding.Request) (*forwarding.Response, error) {
|
||||
// Parse an http.Request out of it
|
||||
req, err := forwarding.ParseForwardedRequest(freq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// A very dummy response writer that doesn't follow normal semantics, just
|
||||
// lets you write a status code (last written wins) and a body. But it
|
||||
// meets the interface requirements.
|
||||
w := forwarding.NewRPCResponseWriter()
|
||||
|
||||
s.handler.ServeHTTP(w, req)
|
||||
|
||||
return &forwarding.Response{
|
||||
StatusCode: uint32(w.StatusCode()),
|
||||
Body: w.Body().Bytes(),
|
||||
}, nil
|
||||
}
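
To make the request-forwarding helpers concrete, here is a rough, self-contained sketch of the round trip, using only the two helper calls that appear in this diff (forwarding.GenerateForwardedRequest on the standby side and forwarding.ParseForwardedRequest on the active side); everything else is illustrative:

    package main

    import (
        "fmt"
        "net/http"
        "strings"

        "github.com/hashicorp/vault/helper/forwarding"
    )

    func main() {
        // A request as the standby node's HTTP layer would see it.
        orig, err := http.NewRequest("PUT", "https://127.0.0.1:8200/v1/secret/foo",
            strings.NewReader(`{"value":"bar"}`))
        if err != nil {
            panic(err)
        }

        // Standby side: serialize the request into the protobuf form carried
        // over the cluster RPC.
        freq, err := forwarding.GenerateForwardedRequest(orig)
        if err != nil {
            panic(err)
        }

        // Active side: rebuild an *http.Request and hand it to the normal
        // Vault handler (here we just inspect it).
        req, err := forwarding.ParseForwardedRequest(freq)
        if err != nil {
            panic(err)
        }
        fmt.Println(req.Method, req.URL.Path)
    }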
@@ -135,11 +135,12 @@ func TestCoreWithSeal(t *testing.T, testSeal Seal) *Core {
// TestCoreInit initializes the core with a single key, and returns
// the key that must be used to unseal the core and a root token.
func TestCoreInit(t *testing.T, core *Core) ([]byte, string) {
    return TestCoreInitClusterWrapperSetup(t, core, nil, func() (http.Handler, http.Handler) { return nil, nil })
}

func TestCoreInitClusterWrapperSetup(t *testing.T, core *Core, clusterAddrs []*net.TCPAddr, handlerSetupFunc func() (http.Handler, http.Handler)) ([]byte, string) {
    core.SetClusterListenerAddrs(clusterAddrs)
    core.SetClusterSetupFuncs(handlerSetupFunc)
    result, err := core.Initialize(&SealConfig{
        SecretShares:    1,
        SecretThreshold: 1,
|
||||
|
@ -151,7 +152,7 @@ func TestCoreInitClusterListenerSetup(t *testing.T, core *Core, setupFunc func()
|
|||
}
|
||||
|
||||
func TestCoreUnseal(core *Core, key []byte) (bool, error) {
|
||||
core.SetClusterSetupFuncs(func() (http.Handler, http.Handler) { return nil, nil })
|
||||
return core.Unseal(key)
|
||||
}
|
||||
|
||||
|
@@ -640,22 +641,22 @@ func TestCluster(t *testing.T, handlers []http.Handler, base *CoreConfig, unseal
//
// Clustering setup
//
clusterAddrGen := func(lns []*TestListener) []*net.TCPAddr {
    ret := make([]*net.TCPAddr, len(lns))
    for i, ln := range lns {
        ret[i] = &net.TCPAddr{
            IP:   ln.Address.IP,
            Port: ln.Address.Port + 1,
        }
    }
    return ret
}

c2.SetClusterListenerAddrs(clusterAddrGen(c2lns))
c2.SetClusterSetupFuncs(WrapHandlerForClustering(handlers[1], logger))
c3.SetClusterListenerAddrs(clusterAddrGen(c3lns))
c3.SetClusterSetupFuncs(WrapHandlerForClustering(handlers[2], logger))
key, root := TestCoreInitClusterWrapperSetup(t, c1, clusterAddrGen(c1lns), WrapHandlerForClustering(handlers[0], logger))
|
||||
if _, err := c1.Unseal(TestKeyCopy(key)); err != nil {
|
||||
t.Fatalf("unseal err: %s", err)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,525 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package timeseries implements a time series structure for stats collection.
|
||||
package timeseries // import "golang.org/x/net/internal/timeseries"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
timeSeriesNumBuckets = 64
|
||||
minuteHourSeriesNumBuckets = 60
|
||||
)
|
||||
|
||||
var timeSeriesResolutions = []time.Duration{
|
||||
1 * time.Second,
|
||||
10 * time.Second,
|
||||
1 * time.Minute,
|
||||
10 * time.Minute,
|
||||
1 * time.Hour,
|
||||
6 * time.Hour,
|
||||
24 * time.Hour, // 1 day
|
||||
7 * 24 * time.Hour, // 1 week
|
||||
4 * 7 * 24 * time.Hour, // 4 weeks
|
||||
16 * 7 * 24 * time.Hour, // 16 weeks
|
||||
}
|
||||
|
||||
var minuteHourSeriesResolutions = []time.Duration{
|
||||
1 * time.Second,
|
||||
1 * time.Minute,
|
||||
}
|
||||
|
||||
// An Observable is a kind of data that can be aggregated in a time series.
|
||||
type Observable interface {
|
||||
Multiply(ratio float64) // Multiplies the data in self by a given ratio
|
||||
Add(other Observable) // Adds the data from a different observation to self
|
||||
Clear() // Clears the observation so it can be reused.
|
||||
CopyFrom(other Observable) // Copies the contents of a given observation to self
|
||||
}
|
||||
|
||||
// Float attaches the methods of Observable to a float64.
|
||||
type Float float64
|
||||
|
||||
// NewFloat returns a Float.
|
||||
func NewFloat() Observable {
|
||||
f := Float(0)
|
||||
return &f
|
||||
}
|
||||
|
||||
// String returns the float as a string.
|
||||
func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
|
||||
|
||||
// Value returns the float's value.
|
||||
func (f *Float) Value() float64 { return float64(*f) }
|
||||
|
||||
func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
|
||||
|
||||
func (f *Float) Add(other Observable) {
|
||||
o := other.(*Float)
|
||||
*f += *o
|
||||
}
|
||||
|
||||
func (f *Float) Clear() { *f = 0 }
|
||||
|
||||
func (f *Float) CopyFrom(other Observable) {
|
||||
o := other.(*Float)
|
||||
*f = *o
|
||||
}
|
||||
|
||||
// A Clock tells the current time.
|
||||
type Clock interface {
|
||||
Time() time.Time
|
||||
}
|
||||
|
||||
type defaultClock int
|
||||
|
||||
var defaultClockInstance defaultClock
|
||||
|
||||
func (defaultClock) Time() time.Time { return time.Now() }
|
||||
|
||||
// Information kept per level. Each level consists of a circular list of
|
||||
// observations. The start of the level may be derived from end and the
|
||||
// len(buckets) * sizeInMillis.
|
||||
type tsLevel struct {
|
||||
oldest int // index to oldest bucketed Observable
|
||||
newest int // index to newest bucketed Observable
|
||||
end time.Time // end timestamp for this level
|
||||
size time.Duration // duration of the bucketed Observable
|
||||
buckets []Observable // collections of observations
|
||||
provider func() Observable // used for creating new Observable
|
||||
}
|
||||
|
||||
func (l *tsLevel) Clear() {
|
||||
l.oldest = 0
|
||||
l.newest = len(l.buckets) - 1
|
||||
l.end = time.Time{}
|
||||
for i := range l.buckets {
|
||||
if l.buckets[i] != nil {
|
||||
l.buckets[i].Clear()
|
||||
l.buckets[i] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
|
||||
l.size = size
|
||||
l.provider = f
|
||||
l.buckets = make([]Observable, numBuckets)
|
||||
}
|
||||
|
||||
// Keeps a sequence of levels. Each level is responsible for storing data at
|
||||
// a given resolution. For example, the first level stores data at a one
|
||||
// minute resolution while the second level stores data at a one hour
|
||||
// resolution.
|
||||
|
||||
// Each level is represented by a sequence of buckets. Each bucket spans an
|
||||
// interval equal to the resolution of the level. New observations are added
|
||||
// to the last bucket.
|
||||
type timeSeries struct {
|
||||
provider func() Observable // make more Observable
|
||||
numBuckets int // number of buckets in each level
|
||||
levels []*tsLevel // levels of bucketed Observable
|
||||
lastAdd time.Time // time of last Observable tracked
|
||||
total Observable // convenient aggregation of all Observable
|
||||
clock Clock // Clock for getting current time
|
||||
pending Observable // observations not yet bucketed
|
||||
pendingTime time.Time // what time are we keeping in pending
|
||||
dirty bool // if there are pending observations
|
||||
}
|
||||
|
||||
// init initializes a level according to the supplied criteria.
|
||||
func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
|
||||
ts.provider = f
|
||||
ts.numBuckets = numBuckets
|
||||
ts.clock = clock
|
||||
ts.levels = make([]*tsLevel, len(resolutions))
|
||||
|
||||
for i := range resolutions {
|
||||
if i > 0 && resolutions[i-1] >= resolutions[i] {
|
||||
log.Print("timeseries: resolutions must be monotonically increasing")
|
||||
break
|
||||
}
|
||||
newLevel := new(tsLevel)
|
||||
newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
|
||||
ts.levels[i] = newLevel
|
||||
}
|
||||
|
||||
ts.Clear()
|
||||
}
|
||||
|
||||
// Clear removes all observations from the time series.
|
||||
func (ts *timeSeries) Clear() {
|
||||
ts.lastAdd = time.Time{}
|
||||
ts.total = ts.resetObservation(ts.total)
|
||||
ts.pending = ts.resetObservation(ts.pending)
|
||||
ts.pendingTime = time.Time{}
|
||||
ts.dirty = false
|
||||
|
||||
for i := range ts.levels {
|
||||
ts.levels[i].Clear()
|
||||
}
|
||||
}
|
||||
|
||||
// Add records an observation at the current time.
|
||||
func (ts *timeSeries) Add(observation Observable) {
|
||||
ts.AddWithTime(observation, ts.clock.Time())
|
||||
}
|
||||
|
||||
// AddWithTime records an observation at the specified time.
|
||||
func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
|
||||
|
||||
smallBucketDuration := ts.levels[0].size
|
||||
|
||||
if t.After(ts.lastAdd) {
|
||||
ts.lastAdd = t
|
||||
}
|
||||
|
||||
if t.After(ts.pendingTime) {
|
||||
ts.advance(t)
|
||||
ts.mergePendingUpdates()
|
||||
ts.pendingTime = ts.levels[0].end
|
||||
ts.pending.CopyFrom(observation)
|
||||
ts.dirty = true
|
||||
} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
|
||||
// The observation is close enough to go into the pending bucket.
|
||||
// This compensates for clock skewing and small scheduling delays
|
||||
// by letting the update stay in the fast path.
|
||||
ts.pending.Add(observation)
|
||||
ts.dirty = true
|
||||
} else {
|
||||
ts.mergeValue(observation, t)
|
||||
}
|
||||
}
|
||||
|
||||
// mergeValue inserts the observation at the specified time in the past into all levels.
|
||||
func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
|
||||
for _, level := range ts.levels {
|
||||
index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
|
||||
if 0 <= index && index < ts.numBuckets {
|
||||
bucketNumber := (level.oldest + index) % ts.numBuckets
|
||||
if level.buckets[bucketNumber] == nil {
|
||||
level.buckets[bucketNumber] = level.provider()
|
||||
}
|
||||
level.buckets[bucketNumber].Add(observation)
|
||||
}
|
||||
}
|
||||
ts.total.Add(observation)
|
||||
}
|
||||
|
||||
// mergePendingUpdates applies the pending updates into all levels.
|
||||
func (ts *timeSeries) mergePendingUpdates() {
|
||||
if ts.dirty {
|
||||
ts.mergeValue(ts.pending, ts.pendingTime)
|
||||
ts.pending = ts.resetObservation(ts.pending)
|
||||
ts.dirty = false
|
||||
}
|
||||
}
|
||||
|
||||
// advance cycles the buckets at each level until the latest bucket in
|
||||
// each level can hold the time specified.
|
||||
func (ts *timeSeries) advance(t time.Time) {
|
||||
if !t.After(ts.levels[0].end) {
|
||||
return
|
||||
}
|
||||
for i := 0; i < len(ts.levels); i++ {
|
||||
level := ts.levels[i]
|
||||
if !level.end.Before(t) {
|
||||
break
|
||||
}
|
||||
|
||||
// If the time is sufficiently far, just clear the level and advance
|
||||
// directly.
|
||||
if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
|
||||
for _, b := range level.buckets {
|
||||
ts.resetObservation(b)
|
||||
}
|
||||
level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
|
||||
}
|
||||
|
||||
for t.After(level.end) {
|
||||
level.end = level.end.Add(level.size)
|
||||
level.newest = level.oldest
|
||||
level.oldest = (level.oldest + 1) % ts.numBuckets
|
||||
ts.resetObservation(level.buckets[level.newest])
|
||||
}
|
||||
|
||||
t = level.end
|
||||
}
|
||||
}
|
||||
|
||||
// Latest returns the sum of the num latest buckets from the level.
|
||||
func (ts *timeSeries) Latest(level, num int) Observable {
|
||||
now := ts.clock.Time()
|
||||
if ts.levels[0].end.Before(now) {
|
||||
ts.advance(now)
|
||||
}
|
||||
|
||||
ts.mergePendingUpdates()
|
||||
|
||||
result := ts.provider()
|
||||
l := ts.levels[level]
|
||||
index := l.newest
|
||||
|
||||
for i := 0; i < num; i++ {
|
||||
if l.buckets[index] != nil {
|
||||
result.Add(l.buckets[index])
|
||||
}
|
||||
if index == 0 {
|
||||
index = ts.numBuckets
|
||||
}
|
||||
index--
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// LatestBuckets returns a copy of the num latest buckets from level.
|
||||
func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
|
||||
if level < 0 || level > len(ts.levels) {
|
||||
log.Print("timeseries: bad level argument: ", level)
|
||||
return nil
|
||||
}
|
||||
if num < 0 || num >= ts.numBuckets {
|
||||
log.Print("timeseries: bad num argument: ", num)
|
||||
return nil
|
||||
}
|
||||
|
||||
results := make([]Observable, num)
|
||||
now := ts.clock.Time()
|
||||
if ts.levels[0].end.Before(now) {
|
||||
ts.advance(now)
|
||||
}
|
||||
|
||||
ts.mergePendingUpdates()
|
||||
|
||||
l := ts.levels[level]
|
||||
index := l.newest
|
||||
|
||||
for i := 0; i < num; i++ {
|
||||
result := ts.provider()
|
||||
results[i] = result
|
||||
if l.buckets[index] != nil {
|
||||
result.CopyFrom(l.buckets[index])
|
||||
}
|
||||
|
||||
if index == 0 {
|
||||
index = ts.numBuckets
|
||||
}
|
||||
index -= 1
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
// ScaleBy updates observations by scaling by factor.
|
||||
func (ts *timeSeries) ScaleBy(factor float64) {
|
||||
for _, l := range ts.levels {
|
||||
for i := 0; i < ts.numBuckets; i++ {
|
||||
l.buckets[i].Multiply(factor)
|
||||
}
|
||||
}
|
||||
|
||||
ts.total.Multiply(factor)
|
||||
ts.pending.Multiply(factor)
|
||||
}
|
||||
|
||||
// Range returns the sum of observations added over the specified time range.
|
||||
// If start or finish times don't fall on bucket boundaries of the same
|
||||
// level, then return values are approximate answers.
|
||||
func (ts *timeSeries) Range(start, finish time.Time) Observable {
|
||||
return ts.ComputeRange(start, finish, 1)[0]
|
||||
}
|
||||
|
||||
// Recent returns the sum of observations from the last delta.
|
||||
func (ts *timeSeries) Recent(delta time.Duration) Observable {
|
||||
now := ts.clock.Time()
|
||||
return ts.Range(now.Add(-delta), now)
|
||||
}
|
||||
|
||||
// Total returns the total of all observations.
|
||||
func (ts *timeSeries) Total() Observable {
|
||||
ts.mergePendingUpdates()
|
||||
return ts.total
|
||||
}
|
||||
|
||||
// ComputeRange computes a specified number of values into a slice using
|
||||
// the observations recorded over the specified time period. The return
|
||||
// values are approximate if the start or finish times don't fall on the
|
||||
// bucket boundaries at the same level or if the number of buckets spanning
|
||||
// the range is not an integral multiple of num.
|
||||
func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
|
||||
if start.After(finish) {
|
||||
log.Printf("timeseries: start > finish, %v>%v", start, finish)
|
||||
return nil
|
||||
}
|
||||
|
||||
if num < 0 {
|
||||
log.Printf("timeseries: num < 0, %v", num)
|
||||
return nil
|
||||
}
|
||||
|
||||
results := make([]Observable, num)
|
||||
|
||||
for _, l := range ts.levels {
|
||||
if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
|
||||
ts.extract(l, start, finish, num, results)
|
||||
return results
|
||||
}
|
||||
}
|
||||
|
||||
// Failed to find a level that covers the desired range. So just
|
||||
// extract from the last level, even if it doesn't cover the entire
|
||||
// desired range.
|
||||
ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// RecentList returns the specified number of values in slice over the most
|
||||
// recent time period of the specified range.
|
||||
func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
|
||||
if delta < 0 {
|
||||
return nil
|
||||
}
|
||||
now := ts.clock.Time()
|
||||
return ts.ComputeRange(now.Add(-delta), now, num)
|
||||
}
|
||||
|
||||
// extract returns a slice of specified number of observations from a given
|
||||
// level over a given range.
|
||||
func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
|
||||
ts.mergePendingUpdates()
|
||||
|
||||
srcInterval := l.size
|
||||
dstInterval := finish.Sub(start) / time.Duration(num)
|
||||
dstStart := start
|
||||
srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
|
||||
|
||||
srcIndex := 0
|
||||
|
||||
// Where should scanning start?
|
||||
if dstStart.After(srcStart) {
|
||||
advance := dstStart.Sub(srcStart) / srcInterval
|
||||
srcIndex += int(advance)
|
||||
srcStart = srcStart.Add(advance * srcInterval)
|
||||
}
|
||||
|
||||
// The i'th value is computed as shown below.
// interval = (finish-start)/num
|
||||
// i'th value = sum of observation in range
|
||||
// [ start + i * interval,
|
||||
// start + (i + 1) * interval )
|
||||
for i := 0; i < num; i++ {
|
||||
results[i] = ts.resetObservation(results[i])
|
||||
dstEnd := dstStart.Add(dstInterval)
|
||||
for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
|
||||
srcEnd := srcStart.Add(srcInterval)
|
||||
if srcEnd.After(ts.lastAdd) {
|
||||
srcEnd = ts.lastAdd
|
||||
}
|
||||
|
||||
if !srcEnd.Before(dstStart) {
|
||||
srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
|
||||
if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
|
||||
// dst completely contains src.
|
||||
if srcValue != nil {
|
||||
results[i].Add(srcValue)
|
||||
}
|
||||
} else {
|
||||
// dst partially overlaps src.
|
||||
overlapStart := maxTime(srcStart, dstStart)
|
||||
overlapEnd := minTime(srcEnd, dstEnd)
|
||||
base := srcEnd.Sub(srcStart)
|
||||
fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
|
||||
|
||||
used := ts.provider()
|
||||
if srcValue != nil {
|
||||
used.CopyFrom(srcValue)
|
||||
}
|
||||
used.Multiply(fraction)
|
||||
results[i].Add(used)
|
||||
}
|
||||
|
||||
if srcEnd.After(dstEnd) {
|
||||
break
|
||||
}
|
||||
}
|
||||
srcIndex++
|
||||
srcStart = srcStart.Add(srcInterval)
|
||||
}
|
||||
dstStart = dstStart.Add(dstInterval)
|
||||
}
|
||||
}
|
||||
|
||||
// resetObservation clears the content so the struct may be reused.
|
||||
func (ts *timeSeries) resetObservation(observation Observable) Observable {
|
||||
if observation == nil {
|
||||
observation = ts.provider()
|
||||
} else {
|
||||
observation.Clear()
|
||||
}
|
||||
return observation
|
||||
}
|
||||
|
||||
// TimeSeries tracks data at granularities from 1 second to 16 weeks.
|
||||
type TimeSeries struct {
|
||||
timeSeries
|
||||
}
|
||||
|
||||
// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
|
||||
func NewTimeSeries(f func() Observable) *TimeSeries {
|
||||
return NewTimeSeriesWithClock(f, defaultClockInstance)
|
||||
}
|
||||
|
||||
// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
|
||||
// assigning timestamps.
|
||||
func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
|
||||
ts := new(TimeSeries)
|
||||
ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
|
||||
return ts
|
||||
}
|
||||
|
||||
// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
|
||||
type MinuteHourSeries struct {
|
||||
timeSeries
|
||||
}
|
||||
|
||||
// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
|
||||
func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
|
||||
return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
|
||||
}
|
||||
|
||||
// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
|
||||
// assigning timestamps.
|
||||
func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
|
||||
ts := new(MinuteHourSeries)
|
||||
ts.timeSeries.init(minuteHourSeriesResolutions, f,
|
||||
minuteHourSeriesNumBuckets, clock)
|
||||
return ts
|
||||
}
|
||||
|
||||
func (ts *MinuteHourSeries) Minute() Observable {
|
||||
return ts.timeSeries.Latest(0, 60)
|
||||
}
|
||||
|
||||
func (ts *MinuteHourSeries) Hour() Observable {
|
||||
return ts.timeSeries.Latest(1, 60)
|
||||
}
|
||||
|
||||
func minTime(a, b time.Time) time.Time {
|
||||
if a.Before(b) {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func maxTime(a, b time.Time) time.Time {
|
||||
if a.After(b) {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
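
A small, hypothetical example of the API above, written as if it lived inside the package itself (the import path is internal to golang.org/x/net, so code outside that tree cannot import it directly):

    package timeseries

    import (
        "testing"
        "time"
    )

    func TestExampleUsage(t *testing.T) {
        // Track a simple counter at the standard resolutions (1s .. 16 weeks).
        ts := NewTimeSeries(NewFloat)

        // Record three observations of value 1.
        for i := 0; i < 3; i++ {
            v := Float(1)
            ts.Add(&v)
        }

        // Total merges any pending observations and returns the aggregate.
        if got := ts.Total().(*Float).Value(); got != 3 {
            t.Fatalf("expected a total of 3, got %v", got)
        }

        // Recent(d) would similarly sum everything observed over the last d.
        _ = ts.Recent(time.Minute)
    }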
|
|
@ -0,0 +1,524 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package trace
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
)
|
||||
|
||||
var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{
|
||||
"elapsed": elapsed,
|
||||
"trimSpace": strings.TrimSpace,
|
||||
}).Parse(eventsHTML))
|
||||
|
||||
const maxEventsPerLog = 100
|
||||
|
||||
type bucket struct {
|
||||
MaxErrAge time.Duration
|
||||
String string
|
||||
}
|
||||
|
||||
var buckets = []bucket{
|
||||
{0, "total"},
|
||||
{10 * time.Second, "errs<10s"},
|
||||
{1 * time.Minute, "errs<1m"},
|
||||
{10 * time.Minute, "errs<10m"},
|
||||
{1 * time.Hour, "errs<1h"},
|
||||
{10 * time.Hour, "errs<10h"},
|
||||
{24000 * time.Hour, "errors"},
|
||||
}
|
||||
|
||||
// RenderEvents renders the HTML page typically served at /debug/events.
|
||||
// It does not do any auth checking; see AuthRequest for the default auth check
|
||||
// used by the handler registered on http.DefaultServeMux.
|
||||
// req may be nil.
|
||||
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
|
||||
now := time.Now()
|
||||
data := &struct {
|
||||
Families []string // family names
|
||||
Buckets []bucket
|
||||
Counts [][]int // eventLog count per family/bucket
|
||||
|
||||
// Set when a bucket has been selected.
|
||||
Family string
|
||||
Bucket int
|
||||
EventLogs eventLogs
|
||||
Expanded bool
|
||||
}{
|
||||
Buckets: buckets,
|
||||
}
|
||||
|
||||
data.Families = make([]string, 0, len(families))
|
||||
famMu.RLock()
|
||||
for name := range families {
|
||||
data.Families = append(data.Families, name)
|
||||
}
|
||||
famMu.RUnlock()
|
||||
sort.Strings(data.Families)
|
||||
|
||||
// Count the number of eventLogs in each family for each error age.
|
||||
data.Counts = make([][]int, len(data.Families))
|
||||
for i, name := range data.Families {
|
||||
// TODO(sameer): move this loop under the family lock.
|
||||
f := getEventFamily(name)
|
||||
data.Counts[i] = make([]int, len(data.Buckets))
|
||||
for j, b := range data.Buckets {
|
||||
data.Counts[i][j] = f.Count(now, b.MaxErrAge)
|
||||
}
|
||||
}
|
||||
|
||||
if req != nil {
|
||||
var ok bool
|
||||
data.Family, data.Bucket, ok = parseEventsArgs(req)
|
||||
if !ok {
|
||||
// No-op
|
||||
} else {
|
||||
data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
|
||||
}
|
||||
if data.EventLogs != nil {
|
||||
defer data.EventLogs.Free()
|
||||
sort.Sort(data.EventLogs)
|
||||
}
|
||||
if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
|
||||
data.Expanded = exp
|
||||
}
|
||||
}
|
||||
|
||||
famMu.RLock()
|
||||
defer famMu.RUnlock()
|
||||
if err := eventsTmpl.Execute(w, data); err != nil {
|
||||
log.Printf("net/trace: Failed executing template: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
|
||||
fam, bStr := req.FormValue("fam"), req.FormValue("b")
|
||||
if fam == "" || bStr == "" {
|
||||
return "", 0, false
|
||||
}
|
||||
b, err := strconv.Atoi(bStr)
|
||||
if err != nil || b < 0 || b >= len(buckets) {
|
||||
return "", 0, false
|
||||
}
|
||||
return fam, b, true
|
||||
}
|
||||
|
||||
// An EventLog provides a log of events associated with a specific object.
|
||||
type EventLog interface {
|
||||
// Printf formats its arguments with fmt.Sprintf and adds the
|
||||
// result to the event log.
|
||||
Printf(format string, a ...interface{})
|
||||
|
||||
// Errorf is like Printf, but it marks this event as an error.
|
||||
Errorf(format string, a ...interface{})
|
||||
|
||||
// Finish declares that this event log is complete.
|
||||
// The event log should not be used after calling this method.
|
||||
Finish()
|
||||
}
|
||||
|
||||
// NewEventLog returns a new EventLog with the specified family name
|
||||
// and title.
|
||||
func NewEventLog(family, title string) EventLog {
|
||||
el := newEventLog()
|
||||
el.ref()
|
||||
el.Family, el.Title = family, title
|
||||
el.Start = time.Now()
|
||||
el.events = make([]logEntry, 0, maxEventsPerLog)
|
||||
el.stack = make([]uintptr, 32)
|
||||
n := runtime.Callers(2, el.stack)
|
||||
el.stack = el.stack[:n]
|
||||
|
||||
getEventFamily(family).add(el)
|
||||
return el
|
||||
}
|
||||
|
||||
func (el *eventLog) Finish() {
|
||||
getEventFamily(el.Family).remove(el)
|
||||
el.unref() // matches ref in New
|
||||
}
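
Since golang.org/x/net/trace is an importable package, a short, hypothetical illustration of the EventLog surface above (the family and title strings are made up; entries show up on the /debug/events page rendered by RenderEvents):

    package main

    import (
        "golang.org/x/net/trace"
    )

    func main() {
        // One event log per long-lived object or subsystem.
        el := trace.NewEventLog("vault.core", "request-forwarding")
        defer el.Finish()

        el.Printf("forwarding connection refreshed")
        el.Errorf("error forwarding request: %v", "connection refused")
    }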
|
||||
|
||||
var (
|
||||
famMu sync.RWMutex
|
||||
families = make(map[string]*eventFamily) // family name => family
|
||||
)
|
||||
|
||||
func getEventFamily(fam string) *eventFamily {
|
||||
famMu.Lock()
|
||||
defer famMu.Unlock()
|
||||
f := families[fam]
|
||||
if f == nil {
|
||||
f = &eventFamily{}
|
||||
families[fam] = f
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
type eventFamily struct {
|
||||
mu sync.RWMutex
|
||||
eventLogs eventLogs
|
||||
}
|
||||
|
||||
func (f *eventFamily) add(el *eventLog) {
|
||||
f.mu.Lock()
|
||||
f.eventLogs = append(f.eventLogs, el)
|
||||
f.mu.Unlock()
|
||||
}
|
||||
|
||||
func (f *eventFamily) remove(el *eventLog) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
for i, el0 := range f.eventLogs {
|
||||
if el == el0 {
|
||||
copy(f.eventLogs[i:], f.eventLogs[i+1:])
|
||||
f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
for _, el := range f.eventLogs {
|
||||
if el.hasRecentError(now, maxErrAge) {
|
||||
n++
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
|
||||
f.mu.RLock()
|
||||
defer f.mu.RUnlock()
|
||||
els = make(eventLogs, 0, len(f.eventLogs))
|
||||
for _, el := range f.eventLogs {
|
||||
if el.hasRecentError(now, maxErrAge) {
|
||||
el.ref()
|
||||
els = append(els, el)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type eventLogs []*eventLog
|
||||
|
||||
// Free calls unref on each element of the list.
|
||||
func (els eventLogs) Free() {
|
||||
for _, el := range els {
|
||||
el.unref()
|
||||
}
|
||||
}
|
||||
|
||||
// eventLogs may be sorted in reverse chronological order.
|
||||
func (els eventLogs) Len() int { return len(els) }
|
||||
func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
|
||||
func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
|
||||
|
||||
// A logEntry is a timestamped log entry in an event log.
|
||||
type logEntry struct {
|
||||
When time.Time
|
||||
Elapsed time.Duration // since previous event in log
|
||||
NewDay bool // whether this event is on a different day to the previous event
|
||||
What string
|
||||
IsErr bool
|
||||
}
|
||||
|
||||
// WhenString returns a string representation of the elapsed time of the event.
|
||||
// It will include the date if midnight was crossed.
|
||||
func (e logEntry) WhenString() string {
|
||||
if e.NewDay {
|
||||
return e.When.Format("2006/01/02 15:04:05.000000")
|
||||
}
|
||||
return e.When.Format("15:04:05.000000")
|
||||
}
|
||||
|
||||
// An eventLog represents an active event log.
|
||||
type eventLog struct {
|
||||
// Family is the top-level grouping of event logs to which this belongs.
|
||||
Family string
|
||||
|
||||
// Title is the title of this event log.
|
||||
Title string
|
||||
|
||||
// Timing information.
|
||||
Start time.Time
|
||||
|
||||
// Call stack where this event log was created.
|
||||
stack []uintptr
|
||||
|
||||
// Append-only sequence of events.
|
||||
//
|
||||
// TODO(sameer): change this to a ring buffer to avoid the array copy
|
||||
// when we hit maxEventsPerLog.
|
||||
mu sync.RWMutex
|
||||
events []logEntry
|
||||
LastErrorTime time.Time
|
||||
discarded int
|
||||
|
||||
refs int32 // how many buckets this is in
|
||||
}
|
||||
|
||||
func (el *eventLog) reset() {
|
||||
// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
|
||||
el.Family = ""
|
||||
el.Title = ""
|
||||
el.Start = time.Time{}
|
||||
el.stack = nil
|
||||
el.events = nil
|
||||
el.LastErrorTime = time.Time{}
|
||||
el.discarded = 0
|
||||
el.refs = 0
|
||||
}
|
||||
|
||||
func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
|
||||
if maxErrAge == 0 {
|
||||
return true
|
||||
}
|
||||
el.mu.RLock()
|
||||
defer el.mu.RUnlock()
|
||||
return now.Sub(el.LastErrorTime) < maxErrAge
|
||||
}
|
||||
|
||||
// delta returns the elapsed time since the last event or the log start,
|
||||
// and whether it spans midnight.
|
||||
// L >= el.mu
|
||||
func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
|
||||
if len(el.events) == 0 {
|
||||
return t.Sub(el.Start), false
|
||||
}
|
||||
prev := el.events[len(el.events)-1].When
|
||||
return t.Sub(prev), prev.Day() != t.Day()
|
||||
|
||||
}
|
||||
|
||||
func (el *eventLog) Printf(format string, a ...interface{}) {
|
||||
el.printf(false, format, a...)
|
||||
}
|
||||
|
||||
func (el *eventLog) Errorf(format string, a ...interface{}) {
|
||||
el.printf(true, format, a...)
|
||||
}
|
||||
|
||||
func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
|
||||
e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
|
||||
el.mu.Lock()
|
||||
e.Elapsed, e.NewDay = el.delta(e.When)
|
||||
if len(el.events) < maxEventsPerLog {
|
||||
el.events = append(el.events, e)
|
||||
} else {
|
||||
// Discard the oldest event.
|
||||
if el.discarded == 0 {
|
||||
// el.discarded starts at two to count for the event it
|
||||
// is replacing, plus the next one that we are about to
|
||||
// drop.
|
||||
el.discarded = 2
|
||||
} else {
|
||||
el.discarded++
|
||||
}
|
||||
// TODO(sameer): if this causes allocations on a critical path,
|
||||
// change eventLog.What to be a fmt.Stringer, as in trace.go.
|
||||
el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
|
||||
// The timestamp of the discarded meta-event should be
|
||||
// the time of the last event it is representing.
|
||||
el.events[0].When = el.events[1].When
|
||||
copy(el.events[1:], el.events[2:])
|
||||
el.events[maxEventsPerLog-1] = e
|
||||
}
|
||||
if e.IsErr {
|
||||
el.LastErrorTime = e.When
|
||||
}
|
||||
el.mu.Unlock()
|
||||
}
|
||||
|
||||
func (el *eventLog) ref() {
|
||||
atomic.AddInt32(&el.refs, 1)
|
||||
}
|
||||
|
||||
func (el *eventLog) unref() {
|
||||
if atomic.AddInt32(&el.refs, -1) == 0 {
|
||||
freeEventLog(el)
|
||||
}
|
||||
}
|
||||
|
||||
func (el *eventLog) When() string {
|
||||
return el.Start.Format("2006/01/02 15:04:05.000000")
|
||||
}
|
||||
|
||||
func (el *eventLog) ElapsedTime() string {
|
||||
elapsed := time.Since(el.Start)
|
||||
return fmt.Sprintf("%.6f", elapsed.Seconds())
|
||||
}
|
||||
|
||||
func (el *eventLog) Stack() string {
|
||||
buf := new(bytes.Buffer)
|
||||
tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
|
||||
printStackRecord(tw, el.stack)
|
||||
tw.Flush()
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// printStackRecord prints the function + source line information
|
||||
// for a single stack trace.
|
||||
// Adapted from runtime/pprof/pprof.go.
|
||||
func printStackRecord(w io.Writer, stk []uintptr) {
|
||||
for _, pc := range stk {
|
||||
f := runtime.FuncForPC(pc)
|
||||
if f == nil {
|
||||
continue
|
||||
}
|
||||
file, line := f.FileLine(pc)
|
||||
name := f.Name()
|
||||
// Hide runtime.goexit and any runtime functions at the beginning.
|
||||
if strings.HasPrefix(name, "runtime.") {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
|
||||
}
|
||||
}
|
||||
|
||||
func (el *eventLog) Events() []logEntry {
|
||||
el.mu.RLock()
|
||||
defer el.mu.RUnlock()
|
||||
return el.events
|
||||
}
|
||||
|
||||
// freeEventLogs is a freelist of *eventLog
|
||||
var freeEventLogs = make(chan *eventLog, 1000)
|
||||
|
||||
// newEventLog returns a event log ready to use.
|
||||
func newEventLog() *eventLog {
|
||||
select {
|
||||
case el := <-freeEventLogs:
|
||||
return el
|
||||
default:
|
||||
return new(eventLog)
|
||||
}
|
||||
}
|
||||
|
||||
// freeEventLog adds el to freeEventLogs if there's room.
|
||||
// This is non-blocking.
|
||||
func freeEventLog(el *eventLog) {
|
||||
el.reset()
|
||||
select {
|
||||
case freeEventLogs <- el:
|
||||
default:
|
||||
}
|
||||
}
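// exampleEventLogLifecycle is an illustrative sketch (not part of the original
// file) showing how the freelist above is exercised: an eventLog is taken from
// the pool, populated, logged to, and then released via unref once its
// reference count drops to zero.
func exampleEventLogLifecycle() {
	el := newEventLog() // reuses a pooled *eventLog if one is available
	el.ref()
	el.Family, el.Title, el.Start = "exampleFamily", "exampleTitle", time.Now()
	el.Printf("step %d finished", 1)
	el.Errorf("step %d failed", 2)
	_ = el.hasRecentError(time.Now(), time.Minute) // true: an error was just recorded
	el.unref() // refs hits zero, so the log is reset and returned to freeEventLogs
}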
|
||||
|
||||
const eventsHTML = `
|
||||
<html>
|
||||
<head>
|
||||
<title>events</title>
|
||||
</head>
|
||||
<style type="text/css">
|
||||
body {
|
||||
font-family: sans-serif;
|
||||
}
|
||||
table#req-status td.family {
|
||||
padding-right: 2em;
|
||||
}
|
||||
table#req-status td.active {
|
||||
padding-right: 1em;
|
||||
}
|
||||
table#req-status td.empty {
|
||||
color: #aaa;
|
||||
}
|
||||
table#reqs {
|
||||
margin-top: 1em;
|
||||
}
|
||||
table#reqs tr.first {
|
||||
{{if $.Expanded}}font-weight: bold;{{end}}
|
||||
}
|
||||
table#reqs td {
|
||||
font-family: monospace;
|
||||
}
|
||||
table#reqs td.when {
|
||||
text-align: right;
|
||||
white-space: nowrap;
|
||||
}
|
||||
table#reqs td.elapsed {
|
||||
padding: 0 0.5em;
|
||||
text-align: right;
|
||||
white-space: pre;
|
||||
width: 10em;
|
||||
}
|
||||
address {
|
||||
font-size: smaller;
|
||||
margin-top: 5em;
|
||||
}
|
||||
</style>
|
||||
<body>
|
||||
|
||||
<h1>/debug/events</h1>
|
||||
|
||||
<table id="req-status">
|
||||
{{range $i, $fam := .Families}}
|
||||
<tr>
|
||||
<td class="family">{{$fam}}</td>
|
||||
|
||||
{{range $j, $bucket := $.Buckets}}
|
||||
{{$n := index $.Counts $i $j}}
|
||||
<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
|
||||
{{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
|
||||
[{{$n}} {{$bucket.String}}]
|
||||
{{if $n}}</a>{{end}}
|
||||
</td>
|
||||
{{end}}
|
||||
|
||||
</tr>{{end}}
|
||||
</table>
|
||||
|
||||
{{if $.EventLogs}}
|
||||
<hr />
|
||||
<h3>Family: {{$.Family}}</h3>
|
||||
|
||||
{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
|
||||
[Summary]{{if $.Expanded}}</a>{{end}}
|
||||
|
||||
{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
|
||||
[Expanded]{{if not $.Expanded}}</a>{{end}}
|
||||
|
||||
<table id="reqs">
|
||||
<tr><th>When</th><th>Elapsed</th></tr>
|
||||
{{range $el := $.EventLogs}}
|
||||
<tr class="first">
|
||||
<td class="when">{{$el.When}}</td>
|
||||
<td class="elapsed">{{$el.ElapsedTime}}</td>
|
||||
<td>{{$el.Title}}
|
||||
</tr>
|
||||
{{if $.Expanded}}
|
||||
<tr>
|
||||
<td class="when"></td>
|
||||
<td class="elapsed"></td>
|
||||
<td><pre>{{$el.Stack|trimSpace}}</pre></td>
|
||||
</tr>
|
||||
{{range $el.Events}}
|
||||
<tr>
|
||||
<td class="when">{{.WhenString}}</td>
|
||||
<td class="elapsed">{{elapsed .Elapsed}}</td>
|
||||
<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
</table>
|
||||
{{end}}
|
||||
</body>
|
||||
</html>
|
||||
`
|
|
@ -0,0 +1,356 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package trace
|
||||
|
||||
// This file implements histogramming for RPC statistics collection.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"log"
|
||||
"math"
|
||||
|
||||
"golang.org/x/net/internal/timeseries"
|
||||
)
|
||||
|
||||
const (
|
||||
bucketCount = 38
|
||||
)
|
||||
|
||||
// histogram keeps counts of values in buckets that are spaced
|
||||
// out in powers of 2: 0-1, 2-3, 4-7...
|
||||
// histogram implements timeseries.Observable
|
||||
type histogram struct {
|
||||
sum int64 // running total of measurements
|
||||
sumOfSquares float64 // running total of squares of measurements
|
||||
buckets []int64 // bucketed values for histogram
|
||||
value int // holds a single value as an optimization
|
||||
valueCount int64 // number of values recorded for single value
|
||||
}
|
||||
|
||||
// addMeasurement records a single value observation in the histogram.
|
||||
func (h *histogram) addMeasurement(value int64) {
|
||||
// TODO: assert invariant
|
||||
h.sum += value
|
||||
h.sumOfSquares += float64(value) * float64(value)
|
||||
|
||||
bucketIndex := getBucket(value)
|
||||
|
||||
if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
|
||||
h.value = bucketIndex
|
||||
h.valueCount++
|
||||
} else {
|
||||
h.allocateBuckets()
|
||||
h.buckets[bucketIndex]++
|
||||
}
|
||||
}
|
||||
|
||||
func (h *histogram) allocateBuckets() {
|
||||
if h.buckets == nil {
|
||||
h.buckets = make([]int64, bucketCount)
|
||||
h.buckets[h.value] = h.valueCount
|
||||
h.value = 0
|
||||
h.valueCount = -1
|
||||
}
|
||||
}
|
||||
|
||||
func log2(i int64) int {
|
||||
n := 0
|
||||
for ; i >= 0x100; i >>= 8 {
|
||||
n += 8
|
||||
}
|
||||
for ; i > 0; i >>= 1 {
|
||||
n += 1
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func getBucket(i int64) (index int) {
|
||||
index = log2(i) - 1
|
||||
if index < 0 {
|
||||
index = 0
|
||||
}
|
||||
if index >= bucketCount {
|
||||
index = bucketCount - 1
|
||||
}
|
||||
return
|
||||
}
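// exampleBucketing is an illustrative sketch (not part of the original file)
// showing how observed values map onto the power-of-two buckets described
// above: 0 and 1 fall into bucket 0, 2-3 into bucket 1, 4-7 into bucket 2,
// and 512-1023 into bucket 9.
func exampleBucketing() map[int64]int {
	buckets := make(map[int64]int)
	for _, v := range []int64{0, 1, 2, 3, 4, 7, 8, 1023} {
		buckets[v] = getBucket(v)
	}
	return buckets // {0:0, 1:0, 2:1, 3:1, 4:2, 7:2, 8:3, 1023:9}
}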
|
||||
|
||||
// Total returns the number of recorded observations.
|
||||
func (h *histogram) total() (total int64) {
|
||||
if h.valueCount >= 0 {
|
||||
total = h.valueCount
|
||||
}
|
||||
for _, val := range h.buckets {
|
||||
total += int64(val)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Average returns the average value of recorded observations.
|
||||
func (h *histogram) average() float64 {
|
||||
t := h.total()
|
||||
if t == 0 {
|
||||
return 0
|
||||
}
|
||||
return float64(h.sum) / float64(t)
|
||||
}
|
||||
|
||||
// Variance returns the variance of recorded observations.
|
||||
func (h *histogram) variance() float64 {
|
||||
t := float64(h.total())
|
||||
if t == 0 {
|
||||
return 0
|
||||
}
|
||||
s := float64(h.sum) / t
|
||||
return h.sumOfSquares/t - s*s
|
||||
}
|
||||
|
||||
// StandardDeviation returns the standard deviation of recorded observations.
|
||||
func (h *histogram) standardDeviation() float64 {
|
||||
return math.Sqrt(h.variance())
|
||||
}
|
||||
|
||||
// PercentileBoundary estimates the value that the given fraction of recorded
|
||||
// observations are less than.
|
||||
func (h *histogram) percentileBoundary(percentile float64) int64 {
|
||||
total := h.total()
|
||||
|
||||
// Corner cases (make sure result is strictly less than Total())
|
||||
if total == 0 {
|
||||
return 0
|
||||
} else if total == 1 {
|
||||
return int64(h.average())
|
||||
}
|
||||
|
||||
percentOfTotal := round(float64(total) * percentile)
|
||||
var runningTotal int64
|
||||
|
||||
for i := range h.buckets {
|
||||
value := h.buckets[i]
|
||||
runningTotal += value
|
||||
if runningTotal == percentOfTotal {
|
||||
// We hit an exact bucket boundary. If the next bucket has data, it is a
|
||||
// good estimate of the value. If the bucket is empty, we interpolate the
|
||||
// midpoint between the next bucket's boundary and the next non-zero
|
||||
// bucket. If the remaining buckets are all empty, then we use the
|
||||
// boundary for the next bucket as the estimate.
|
||||
j := uint8(i + 1)
|
||||
min := bucketBoundary(j)
|
||||
if runningTotal < total {
|
||||
for h.buckets[j] == 0 {
|
||||
j++
|
||||
}
|
||||
}
|
||||
max := bucketBoundary(j)
|
||||
return min + round(float64(max-min)/2)
|
||||
} else if runningTotal > percentOfTotal {
|
||||
// The value is in this bucket. Interpolate the value.
|
||||
delta := runningTotal - percentOfTotal
|
||||
percentBucket := float64(value-delta) / float64(value)
|
||||
bucketMin := bucketBoundary(uint8(i))
|
||||
nextBucketMin := bucketBoundary(uint8(i + 1))
|
||||
bucketSize := nextBucketMin - bucketMin
|
||||
return bucketMin + round(percentBucket*float64(bucketSize))
|
||||
}
|
||||
}
|
||||
return bucketBoundary(bucketCount - 1)
|
||||
}
|
||||
|
||||
// Median returns the estimated median of the observed values.
|
||||
func (h *histogram) median() int64 {
|
||||
return h.percentileBoundary(0.5)
|
||||
}
|
||||
|
||||
// Add adds other to h.
|
||||
func (h *histogram) Add(other timeseries.Observable) {
|
||||
o := other.(*histogram)
|
||||
if o.valueCount == 0 {
|
||||
// Other histogram is empty
|
||||
} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
|
||||
// Both have a single bucketed value, aggregate them
|
||||
h.valueCount += o.valueCount
|
||||
} else {
|
||||
// Two different values necessitate buckets in this histogram
|
||||
h.allocateBuckets()
|
||||
if o.valueCount >= 0 {
|
||||
h.buckets[o.value] += o.valueCount
|
||||
} else {
|
||||
for i := range h.buckets {
|
||||
h.buckets[i] += o.buckets[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
h.sumOfSquares += o.sumOfSquares
|
||||
h.sum += o.sum
|
||||
}
|
||||
|
||||
// Clear resets the histogram to an empty state, removing all observed values.
|
||||
func (h *histogram) Clear() {
|
||||
h.buckets = nil
|
||||
h.value = 0
|
||||
h.valueCount = 0
|
||||
h.sum = 0
|
||||
h.sumOfSquares = 0
|
||||
}
|
||||
|
||||
// CopyFrom copies from other, which must be a *histogram, into h.
|
||||
func (h *histogram) CopyFrom(other timeseries.Observable) {
|
||||
o := other.(*histogram)
|
||||
if o.valueCount == -1 {
|
||||
h.allocateBuckets()
|
||||
copy(h.buckets, o.buckets)
|
||||
}
|
||||
h.sum = o.sum
|
||||
h.sumOfSquares = o.sumOfSquares
|
||||
h.value = o.value
|
||||
h.valueCount = o.valueCount
|
||||
}
|
||||
|
||||
// Multiply scales the histogram by the specified ratio.
|
||||
func (h *histogram) Multiply(ratio float64) {
|
||||
if h.valueCount == -1 {
|
||||
for i := range h.buckets {
|
||||
h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
|
||||
}
|
||||
} else {
|
||||
h.valueCount = int64(float64(h.valueCount) * ratio)
|
||||
}
|
||||
h.sum = int64(float64(h.sum) * ratio)
|
||||
h.sumOfSquares = h.sumOfSquares * ratio
|
||||
}
|
||||
|
||||
// New creates a new histogram.
|
||||
func (h *histogram) New() timeseries.Observable {
|
||||
r := new(histogram)
|
||||
r.Clear()
|
||||
return r
|
||||
}
|
||||
|
||||
func (h *histogram) String() string {
|
||||
return fmt.Sprintf("%d, %f, %d, %d, %v",
|
||||
h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
|
||||
}
|
||||
|
||||
// round returns the closest int64 to the argument
|
||||
func round(in float64) int64 {
|
||||
return int64(math.Floor(in + 0.5))
|
||||
}
|
||||
|
||||
// bucketBoundary returns the first value in the bucket.
|
||||
func bucketBoundary(bucket uint8) int64 {
|
||||
if bucket == 0 {
|
||||
return 0
|
||||
}
|
||||
return 1 << bucket
|
||||
}
|
||||
|
||||
// bucketData holds data about a specific bucket for use in distTmpl.
|
||||
type bucketData struct {
|
||||
Lower, Upper int64
|
||||
N int64
|
||||
Pct, CumulativePct float64
|
||||
GraphWidth int
|
||||
}
|
||||
|
||||
// data holds data about a Distribution for use in distTmpl.
|
||||
type data struct {
|
||||
Buckets []*bucketData
|
||||
Count, Median int64
|
||||
Mean, StandardDeviation float64
|
||||
}
|
||||
|
||||
// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
|
||||
const maxHTMLBarWidth = 350.0
|
||||
|
||||
// newData returns data representing h for use in distTmpl.
|
||||
func (h *histogram) newData() *data {
|
||||
// Force the allocation of buckets to simplify the rendering implementation
|
||||
h.allocateBuckets()
|
||||
// We scale the bars on the right so that the largest bar is
|
||||
// maxHTMLBarWidth pixels in width.
|
||||
maxBucket := int64(0)
|
||||
for _, n := range h.buckets {
|
||||
if n > maxBucket {
|
||||
maxBucket = n
|
||||
}
|
||||
}
|
||||
total := h.total()
|
||||
barsizeMult := maxHTMLBarWidth / float64(maxBucket)
|
||||
var pctMult float64
|
||||
if total == 0 {
|
||||
pctMult = 1.0
|
||||
} else {
|
||||
pctMult = 100.0 / float64(total)
|
||||
}
|
||||
|
||||
buckets := make([]*bucketData, len(h.buckets))
|
||||
runningTotal := int64(0)
|
||||
for i, n := range h.buckets {
|
||||
if n == 0 {
|
||||
continue
|
||||
}
|
||||
runningTotal += n
|
||||
var upperBound int64
|
||||
if i < bucketCount-1 {
|
||||
upperBound = bucketBoundary(uint8(i + 1))
|
||||
} else {
|
||||
upperBound = math.MaxInt64
|
||||
}
|
||||
buckets[i] = &bucketData{
|
||||
Lower: bucketBoundary(uint8(i)),
|
||||
Upper: upperBound,
|
||||
N: n,
|
||||
Pct: float64(n) * pctMult,
|
||||
CumulativePct: float64(runningTotal) * pctMult,
|
||||
GraphWidth: int(float64(n) * barsizeMult),
|
||||
}
|
||||
}
|
||||
return &data{
|
||||
Buckets: buckets,
|
||||
Count: total,
|
||||
Median: h.median(),
|
||||
Mean: h.average(),
|
||||
StandardDeviation: h.standardDeviation(),
|
||||
}
|
||||
}
|
||||
|
||||
func (h *histogram) html() template.HTML {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := distTmpl.Execute(buf, h.newData()); err != nil {
|
||||
buf.Reset()
|
||||
log.Printf("net/trace: couldn't execute template: %v", err)
|
||||
}
|
||||
return template.HTML(buf.String())
|
||||
}
|
||||
|
||||
// Input: data
|
||||
var distTmpl = template.Must(template.New("distTmpl").Parse(`
|
||||
<table>
|
||||
<tr>
|
||||
<td style="padding:0.25em">Count: {{.Count}}</td>
|
||||
<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
|
||||
<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
|
||||
<td style="padding:0.25em">Median: {{.Median}}</td>
|
||||
</tr>
|
||||
</table>
|
||||
<hr>
|
||||
<table>
|
||||
{{range $b := .Buckets}}
|
||||
{{if $b}}
|
||||
<tr>
|
||||
<td style="padding:0 0 0 0.25em">[</td>
|
||||
<td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
|
||||
<td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
|
||||
<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
|
||||
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
|
||||
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
|
||||
<td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{end}}
|
||||
</table>
|
||||
`))
|
File diff suppressed because it is too large
|
@ -0,0 +1,46 @@
|
|||
# How to contribute
|
||||
|
||||
We definitely welcome patches and contributions to grpc! Here are some guidelines
|
||||
and information about how to do so.
|
||||
|
||||
## Sending patches
|
||||
|
||||
### Getting started
|
||||
|
||||
1. Check out the code:
|
||||
|
||||
$ go get google.golang.org/grpc
|
||||
$ cd $GOPATH/src/google.golang.org/grpc
|
||||
|
||||
1. Create a fork of the grpc-go repository.
|
||||
1. Add your fork as a remote:
|
||||
|
||||
$ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git
|
||||
|
||||
1. Make changes, commit them.
|
||||
1. Run the test suite:
|
||||
|
||||
$ make test
|
||||
|
||||
1. Push your changes to your fork:
|
||||
|
||||
$ git push fork ...
|
||||
|
||||
1. Open a pull request.
|
||||
|
||||
## Legal requirements
|
||||
|
||||
In order to protect both you and ourselves, you will need to sign the
|
||||
[Contributor License Agreement](https://cla.developers.google.com/clas).
|
||||
|
||||
## Filing Issues
|
||||
When filing an issue, make sure to answer these five questions:
|
||||
|
||||
1. What version of Go are you using (`go version`)?
|
||||
2. What operating system and processor architecture are you using?
|
||||
3. What did you do?
|
||||
4. What did you expect to see?
|
||||
5. What did you see instead?
|
||||
|
||||
### Contributing code
|
||||
Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.
|
|
@ -0,0 +1,28 @@
|
|||
Copyright 2014, Google Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,52 @@
|
|||
all: test testrace
|
||||
|
||||
deps:
|
||||
go get -d -v google.golang.org/grpc/...
|
||||
|
||||
updatedeps:
|
||||
go get -d -v -u -f google.golang.org/grpc/...
|
||||
|
||||
testdeps:
|
||||
go get -d -v -t google.golang.org/grpc/...
|
||||
|
||||
updatetestdeps:
|
||||
go get -d -v -t -u -f google.golang.org/grpc/...
|
||||
|
||||
build: deps
|
||||
go build google.golang.org/grpc/...
|
||||
|
||||
proto:
|
||||
@ if ! which protoc > /dev/null; then \
|
||||
echo "error: protoc not installed" >&2; \
|
||||
exit 1; \
|
||||
fi
|
||||
go get -u -v github.com/golang/protobuf/protoc-gen-go
|
||||
# use $$dir as the root for all proto files in the same directory
|
||||
for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \
|
||||
protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \
|
||||
done
|
||||
|
||||
test: testdeps
|
||||
go test -v -cpu 1,4 google.golang.org/grpc/...
|
||||
|
||||
testrace: testdeps
|
||||
go test -v -race -cpu 1,4 google.golang.org/grpc/...
|
||||
|
||||
clean:
|
||||
go clean -i google.golang.org/grpc/...
|
||||
|
||||
coverage: testdeps
|
||||
./coverage.sh --coveralls
|
||||
|
||||
.PHONY: \
|
||||
all \
|
||||
deps \
|
||||
updatedeps \
|
||||
testdeps \
|
||||
updatetestdeps \
|
||||
build \
|
||||
proto \
|
||||
test \
|
||||
testrace \
|
||||
clean \
|
||||
coverage
|
|
@ -0,0 +1,22 @@
|
|||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the gRPC project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of gRPC, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of gRPC. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of gRPC or any code incorporated within this
|
||||
implementation of gRPC constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of gRPC
|
||||
shall terminate as of the date such litigation is filed.
|
|
@ -0,0 +1,32 @@
|
|||
# gRPC-Go
|
||||
|
||||
[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
|
||||
|
||||
The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.
|
||||
|
||||
Installation
|
||||
------------
|
||||
|
||||
To install this package, you need to install Go and set up your Go workspace on your computer. The simplest way to install the library is to run:
|
||||
|
||||
```
|
||||
$ go get google.golang.org/grpc
|
||||
```
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
|
||||
This requires Go 1.5 or later.
|
||||
|
||||
Constraints
|
||||
-----------
|
||||
The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), it needs to be discussed with the gRPC-Go authors and consultants first.
|
||||
|
||||
Documentation
|
||||
-------------
|
||||
See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
|
||||
|
||||
Status
|
||||
------
|
||||
Beta release
|
||||
|
|
@ -0,0 +1,80 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DefaultBackoffConfig uses values specified for backoff in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
var (
|
||||
DefaultBackoffConfig = BackoffConfig{
|
||||
MaxDelay: 120 * time.Second,
|
||||
baseDelay: 1.0 * time.Second,
|
||||
factor: 1.6,
|
||||
jitter: 0.2,
|
||||
}
|
||||
)
|
||||
|
||||
// backoffStrategy defines the methodology for backing off after a grpc
|
||||
// connection failure.
|
||||
//
|
||||
// This is unexported until the gRPC project decides whether or not to allow
|
||||
// alternative backoff strategies. Once a decision is made, this type and its
|
||||
// method may be exported.
|
||||
type backoffStrategy interface {
|
||||
// backoff returns the amount of time to wait before the next retry given
|
||||
// the number of consecutive failures.
|
||||
backoff(retries int) time.Duration
|
||||
}
|
||||
|
||||
// BackoffConfig defines the parameters for the default gRPC backoff strategy.
|
||||
type BackoffConfig struct {
|
||||
// MaxDelay is the upper bound of backoff delay.
|
||||
MaxDelay time.Duration
|
||||
|
||||
// TODO(stevvooe): The following fields are not exported, as allowing
|
||||
// changes would violate the current gRPC specification for backoff. If
|
||||
// gRPC decides to allow more interesting backoff strategies, these fields
|
||||
// may be opened up in the future.
|
||||
|
||||
// baseDelay is the amount of time to wait before retrying after the first
|
||||
// failure.
|
||||
baseDelay time.Duration
|
||||
|
||||
// factor is applied to the backoff after each retry.
|
||||
factor float64
|
||||
|
||||
// jitter provides a range to randomize backoff delays.
|
||||
jitter float64
|
||||
}
|
||||
|
||||
func setDefaults(bc *BackoffConfig) {
|
||||
md := bc.MaxDelay
|
||||
*bc = DefaultBackoffConfig
|
||||
|
||||
if md > 0 {
|
||||
bc.MaxDelay = md
|
||||
}
|
||||
}
|
||||
|
||||
func (bc BackoffConfig) backoff(retries int) (t time.Duration) {
|
||||
if retries == 0 {
|
||||
return bc.baseDelay
|
||||
}
|
||||
backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
|
||||
for backoff < max && retries > 0 {
|
||||
backoff *= bc.factor
|
||||
retries--
|
||||
}
|
||||
if backoff > max {
|
||||
backoff = max
|
||||
}
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(backoff)
|
||||
}
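// exampleBackoffDelays is an illustrative sketch (not part of the original
// file) showing the delays DefaultBackoffConfig produces for the first few
// retries. Ignoring jitter, the nominal progression is 1s, 1.6s, 2.56s,
// 4.096s, ... capped at MaxDelay (120s); each returned value is randomized
// by up to ±20%.
func exampleBackoffDelays(n int) []time.Duration {
	bc := DefaultBackoffConfig
	delays := make([]time.Duration, 0, n)
	for retries := 0; retries < n; retries++ {
		delays = append(delays, bc.backoff(retries))
	}
	return delays
}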
|
|
@ -0,0 +1,385 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/naming"
|
||||
)
|
||||
|
||||
// Address represents a server the client connects to.
|
||||
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
||||
type Address struct {
|
||||
// Addr is the server address on which a connection will be established.
|
||||
Addr string
|
||||
// Metadata is the information associated with Addr, which may be used
|
||||
// to make load balancing decisions.
|
||||
Metadata interface{}
|
||||
}
|
||||
|
||||
// BalancerGetOptions configures a Get call.
|
||||
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
||||
type BalancerGetOptions struct {
|
||||
// BlockingWait specifies whether Get should block when there is no
|
||||
// connected address.
|
||||
BlockingWait bool
|
||||
}
|
||||
|
||||
// Balancer chooses network addresses for RPCs.
|
||||
// This is the EXPERIMENTAL API and may be changed or extended in the future.
|
||||
type Balancer interface {
|
||||
// Start does the initialization work to bootstrap a Balancer. For example,
|
||||
// this function may start the name resolution and watch the updates. It will
|
||||
// be called when dialing.
|
||||
Start(target string) error
|
||||
// Up informs the Balancer that gRPC has a connection to the server at
|
||||
// addr. It returns down which is called once the connection to addr gets
|
||||
// lost or closed.
|
||||
// TODO: It is not clear how to construct and take advantage of the meaningful error
|
||||
// parameter for down. Need realistic demands to guide.
|
||||
Up(addr Address) (down func(error))
|
||||
// Get gets the address of a server for the RPC corresponding to ctx.
|
||||
// i) If it returns a connected address, gRPC internals issues the RPC on the
|
||||
// connection to this address;
|
||||
// ii) If it returns an address on which the connection is under construction
|
||||
// (initiated by Notify(...)) but not connected, gRPC internals
|
||||
// * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
|
||||
// Shutdown state;
|
||||
// or
|
||||
// * issues RPC on the connection otherwise.
|
||||
// iii) If it returns an address on which the connection does not exist, gRPC
|
||||
// internals treats it as an error and will fail the corresponding RPC.
|
||||
//
|
||||
// Therefore, the following is the recommended rule when writing a custom Balancer.
|
||||
// If opts.BlockingWait is true, it should return a connected address or
|
||||
// block if there is no connected address. It should respect the timeout or
|
||||
// cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
|
||||
// RPCs), it should return an address it has notified via Notify(...) immediately
|
||||
// instead of blocking.
|
||||
//
|
||||
// The function returns put which is called once the rpc has completed or failed.
|
||||
// put can collect and report RPC stats to a remote load balancer.
|
||||
//
|
||||
// This function should only return the errors Balancer cannot recover by itself.
|
||||
// gRPC internals will fail the RPC if an error is returned.
|
||||
Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
|
||||
// Notify returns a channel that is used by gRPC internals to watch the addresses
|
||||
// gRPC needs to connect. The addresses might be from a name resolver or remote
|
||||
// load balancer. gRPC internals will compare it with the existing connected
|
||||
// addresses. If the address Balancer notified is not in the existing connected
|
||||
// addresses, gRPC starts to connect the address. If an address in the existing
|
||||
// connected addresses is not in the notification list, the corresponding connection
|
||||
// is shutdown gracefully. Otherwise, there are no operations to take. Note that
|
||||
// the Address slice must be the full list of the Addresses which should be connected.
|
||||
// It is NOT delta.
|
||||
Notify() <-chan []Address
|
||||
// Close shuts down the balancer.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// downErr implements net.Error. It is constructed by gRPC internals and passed to the down
|
||||
// call of Balancer.
|
||||
type downErr struct {
|
||||
timeout bool
|
||||
temporary bool
|
||||
desc string
|
||||
}
|
||||
|
||||
func (e downErr) Error() string { return e.desc }
|
||||
func (e downErr) Timeout() bool { return e.timeout }
|
||||
func (e downErr) Temporary() bool { return e.temporary }
|
||||
|
||||
func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
|
||||
return downErr{
|
||||
timeout: timeout,
|
||||
temporary: temporary,
|
||||
desc: fmt.Sprintf(format, a...),
|
||||
}
|
||||
}
|
||||
|
||||
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
|
||||
// the name resolution updates and updates the addresses available correspondingly.
|
||||
func RoundRobin(r naming.Resolver) Balancer {
|
||||
return &roundRobin{r: r}
|
||||
}
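// exampleDialWithRoundRobin is an illustrative sketch (not part of the
// original file) showing how a caller might wire RoundRobin to a hypothetical
// naming.Resolver when dialing; the resolver drives the Notify channel that
// keeps the balancer's address list up to date.
func exampleDialWithRoundRobin(target string, r naming.Resolver) (*ClientConn, error) {
	return Dial(target,
		WithBalancer(RoundRobin(r)),
		WithInsecure(),
	)
}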
|
||||
|
||||
type addrInfo struct {
|
||||
addr Address
|
||||
connected bool
|
||||
}
|
||||
|
||||
type roundRobin struct {
|
||||
r naming.Resolver
|
||||
w naming.Watcher
|
||||
addrs []*addrInfo // all the addresses the client should potentially connect to
|
||||
mu sync.Mutex
|
||||
addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
|
||||
next int // index of the next address to return for Get()
|
||||
waitCh chan struct{} // the channel to block when there is no connected address available
|
||||
done bool // The Balancer is closed.
|
||||
}
|
||||
|
||||
func (rr *roundRobin) watchAddrUpdates() error {
|
||||
updates, err := rr.w.Next()
|
||||
if err != nil {
|
||||
grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err)
|
||||
return err
|
||||
}
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
for _, update := range updates {
|
||||
addr := Address{
|
||||
Addr: update.Addr,
|
||||
Metadata: update.Metadata,
|
||||
}
|
||||
switch update.Op {
|
||||
case naming.Add:
|
||||
var exist bool
|
||||
for _, v := range rr.addrs {
|
||||
if addr == v.addr {
|
||||
exist = true
|
||||
grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr)
|
||||
break
|
||||
}
|
||||
}
|
||||
if exist {
|
||||
continue
|
||||
}
|
||||
rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
|
||||
case naming.Delete:
|
||||
for i, v := range rr.addrs {
|
||||
if addr == v.addr {
|
||||
copy(rr.addrs[i:], rr.addrs[i+1:])
|
||||
rr.addrs = rr.addrs[:len(rr.addrs)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
grpclog.Println("Unknown update.Op ", update.Op)
|
||||
}
|
||||
}
|
||||
// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
|
||||
open := make([]Address, len(rr.addrs))
|
||||
for i, v := range rr.addrs {
|
||||
open[i] = v.addr
|
||||
}
|
||||
if rr.done {
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
rr.addrCh <- open
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rr *roundRobin) Start(target string) error {
|
||||
if rr.r == nil {
|
||||
// If there is no name resolver installed, there is no need to
|
||||
// do name resolution. In this case, target is added into rr.addrs
|
||||
// as the only address available and rr.addrCh stays nil.
|
||||
rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
|
||||
return nil
|
||||
}
|
||||
w, err := rr.r.Resolve(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rr.w = w
|
||||
rr.addrCh = make(chan []Address)
|
||||
go func() {
|
||||
for {
|
||||
if err := rr.watchAddrUpdates(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Up sets the connected state of addr and sends notification if there are pending
|
||||
// Get() calls.
|
||||
func (rr *roundRobin) Up(addr Address) func(error) {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
var cnt int
|
||||
for _, a := range rr.addrs {
|
||||
if a.addr == addr {
|
||||
if a.connected {
|
||||
return nil
|
||||
}
|
||||
a.connected = true
|
||||
}
|
||||
if a.connected {
|
||||
cnt++
|
||||
}
|
||||
}
|
||||
// addr is the only address that is connected. Notify the Get() callers who are blocking.
|
||||
if cnt == 1 && rr.waitCh != nil {
|
||||
close(rr.waitCh)
|
||||
rr.waitCh = nil
|
||||
}
|
||||
return func(err error) {
|
||||
rr.down(addr, err)
|
||||
}
|
||||
}
|
||||
|
||||
// down unsets the connected state of addr.
|
||||
func (rr *roundRobin) down(addr Address, err error) {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
for _, a := range rr.addrs {
|
||||
if addr == a.addr {
|
||||
a.connected = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the next addr in the rotation.
|
||||
func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
|
||||
var ch chan struct{}
|
||||
rr.mu.Lock()
|
||||
if rr.done {
|
||||
rr.mu.Unlock()
|
||||
err = ErrClientConnClosing
|
||||
return
|
||||
}
|
||||
|
||||
if len(rr.addrs) > 0 {
|
||||
if rr.next >= len(rr.addrs) {
|
||||
rr.next = 0
|
||||
}
|
||||
next := rr.next
|
||||
for {
|
||||
a := rr.addrs[next]
|
||||
next = (next + 1) % len(rr.addrs)
|
||||
if a.connected {
|
||||
addr = a.addr
|
||||
rr.next = next
|
||||
rr.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if next == rr.next {
|
||||
// We have iterated over all the possible addresses but none is connected.
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !opts.BlockingWait {
|
||||
if len(rr.addrs) == 0 {
|
||||
rr.mu.Unlock()
|
||||
err = fmt.Errorf("there is no address available")
|
||||
return
|
||||
}
|
||||
// Returns the next addr on rr.addrs for failfast RPCs.
|
||||
addr = rr.addrs[rr.next].addr
|
||||
rr.next++
|
||||
rr.mu.Unlock()
|
||||
return
|
||||
}
|
||||
// Wait on rr.waitCh for non-failfast RPCs.
|
||||
if rr.waitCh == nil {
|
||||
ch = make(chan struct{})
|
||||
rr.waitCh = ch
|
||||
} else {
|
||||
ch = rr.waitCh
|
||||
}
|
||||
rr.mu.Unlock()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
return
|
||||
case <-ch:
|
||||
rr.mu.Lock()
|
||||
if rr.done {
|
||||
rr.mu.Unlock()
|
||||
err = ErrClientConnClosing
|
||||
return
|
||||
}
|
||||
|
||||
if len(rr.addrs) > 0 {
|
||||
if rr.next >= len(rr.addrs) {
|
||||
rr.next = 0
|
||||
}
|
||||
next := rr.next
|
||||
for {
|
||||
a := rr.addrs[next]
|
||||
next = (next + 1) % len(rr.addrs)
|
||||
if a.connected {
|
||||
addr = a.addr
|
||||
rr.next = next
|
||||
rr.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if next == rr.next {
|
||||
// We have iterated over all the possible addresses but none is connected.
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// The newly added addr got removed by Down() again.
|
||||
if rr.waitCh == nil {
|
||||
ch = make(chan struct{})
|
||||
rr.waitCh = ch
|
||||
} else {
|
||||
ch = rr.waitCh
|
||||
}
|
||||
rr.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rr *roundRobin) Notify() <-chan []Address {
|
||||
return rr.addrCh
|
||||
}
|
||||
|
||||
func (rr *roundRobin) Close() error {
|
||||
rr.mu.Lock()
|
||||
defer rr.mu.Unlock()
|
||||
rr.done = true
|
||||
if rr.w != nil {
|
||||
rr.w.Close()
|
||||
}
|
||||
if rr.waitCh != nil {
|
||||
close(rr.waitCh)
|
||||
rr.waitCh = nil
|
||||
}
|
||||
if rr.addrCh != nil {
|
||||
close(rr.addrCh)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,226 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// recvResponse receives and parses an RPC response.
|
||||
// On error, it returns the error and indicates whether the call should be retried.
|
||||
//
|
||||
// TODO(zhaoq): Check whether the received message sequence is valid.
|
||||
func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
|
||||
// Try to acquire header metadata from the server if there is any.
|
||||
var err error
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
t.CloseStream(stream, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
c.headerMD, err = stream.Header()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p := &parser{r: stream}
|
||||
for {
|
||||
if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
c.trailerMD = stream.Trailer()
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendRequest writes out various information of an RPC such as Context and Message.
|
||||
func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
|
||||
stream, err := t.NewStream(ctx, callHdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// If err is connection error, t will be closed, no need to close stream here.
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
t.CloseStream(stream, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
var cbuf *bytes.Buffer
|
||||
if compressor != nil {
|
||||
cbuf = new(bytes.Buffer)
|
||||
}
|
||||
outBuf, err := encode(codec, args, compressor, cbuf)
|
||||
if err != nil {
|
||||
return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
|
||||
}
|
||||
err = t.Write(stream, outBuf, opts)
|
||||
// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
|
||||
// does not exist), in which case t.Write could get io.EOF from wait(...). Leave it to the
|
||||
// following recvResponse to retrieve the final status.
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
// Sent successfully.
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
// Invoke sends the RPC request on the wire and returns after response is received.
|
||||
// Invoke is called by generated code. Users can also call Invoke directly when it
|
||||
// is really needed in their use cases.
|
||||
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
|
||||
c := defaultCallInfo
|
||||
for _, o := range opts {
|
||||
if err := o.before(&c); err != nil {
|
||||
return toRPCErr(err)
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
for _, o := range opts {
|
||||
o.after(&c)
|
||||
}
|
||||
}()
|
||||
if EnableTracing {
|
||||
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
|
||||
defer c.traceInfo.tr.Finish()
|
||||
c.traceInfo.firstLine.client = true
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
|
||||
}
|
||||
c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
|
||||
// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
c.traceInfo.tr.SetError()
|
||||
}
|
||||
}()
|
||||
}
|
||||
topts := &transport.Options{
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
for {
|
||||
var (
|
||||
err error
|
||||
t transport.ClientTransport
|
||||
stream *transport.Stream
|
||||
// Record the put handler from Balancer.Get(...). It is called once the
|
||||
// RPC has completed or failed.
|
||||
put func()
|
||||
)
|
||||
// TODO(zhaoq): Need a formal spec of fail-fast.
|
||||
callHdr := &transport.CallHdr{
|
||||
Host: cc.authority,
|
||||
Method: method,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
}
|
||||
gopts := BalancerGetOptions{
|
||||
BlockingWait: !c.failFast,
|
||||
}
|
||||
t, put, err = cc.getTransport(ctx, gopts)
|
||||
if err != nil {
|
||||
// TODO(zhaoq): Probably revisit the error handling.
|
||||
if _, ok := err.(*rpcError); ok {
|
||||
return err
|
||||
}
|
||||
if err == errConnClosing || err == errConnUnavailable {
|
||||
if c.failFast {
|
||||
return Errorf(codes.Unavailable, "%v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// All the other errors are treated as Internal errors.
|
||||
return Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
|
||||
}
|
||||
stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
|
||||
if err != nil {
|
||||
if put != nil {
|
||||
put()
|
||||
put = nil
|
||||
}
|
||||
// Retry a non-failfast RPC when
|
||||
// i) there is a connection error; or
|
||||
// ii) the server started to drain before this RPC was initiated.
|
||||
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
|
||||
if c.failFast {
|
||||
return toRPCErr(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
err = recvResponse(cc.dopts, t, &c, stream, reply)
|
||||
if err != nil {
|
||||
if put != nil {
|
||||
put()
|
||||
put = nil
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
|
||||
if c.failFast {
|
||||
return toRPCErr(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
|
||||
}
|
||||
t.CloseStream(stream, nil)
|
||||
if put != nil {
|
||||
put()
|
||||
put = nil
|
||||
}
|
||||
return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
|
||||
}
|
||||
}
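// exampleInvoke is an illustrative sketch (not part of the original file) of
// calling Invoke directly rather than through generated stub code. The method
// name, request, and reply values here are placeholders; in practice they come
// from the generated service definition.
func exampleInvoke(ctx context.Context, cc *ClientConn, req, reply interface{}) error {
	// Method names follow the "/package.Service/Method" form used by generated code.
	return Invoke(ctx, "/example.ExampleService/ExampleMethod", req, reply, cc)
}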
|
|
@ -0,0 +1,841 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrClientConnClosing indicates that the operation is illegal because
|
||||
// the ClientConn is closing.
|
||||
ErrClientConnClosing = errors.New("grpc: the client connection is closing")
|
||||
// ErrClientConnTimeout indicates that the ClientConn cannot establish the
|
||||
// underlying connections within the specified timeout.
|
||||
ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
|
||||
|
||||
// errNoTransportSecurity indicates that there is no transport security
|
||||
// being set for ClientConn. Users should either set one or explicitly
|
||||
// call WithInsecure DialOption to disable security.
|
||||
errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
|
||||
// errTransportCredentialsMissing indicates that users want to transmit security
|
||||
// information (e.g., oauth2 token) which requires secure connection on an insecure
|
||||
// connection.
|
||||
errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
|
||||
// errCredentialsConflict indicates that grpc.WithTransportCredentials()
|
||||
// and grpc.WithInsecure() are both called for a connection.
|
||||
errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
|
||||
// errNetworkIO indicates that the connection is down due to some network I/O error.
|
||||
errNetworkIO = errors.New("grpc: failed with network I/O error")
|
||||
// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
|
||||
errConnDrain = errors.New("grpc: the connection is drained")
|
||||
// errConnClosing indicates that the connection is closing.
|
||||
errConnClosing = errors.New("grpc: the connection is closing")
|
||||
// errConnUnavailable indicates that the connection is unavailable.
|
||||
errConnUnavailable = errors.New("grpc: the connection is unavailable")
|
||||
errNoAddr = errors.New("grpc: there is no address available to dial")
|
||||
// minimum time to give a connection to complete
|
||||
minConnectTimeout = 20 * time.Second
|
||||
)
|
||||
|
||||
// dialOptions configure a Dial call. dialOptions are set by the DialOption
|
||||
// values passed to Dial.
|
||||
type dialOptions struct {
|
||||
codec Codec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
bs backoffStrategy
|
||||
balancer Balancer
|
||||
block bool
|
||||
insecure bool
|
||||
timeout time.Duration
|
||||
copts transport.ConnectOptions
|
||||
}
|
||||
|
||||
// DialOption configures how we set up the connection.
|
||||
type DialOption func(*dialOptions)
|
||||
|
||||
// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
|
||||
func WithCodec(c Codec) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.codec = c
|
||||
}
|
||||
}
|
||||
|
||||
// WithCompressor returns a DialOption which sets a Compressor for compressing each
|
||||
// outbound message.
|
||||
func WithCompressor(cp Compressor) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.cp = cp
|
||||
}
|
||||
}
|
||||
|
||||
// WithDecompressor returns a DialOption which sets a Decompressor for decompressing each
|
||||
// inbound message.
|
||||
func WithDecompressor(dc Decompressor) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.dc = dc
|
||||
}
|
||||
}
|
||||
|
||||
// WithBalancer returns a DialOption which sets a load balancer.
|
||||
func WithBalancer(b Balancer) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.balancer = b
|
||||
}
|
||||
}
|
||||
|
||||
// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
|
||||
// when backing off after failed connection attempts.
|
||||
func WithBackoffMaxDelay(md time.Duration) DialOption {
|
||||
return WithBackoffConfig(BackoffConfig{MaxDelay: md})
|
||||
}
|
||||
|
||||
// WithBackoffConfig configures the dialer to use the provided backoff
|
||||
// parameters after connection failures.
|
||||
//
|
||||
// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
|
||||
// for use.
|
||||
func WithBackoffConfig(b BackoffConfig) DialOption {
|
||||
// Set defaults to ensure that provided BackoffConfig is valid and
|
||||
// unexported fields get default values.
|
||||
setDefaults(&b)
|
||||
return withBackoff(b)
|
||||
}
|
||||
|
||||
// withBackoff sets the backoff strategy used for retries after a
|
||||
// failed connection attempt.
|
||||
//
|
||||
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
|
||||
func withBackoff(bs backoffStrategy) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.bs = bs
|
||||
}
|
||||
}
|
||||
|
||||
// WithBlock returns a DialOption which makes the caller of Dial block until the underlying
|
||||
// connection is up. Without this, Dial returns immediately and connecting to the server
|
||||
// happens in the background.
|
||||
func WithBlock() DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.block = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithInsecure returns a DialOption which disables transport security for this ClientConn.
|
||||
// Note that transport security is required unless WithInsecure is set.
|
||||
func WithInsecure() DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.insecure = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithTransportCredentials returns a DialOption which configures a
|
||||
// connection level security credentials (e.g., TLS/SSL).
|
||||
func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.copts.TransportCredentials = creds
|
||||
}
|
||||
}
|
||||
|
||||
// WithPerRPCCredentials returns a DialOption which sets
|
||||
// credentials which will place auth state on each outbound RPC.
|
||||
func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
|
||||
}
|
||||
}
|
||||
|
||||
// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
|
||||
// initially. This is valid if and only if WithBlock() is present.
|
||||
func WithTimeout(d time.Duration) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.timeout = d
|
||||
}
|
||||
}
|
||||
|
||||
// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
|
||||
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
return f(addr, deadline.Sub(time.Now()))
|
||||
}
|
||||
return f(addr, 0)
|
||||
}
|
||||
}
|
||||
}
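// exampleUnixDialer is an illustrative sketch (not part of the original file)
// showing WithDialer used to reach a server over a Unix domain socket; the
// socket path is a placeholder.
func exampleUnixDialer() DialOption {
	return WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
		// The target address is ignored; dial a fixed local socket instead.
		return net.DialTimeout("unix", "/tmp/example.sock", timeout)
	})
}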
|
||||
|
||||
// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
|
||||
func WithUserAgent(s string) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.copts.UserAgent = s
|
||||
}
|
||||
}
|
||||
|
||||
// Dial creates a client connection to the given target.
|
||||
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
|
||||
return DialContext(context.Background(), target, opts...)
|
||||
}
|
||||
|
||||
// DialContext creates a client connection to the given target
|
||||
// using the supplied context.
|
||||
func DialContext(ctx context.Context, target string, opts ...DialOption) (*ClientConn, error) {
|
||||
cc := &ClientConn{
|
||||
target: target,
|
||||
conns: make(map[Address]*addrConn),
|
||||
}
|
||||
cc.ctx, cc.cancel = context.WithCancel(ctx)
|
||||
for _, opt := range opts {
|
||||
opt(&cc.dopts)
|
||||
}
|
||||
|
||||
// Set defaults.
|
||||
if cc.dopts.codec == nil {
|
||||
cc.dopts.codec = protoCodec{}
|
||||
}
|
||||
if cc.dopts.bs == nil {
|
||||
cc.dopts.bs = DefaultBackoffConfig
|
||||
}
|
||||
|
||||
var (
|
||||
ok bool
|
||||
addrs []Address
|
||||
)
|
||||
if cc.dopts.balancer == nil {
|
||||
// Connect to target directly if balancer is nil.
|
||||
addrs = append(addrs, Address{Addr: target})
|
||||
} else {
|
||||
if err := cc.dopts.balancer.Start(target); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ch := cc.dopts.balancer.Notify()
|
||||
if ch == nil {
|
||||
// There is no name resolver installed.
|
||||
addrs = append(addrs, Address{Addr: target})
|
||||
} else {
|
||||
addrs, ok = <-ch
|
||||
if !ok || len(addrs) == 0 {
|
||||
return nil, errNoAddr
|
||||
}
|
||||
}
|
||||
}
|
||||
waitC := make(chan error, 1)
|
||||
go func() {
|
||||
for _, a := range addrs {
|
||||
if err := cc.resetAddrConn(a, false, nil); err != nil {
|
||||
waitC <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
close(waitC)
|
||||
}()
|
||||
var timeoutCh <-chan time.Time
|
||||
if cc.dopts.timeout > 0 {
|
||||
timeoutCh = time.After(cc.dopts.timeout)
|
||||
}
|
||||
select {
|
||||
case err := <-waitC:
|
||||
if err != nil {
|
||||
cc.Close()
|
||||
return nil, err
|
||||
}
|
||||
case <-cc.ctx.Done():
|
||||
cc.Close()
|
||||
return nil, cc.ctx.Err()
|
||||
case <-timeoutCh:
|
||||
cc.Close()
|
||||
return nil, ErrClientConnTimeout
|
||||
}
|
||||
// If balancer is nil or balancer.Notify() is nil, ok will be false here.
|
||||
// The lbWatcher goroutine will not be created.
|
||||
if ok {
|
||||
go cc.lbWatcher()
|
||||
}
|
||||
colonPos := strings.LastIndex(target, ":")
|
||||
if colonPos == -1 {
|
||||
colonPos = len(target)
|
||||
}
|
||||
cc.authority = target[:colonPos]
|
||||
return cc, nil
|
||||
}
|
||||
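A minimal usage sketch combining several of the DialOptions defined above with Dial; the address, timeout, and backoff cap are illustrative assumptions.

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
)

func main() {
	// Block until the connection is up, give up after ten seconds, and cap
	// the reconnect backoff at five seconds. All values are examples only.
	conn, err := grpc.Dial(
		"127.0.0.1:8201",
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithTimeout(10*time.Second),
		grpc.WithBackoffMaxDelay(5*time.Second),
	)
	if err != nil {
		grpclog.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}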
|
||||
// ConnectivityState indicates the state of a client connection.
|
||||
type ConnectivityState int
|
||||
|
||||
const (
|
||||
// Idle indicates the ClientConn is idle.
|
||||
Idle ConnectivityState = iota
|
||||
// Connecting indicates the ClientConn is connecting.
|
||||
Connecting
|
||||
// Ready indicates the ClientConn is ready for work.
|
||||
Ready
|
||||
// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
|
||||
TransientFailure
|
||||
// Shutdown indicates the ClientConn has started shutting down.
|
||||
Shutdown
|
||||
)
|
||||
|
||||
func (s ConnectivityState) String() string {
|
||||
switch s {
|
||||
case Idle:
|
||||
return "IDLE"
|
||||
case Connecting:
|
||||
return "CONNECTING"
|
||||
case Ready:
|
||||
return "READY"
|
||||
case TransientFailure:
|
||||
return "TRANSIENT_FAILURE"
|
||||
case Shutdown:
|
||||
return "SHUTDOWN"
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown connectivity state: %d", s))
|
||||
}
|
||||
}
|
||||
|
||||
// ClientConn represents a client connection to an RPC server.
|
||||
type ClientConn struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
target string
|
||||
authority string
|
||||
dopts dialOptions
|
||||
|
||||
mu sync.RWMutex
|
||||
conns map[Address]*addrConn
|
||||
}
|
||||
|
||||
func (cc *ClientConn) lbWatcher() {
|
||||
for addrs := range cc.dopts.balancer.Notify() {
|
||||
var (
|
||||
add []Address // Addresses that need connections set up.
|
||||
del []*addrConn // Connections to tear down.
|
||||
)
|
||||
cc.mu.Lock()
|
||||
for _, a := range addrs {
|
||||
if _, ok := cc.conns[a]; !ok {
|
||||
add = append(add, a)
|
||||
}
|
||||
}
|
||||
for k, c := range cc.conns {
|
||||
var keep bool
|
||||
for _, a := range addrs {
|
||||
if k == a {
|
||||
keep = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !keep {
|
||||
del = append(del, c)
|
||||
delete(cc.conns, c.addr)
|
||||
}
|
||||
}
|
||||
cc.mu.Unlock()
|
||||
for _, a := range add {
|
||||
cc.resetAddrConn(a, true, nil)
|
||||
}
|
||||
for _, c := range del {
|
||||
c.tearDown(errConnDrain)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// resetAddrConn creates an addrConn for addr and adds it to cc.conns.
|
||||
// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason.
|
||||
// If tearDownErr is nil, errConnDrain will be used instead.
|
||||
func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error {
|
||||
ac := &addrConn{
|
||||
cc: cc,
|
||||
addr: addr,
|
||||
dopts: cc.dopts,
|
||||
}
|
||||
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
|
||||
ac.stateCV = sync.NewCond(&ac.mu)
|
||||
if EnableTracing {
|
||||
ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr)
|
||||
}
|
||||
if !ac.dopts.insecure {
|
||||
if ac.dopts.copts.TransportCredentials == nil {
|
||||
return errNoTransportSecurity
|
||||
}
|
||||
} else {
|
||||
if ac.dopts.copts.TransportCredentials != nil {
|
||||
return errCredentialsConflict
|
||||
}
|
||||
for _, cd := range ac.dopts.copts.PerRPCCredentials {
|
||||
if cd.RequireTransportSecurity() {
|
||||
return errTransportCredentialsMissing
|
||||
}
|
||||
}
|
||||
}
|
||||
// Track ac in cc. This needs to be done before any getTransport(...) is called.
|
||||
cc.mu.Lock()
|
||||
if cc.conns == nil {
|
||||
cc.mu.Unlock()
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
stale := cc.conns[ac.addr]
|
||||
cc.conns[ac.addr] = ac
|
||||
cc.mu.Unlock()
|
||||
if stale != nil {
|
||||
// There is an addrConn alive on ac.addr already. This could be due to
|
||||
// 1) a buggy Balancer notifies duplicated Addresses;
|
||||
// 2) goaway was received, a new ac will replace the old ac.
|
||||
// The old ac should be deleted from cc.conns, but the
|
||||
// underlying transport should drain rather than close.
|
||||
if tearDownErr == nil {
|
||||
// tearDownErr is nil if resetAddrConn is called by
|
||||
// 1) Dial
|
||||
// 2) lbWatcher
|
||||
// In both cases, the stale ac should drain, not close.
|
||||
stale.tearDown(errConnDrain)
|
||||
} else {
|
||||
stale.tearDown(tearDownErr)
|
||||
}
|
||||
}
|
||||
// skipWait may overwrite the decision in ac.dopts.block.
|
||||
if ac.dopts.block && !skipWait {
|
||||
if err := ac.resetTransport(false); err != nil {
|
||||
if err != errConnClosing {
|
||||
// Tear down ac and delete it from cc.conns.
|
||||
cc.mu.Lock()
|
||||
delete(cc.conns, ac.addr)
|
||||
cc.mu.Unlock()
|
||||
ac.tearDown(err)
|
||||
}
|
||||
if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
|
||||
return e.Origin()
|
||||
}
|
||||
return err
|
||||
}
|
||||
// Start to monitor the error status of transport.
|
||||
go ac.transportMonitor()
|
||||
} else {
|
||||
// Start a goroutine connecting to the server asynchronously.
|
||||
go func() {
|
||||
if err := ac.resetTransport(false); err != nil {
|
||||
grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err)
|
||||
if err != errConnClosing {
|
||||
// Keep this ac in cc.conns, to get the reason it's torn down.
|
||||
ac.tearDown(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
ac.transportMonitor()
|
||||
}()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
|
||||
var (
|
||||
ac *addrConn
|
||||
ok bool
|
||||
put func()
|
||||
)
|
||||
if cc.dopts.balancer == nil {
|
||||
// If balancer is nil, there should be only one addrConn available.
|
||||
cc.mu.RLock()
|
||||
for _, ac = range cc.conns {
|
||||
// Break after the first iteration to get the first addrConn.
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
cc.mu.RUnlock()
|
||||
} else {
|
||||
var (
|
||||
addr Address
|
||||
err error
|
||||
)
|
||||
addr, put, err = cc.dopts.balancer.Get(ctx, opts)
|
||||
if err != nil {
|
||||
return nil, nil, toRPCErr(err)
|
||||
}
|
||||
cc.mu.RLock()
|
||||
if cc.conns == nil {
|
||||
cc.mu.RUnlock()
|
||||
return nil, nil, toRPCErr(ErrClientConnClosing)
|
||||
}
|
||||
ac, ok = cc.conns[addr]
|
||||
cc.mu.RUnlock()
|
||||
}
|
||||
if !ok {
|
||||
if put != nil {
|
||||
put()
|
||||
}
|
||||
return nil, nil, errConnClosing
|
||||
}
|
||||
t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait)
|
||||
if err != nil {
|
||||
if put != nil {
|
||||
put()
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
return t, put, nil
|
||||
}
|
||||
|
||||
// Close tears down the ClientConn and all underlying connections.
|
||||
func (cc *ClientConn) Close() error {
|
||||
cc.cancel()
|
||||
|
||||
cc.mu.Lock()
|
||||
if cc.conns == nil {
|
||||
cc.mu.Unlock()
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
conns := cc.conns
|
||||
cc.conns = nil
|
||||
cc.mu.Unlock()
|
||||
if cc.dopts.balancer != nil {
|
||||
cc.dopts.balancer.Close()
|
||||
}
|
||||
for _, ac := range conns {
|
||||
ac.tearDown(ErrClientConnClosing)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// addrConn is a network connection to a given address.
|
||||
type addrConn struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
cc *ClientConn
|
||||
addr Address
|
||||
dopts dialOptions
|
||||
events trace.EventLog
|
||||
|
||||
mu sync.Mutex
|
||||
state ConnectivityState
|
||||
stateCV *sync.Cond
|
||||
down func(error) // the handler called when a connection is down.
|
||||
// ready is closed and becomes nil when a new transport is up or failed
|
||||
// due to timeout.
|
||||
ready chan struct{}
|
||||
transport transport.ClientTransport
|
||||
|
||||
// The reason this addrConn is torn down.
|
||||
tearDownErr error
|
||||
}
|
||||
|
||||
// printf records an event in ac's event log, unless ac has been closed.
|
||||
// REQUIRES ac.mu is held.
|
||||
func (ac *addrConn) printf(format string, a ...interface{}) {
|
||||
if ac.events != nil {
|
||||
ac.events.Printf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// errorf records an error in ac's event log, unless ac has been closed.
|
||||
// REQUIRES ac.mu is held.
|
||||
func (ac *addrConn) errorf(format string, a ...interface{}) {
|
||||
if ac.events != nil {
|
||||
ac.events.Errorf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// getState returns the connectivity state of the Conn
|
||||
func (ac *addrConn) getState() ConnectivityState {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
return ac.state
|
||||
}
|
||||
|
||||
// waitForStateChange blocks until the state changes to something other than the sourceState.
|
||||
func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
if sourceState != ac.state {
|
||||
return ac.state, nil
|
||||
}
|
||||
done := make(chan struct{})
|
||||
var err error
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
ac.mu.Lock()
|
||||
err = ctx.Err()
|
||||
ac.stateCV.Broadcast()
|
||||
ac.mu.Unlock()
|
||||
case <-done:
|
||||
}
|
||||
}()
|
||||
defer close(done)
|
||||
for sourceState == ac.state {
|
||||
ac.stateCV.Wait()
|
||||
if err != nil {
|
||||
return ac.state, err
|
||||
}
|
||||
}
|
||||
return ac.state, nil
|
||||
}
|
||||
|
||||
func (ac *addrConn) resetTransport(closeTransport bool) error {
|
||||
for retries := 0; ; retries++ {
|
||||
ac.mu.Lock()
|
||||
ac.printf("connecting")
|
||||
if ac.state == Shutdown {
|
||||
// ac.tearDown(...) has been invoked.
|
||||
ac.mu.Unlock()
|
||||
return errConnClosing
|
||||
}
|
||||
if ac.down != nil {
|
||||
ac.down(downErrorf(false, true, "%v", errNetworkIO))
|
||||
ac.down = nil
|
||||
}
|
||||
ac.state = Connecting
|
||||
ac.stateCV.Broadcast()
|
||||
t := ac.transport
|
||||
ac.mu.Unlock()
|
||||
if closeTransport && t != nil {
|
||||
t.Close()
|
||||
}
|
||||
sleepTime := ac.dopts.bs.backoff(retries)
|
||||
timeout := minConnectTimeout
|
||||
if timeout < sleepTime {
|
||||
timeout = sleepTime
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(ac.ctx, timeout)
|
||||
connectTime := time.Now()
|
||||
newTransport, err := transport.NewClientTransport(ctx, ac.addr.Addr, ac.dopts.copts)
|
||||
if err != nil {
|
||||
cancel()
|
||||
|
||||
if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
|
||||
return err
|
||||
}
|
||||
grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, ac.addr)
|
||||
ac.mu.Lock()
|
||||
if ac.state == Shutdown {
|
||||
// ac.tearDown(...) has been invoked.
|
||||
ac.mu.Unlock()
|
||||
return errConnClosing
|
||||
}
|
||||
ac.errorf("transient failure: %v", err)
|
||||
ac.state = TransientFailure
|
||||
ac.stateCV.Broadcast()
|
||||
if ac.ready != nil {
|
||||
close(ac.ready)
|
||||
ac.ready = nil
|
||||
}
|
||||
ac.mu.Unlock()
|
||||
closeTransport = false
|
||||
select {
|
||||
case <-time.After(sleepTime - time.Since(connectTime)):
|
||||
case <-ac.ctx.Done():
|
||||
return ac.ctx.Err()
|
||||
}
|
||||
continue
|
||||
}
|
||||
ac.mu.Lock()
|
||||
ac.printf("ready")
|
||||
if ac.state == Shutdown {
|
||||
// ac.tearDown(...) has been invoked.
|
||||
ac.mu.Unlock()
|
||||
newTransport.Close()
|
||||
return errConnClosing
|
||||
}
|
||||
ac.state = Ready
|
||||
ac.stateCV.Broadcast()
|
||||
ac.transport = newTransport
|
||||
if ac.ready != nil {
|
||||
close(ac.ready)
|
||||
ac.ready = nil
|
||||
}
|
||||
if ac.cc.dopts.balancer != nil {
|
||||
ac.down = ac.cc.dopts.balancer.Up(ac.addr)
|
||||
}
|
||||
ac.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Run in a goroutine to track the error in transport and create the
|
||||
// new transport if an error happens. It returns when the channel is closing.
|
||||
func (ac *addrConn) transportMonitor() {
|
||||
for {
|
||||
ac.mu.Lock()
|
||||
t := ac.transport
|
||||
ac.mu.Unlock()
|
||||
select {
|
||||
// This is needed to detect the teardown when
|
||||
// the addrConn is idle (i.e., no RPC in flight).
|
||||
case <-ac.ctx.Done():
|
||||
select {
|
||||
case <-t.Error():
|
||||
t.Close()
|
||||
default:
|
||||
}
|
||||
return
|
||||
case <-t.GoAway():
|
||||
// If GoAway happens without any network I/O error, ac is closed without shutting down the
|
||||
// underlying transport (the transport will be closed when all the pending RPCs have finished or
|
||||
// failed).
|
||||
// If GoAway and some network I/O error happen concurrently, ac and its underlying transport
|
||||
// are closed.
|
||||
// In both cases, a new ac is created.
|
||||
select {
|
||||
case <-t.Error():
|
||||
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
|
||||
default:
|
||||
ac.cc.resetAddrConn(ac.addr, true, errConnDrain)
|
||||
}
|
||||
return
|
||||
case <-t.Error():
|
||||
select {
|
||||
case <-ac.ctx.Done():
|
||||
t.Close()
|
||||
return
|
||||
case <-t.GoAway():
|
||||
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
|
||||
return
|
||||
default:
|
||||
}
|
||||
ac.mu.Lock()
|
||||
if ac.state == Shutdown {
|
||||
// ac has been shutdown.
|
||||
ac.mu.Unlock()
|
||||
return
|
||||
}
|
||||
ac.state = TransientFailure
|
||||
ac.stateCV.Broadcast()
|
||||
ac.mu.Unlock()
|
||||
if err := ac.resetTransport(true); err != nil {
|
||||
ac.mu.Lock()
|
||||
ac.printf("transport exiting: %v", err)
|
||||
ac.mu.Unlock()
|
||||
grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err)
|
||||
if err != errConnClosing {
|
||||
// Keep this ac in cc.conns, to get the reason it's torn down.
|
||||
ac.tearDown(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
|
||||
// iv) the transport is in TransientFailure and either failfast is true or a balancer is in use.
|
||||
func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) {
|
||||
for {
|
||||
ac.mu.Lock()
|
||||
switch {
|
||||
case ac.state == Shutdown:
|
||||
if failfast || !hasBalancer {
|
||||
// RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr.
|
||||
err := ac.tearDownErr
|
||||
ac.mu.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
ac.mu.Unlock()
|
||||
return nil, errConnClosing
|
||||
case ac.state == Ready:
|
||||
ct := ac.transport
|
||||
ac.mu.Unlock()
|
||||
return ct, nil
|
||||
case ac.state == TransientFailure:
|
||||
if failfast || hasBalancer {
|
||||
ac.mu.Unlock()
|
||||
return nil, errConnUnavailable
|
||||
}
|
||||
}
|
||||
ready := ac.ready
|
||||
if ready == nil {
|
||||
ready = make(chan struct{})
|
||||
ac.ready = ready
|
||||
}
|
||||
ac.mu.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, toRPCErr(ctx.Err())
|
||||
// Wait until the new transport is ready or failed.
|
||||
case <-ready:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// tearDown starts to tear down the addrConn.
|
||||
// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
|
||||
// some edge cases (e.g., the caller opens and closes many addrConn's in a
|
||||
// tight loop).
|
||||
// tearDown doesn't remove ac from ac.cc.conns.
|
||||
func (ac *addrConn) tearDown(err error) {
|
||||
ac.cancel()
|
||||
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
if ac.down != nil {
|
||||
ac.down(downErrorf(false, false, "%v", err))
|
||||
ac.down = nil
|
||||
}
|
||||
if err == errConnDrain && ac.transport != nil {
|
||||
// GracefulClose(...) may be executed multiple times when
|
||||
// i) receiving multiple GoAway frames from the server; or
|
||||
// ii) there are concurrent name resolver/Balancer triggered
|
||||
// address removal and GoAway.
|
||||
ac.transport.GracefulClose()
|
||||
}
|
||||
if ac.state == Shutdown {
|
||||
return
|
||||
}
|
||||
ac.state = Shutdown
|
||||
ac.tearDownErr = err
|
||||
ac.stateCV.Broadcast()
|
||||
if ac.events != nil {
|
||||
ac.events.Finish()
|
||||
ac.events = nil
|
||||
}
|
||||
if ac.ready != nil {
|
||||
close(ac.ready)
|
||||
ac.ready = nil
|
||||
}
|
||||
if ac.transport != nil && err != errConnDrain {
|
||||
ac.transport.Close()
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This script serves as an example to demonstrate how to generate the gRPC-Go
|
||||
# interface and the related messages from a .proto file.
|
||||
#
|
||||
# It assumes the installation of i) the Google protocol buffer compiler at
|
||||
# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
|
||||
# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
|
||||
# not, please install them first.
|
||||
#
|
||||
# We recommend running this script at $GOPATH/src.
|
||||
#
|
||||
# If this is not what you need, feel free to make your own scripts. Again, this
|
||||
# script is for demonstration purposes.
|
||||
#
|
||||
proto=$1
|
||||
protoc --go_out=plugins=grpc:. $proto
|
|
@ -0,0 +1,16 @@
|
|||
// generated by stringer -type=Code; DO NOT EDIT
|
||||
|
||||
package codes
|
||||
|
||||
import "fmt"
|
||||
|
||||
const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
|
||||
|
||||
var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
|
||||
|
||||
func (i Code) String() string {
|
||||
if i+1 >= Code(len(_Code_index)) {
|
||||
return fmt.Sprintf("Code(%d)", i)
|
||||
}
|
||||
return _Code_name[_Code_index[i]:_Code_index[i+1]]
|
||||
}
|
|
@ -0,0 +1,159 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package codes defines the canonical error codes used by gRPC. It is
|
||||
// consistent across various languages.
|
||||
package codes // import "google.golang.org/grpc/codes"
|
||||
|
||||
// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
|
||||
type Code uint32
|
||||
|
||||
//go:generate stringer -type=Code
|
||||
|
||||
const (
|
||||
// OK is returned on success.
|
||||
OK Code = 0
|
||||
|
||||
// Canceled indicates the operation was canceled (typically by the caller).
|
||||
Canceled Code = 1
|
||||
|
||||
// Unknown error. An example of where this error may be returned is
|
||||
// if a Status value received from another address space belongs to
|
||||
// an error-space that is not known in this address space. Also
|
||||
// errors raised by APIs that do not return enough error information
|
||||
// may be converted to this error.
|
||||
Unknown Code = 2
|
||||
|
||||
// InvalidArgument indicates client specified an invalid argument.
|
||||
// Note that this differs from FailedPrecondition. It indicates arguments
|
||||
// that are problematic regardless of the state of the system
|
||||
// (e.g., a malformed file name).
|
||||
InvalidArgument Code = 3
|
||||
|
||||
// DeadlineExceeded means operation expired before completion.
|
||||
// For operations that change the state of the system, this error may be
|
||||
// returned even if the operation has completed successfully. For
|
||||
// example, a successful response from a server could have been delayed
|
||||
// long enough for the deadline to expire.
|
||||
DeadlineExceeded Code = 4
|
||||
|
||||
// NotFound means some requested entity (e.g., file or directory) was
|
||||
// not found.
|
||||
NotFound Code = 5
|
||||
|
||||
// AlreadyExists means an attempt to create an entity failed because one
|
||||
// already exists.
|
||||
AlreadyExists Code = 6
|
||||
|
||||
// PermissionDenied indicates the caller does not have permission to
|
||||
// execute the specified operation. It must not be used for rejections
|
||||
// caused by exhausting some resource (use ResourceExhausted
|
||||
// instead for those errors). It must not be
|
||||
// used if the caller cannot be identified (use Unauthenticated
|
||||
// instead for those errors).
|
||||
PermissionDenied Code = 7
|
||||
|
||||
// Unauthenticated indicates the request does not have valid
|
||||
// authentication credentials for the operation.
|
||||
Unauthenticated Code = 16
|
||||
|
||||
// ResourceExhausted indicates some resource has been exhausted, perhaps
|
||||
// a per-user quota, or perhaps the entire file system is out of space.
|
||||
ResourceExhausted Code = 8
|
||||
|
||||
// FailedPrecondition indicates operation was rejected because the
|
||||
// system is not in a state required for the operation's execution.
|
||||
// For example, directory to be deleted may be non-empty, an rmdir
|
||||
// operation is applied to a non-directory, etc.
|
||||
//
|
||||
// A litmus test that may help a service implementor in deciding
|
||||
// between FailedPrecondition, Aborted, and Unavailable:
|
||||
// (a) Use Unavailable if the client can retry just the failing call.
|
||||
// (b) Use Aborted if the client should retry at a higher-level
|
||||
// (e.g., restarting a read-modify-write sequence).
|
||||
// (c) Use FailedPrecondition if the client should not retry until
|
||||
// the system state has been explicitly fixed. E.g., if an "rmdir"
|
||||
// fails because the directory is non-empty, FailedPrecondition
|
||||
// should be returned since the client should not retry unless
|
||||
// they have first fixed up the directory by deleting files from it.
|
||||
// (d) Use FailedPrecondition if the client performs conditional
|
||||
// REST Get/Update/Delete on a resource and the resource on the
|
||||
// server does not match the condition. E.g., conflicting
|
||||
// read-modify-write on the same resource.
|
||||
FailedPrecondition Code = 9
|
||||
|
||||
// Aborted indicates the operation was aborted, typically due to a
|
||||
// concurrency issue like sequencer check failures, transaction aborts,
|
||||
// etc.
|
||||
//
|
||||
// See litmus test above for deciding between FailedPrecondition,
|
||||
// Aborted, and Unavailable.
|
||||
Aborted Code = 10
|
||||
|
||||
// OutOfRange means operation was attempted past the valid range.
|
||||
// E.g., seeking or reading past end of file.
|
||||
//
|
||||
// Unlike InvalidArgument, this error indicates a problem that may
|
||||
// be fixed if the system state changes. For example, a 32-bit file
|
||||
// system will generate InvalidArgument if asked to read at an
|
||||
// offset that is not in the range [0,2^32-1], but it will generate
|
||||
// OutOfRange if asked to read from an offset past the current
|
||||
// file size.
|
||||
//
|
||||
// There is a fair bit of overlap between FailedPrecondition and
|
||||
// OutOfRange. We recommend using OutOfRange (the more specific
|
||||
// error) when it applies so that callers who are iterating through
|
||||
// a space can easily look for an OutOfRange error to detect when
|
||||
// they are done.
|
||||
OutOfRange Code = 11
|
||||
|
||||
// Unimplemented indicates operation is not implemented or not
|
||||
// supported/enabled in this service.
|
||||
Unimplemented Code = 12
|
||||
|
||||
// Internal errors. This means some invariant expected by the underlying
|
||||
// system has been broken. If you see one of these errors,
|
||||
// something is very broken.
|
||||
Internal Code = 13
|
||||
|
||||
// Unavailable indicates the service is currently unavailable.
|
||||
// This is most likely a transient condition and may be corrected
|
||||
// by retrying with a backoff.
|
||||
//
|
||||
// See litmus test above for deciding between FailedPrecondition,
|
||||
// Aborted, and Unavailable.
|
||||
Unavailable Code = 14
|
||||
|
||||
// DataLoss indicates unrecoverable data loss or corruption.
|
||||
DataLoss Code = 15
|
||||
)
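A hedged sketch of acting on these codes from client code, assuming the grpc.Code and grpc.Errorf helpers exported by the top-level grpc package in releases of this vintage.

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// classify inspects an RPC error via its canonical code and returns a
// human-readable description of what the caller should do.
func classify(err error) string {
	switch grpc.Code(err) {
	case codes.OK:
		return "success"
	case codes.NotFound:
		return "entity does not exist"
	case codes.DeadlineExceeded, codes.Unavailable:
		return "transient failure, safe to retry"
	default:
		return "unhandled"
	}
}

func main() {
	fmt.Println(classify(grpc.Errorf(codes.NotFound, "no such key")))
}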
|
|
@ -0,0 +1,47 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
workdir=.cover
|
||||
profile="$workdir/cover.out"
|
||||
mode=set
|
||||
end2endtest="google.golang.org/grpc/test"
|
||||
|
||||
generate_cover_data() {
|
||||
rm -rf "$workdir"
|
||||
mkdir "$workdir"
|
||||
|
||||
for pkg in "$@"; do
|
||||
if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ]
|
||||
then
|
||||
f="$workdir/$(echo $pkg | tr / -)"
|
||||
go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
|
||||
go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "mode: $mode" >"$profile"
|
||||
grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
|
||||
}
|
||||
|
||||
show_cover_report() {
|
||||
go tool cover -${1}="$profile"
|
||||
}
|
||||
|
||||
push_to_coveralls() {
|
||||
goveralls -coverprofile="$profile"
|
||||
}
|
||||
|
||||
generate_cover_data $(go list ./...)
|
||||
show_cover_report func
|
||||
case "$1" in
|
||||
"")
|
||||
;;
|
||||
--html)
|
||||
show_cover_report html ;;
|
||||
--coveralls)
|
||||
push_to_coveralls ;;
|
||||
*)
|
||||
echo >&2 "error: invalid option: $1" ;;
|
||||
esac
|
||||
rm -rf "$workdir"
|
|
@ -0,0 +1,213 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package credentials implements various credentials supported by the gRPC library,
|
||||
// which encapsulate all the state needed by a client to authenticate with a
|
||||
// server and make various assertions, e.g., about the client's identity, role,
|
||||
// or whether it is authorized to make a particular call.
|
||||
package credentials // import "google.golang.org/grpc/credentials"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
// alpnProtoStr are the specified application level protocols for gRPC.
|
||||
alpnProtoStr = []string{"h2"}
|
||||
)
|
||||
|
||||
// PerRPCCredentials defines the common interface for the credentials which need to
|
||||
// attach security information to every RPC (e.g., oauth2).
|
||||
type PerRPCCredentials interface {
|
||||
// GetRequestMetadata gets the current request metadata, refreshing
|
||||
// tokens if required. This should be called by the transport layer on
|
||||
// each request, and the data should be populated in headers or other
|
||||
// context. uri is the URI of the entry point for the request. When
|
||||
// supported by the underlying implementation, ctx can be used for
|
||||
// timeout and cancellation.
|
||||
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
|
||||
// it as an arbitrary string.
|
||||
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
|
||||
// RequireTransportSecurity indicates whether the credentials require
|
||||
// transport security.
|
||||
RequireTransportSecurity() bool
|
||||
}
|
||||
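A minimal sketch of implementing this interface with a fixed bearer token; the type, token value, and dial target are illustrative assumptions rather than anything this package provides.

package main

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// staticToken is a made-up PerRPCCredentials implementation that attaches a
// fixed bearer token to every RPC. Real implementations (e.g., oauth2) would
// refresh tokens as needed.
type staticToken struct {
	token string
}

func (s staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + s.token}, nil
}

// RequireTransportSecurity is true so the token is never sent in cleartext.
func (s staticToken) RequireTransportSecurity() bool { return true }

func main() {
	// In practice this would be combined with transport security, e.g.
	// grpc.WithTransportCredentials(...); the target is illustrative and the
	// error is ignored for brevity.
	conn, _ := grpc.Dial("example.com:443",
		grpc.WithPerRPCCredentials(staticToken{token: "example-token"}))
	if conn != nil {
		conn.Close()
	}
}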
|
||||
// ProtocolInfo provides information regarding the gRPC wire protocol version,
|
||||
// security protocol, security protocol version in use, etc.
|
||||
type ProtocolInfo struct {
|
||||
// ProtocolVersion is the gRPC wire protocol version.
|
||||
ProtocolVersion string
|
||||
// SecurityProtocol is the security protocol in use.
|
||||
SecurityProtocol string
|
||||
// SecurityVersion is the security protocol version.
|
||||
SecurityVersion string
|
||||
}
|
||||
|
||||
// AuthInfo defines the common interface for the auth information the users are interested in.
|
||||
type AuthInfo interface {
|
||||
AuthType() string
|
||||
}
|
||||
|
||||
// TransportCredentials defines the common interface for all the live gRPC wire
|
||||
// protocols and supported transport security protocols (e.g., TLS, SSL).
|
||||
type TransportCredentials interface {
|
||||
// ClientHandshake does the authentication handshake specified by the corresponding
|
||||
// authentication protocol on rawConn for clients. It returns the authenticated
|
||||
// connection and the corresponding auth information about the connection.
|
||||
// Implementations must use the provided context to implement timely cancellation.
|
||||
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
|
||||
// ServerHandshake does the authentication handshake for servers. It returns
|
||||
// the authenticated connection and the corresponding auth information about
|
||||
// the connection.
|
||||
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
|
||||
// Info provides the ProtocolInfo of this TransportCredentials.
|
||||
Info() ProtocolInfo
|
||||
}
|
||||
|
||||
// TLSInfo contains the auth information for a TLS authenticated connection.
|
||||
// It implements the AuthInfo interface.
|
||||
type TLSInfo struct {
|
||||
State tls.ConnectionState
|
||||
}
|
||||
|
||||
// AuthType returns the type of TLSInfo as a string.
|
||||
func (t TLSInfo) AuthType() string {
|
||||
return "tls"
|
||||
}
|
||||
|
||||
// tlsCreds is the credentials required for authenticating a connection using TLS.
|
||||
type tlsCreds struct {
|
||||
// TLS configuration
|
||||
config *tls.Config
|
||||
}
|
||||
|
||||
func (c tlsCreds) Info() ProtocolInfo {
|
||||
return ProtocolInfo{
|
||||
SecurityProtocol: "tls",
|
||||
SecurityVersion: "1.2",
|
||||
}
|
||||
}
|
||||
|
||||
// GetRequestMetadata returns nil, nil since TLS credentials do not have
|
||||
// metadata.
|
||||
func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
|
||||
// use local cfg to avoid clobbering ServerName if using multiple endpoints
|
||||
cfg := cloneTLSConfig(c.config)
|
||||
if cfg.ServerName == "" {
|
||||
colonPos := strings.LastIndex(addr, ":")
|
||||
if colonPos == -1 {
|
||||
colonPos = len(addr)
|
||||
}
|
||||
cfg.ServerName = addr[:colonPos]
|
||||
}
|
||||
conn := tls.Client(rawConn, cfg)
|
||||
errChannel := make(chan error, 1)
|
||||
go func() {
|
||||
errChannel <- conn.Handshake()
|
||||
}()
|
||||
select {
|
||||
case err := <-errChannel:
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return nil, nil, ctx.Err()
|
||||
}
|
||||
// TODO(zhaoq): Omit the auth info for client now. It is more for
|
||||
// information than anything else.
|
||||
return conn, nil, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
|
||||
conn := tls.Server(rawConn, c.config)
|
||||
if err := conn.Handshake(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return conn, TLSInfo{conn.ConnectionState()}, nil
|
||||
}
|
||||
|
||||
// NewTLS uses c to construct a TransportCredentials based on TLS.
|
||||
func NewTLS(c *tls.Config) TransportCredentials {
|
||||
tc := &tlsCreds{cloneTLSConfig(c)}
|
||||
tc.config.NextProtos = alpnProtoStr
|
||||
return tc
|
||||
}
|
||||
|
||||
// NewClientTLSFromCert constructs TLS transport credentials from the input certificate for a client.
|
||||
func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportCredentials {
|
||||
return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp})
|
||||
}
|
||||
|
||||
// NewClientTLSFromFile constructs TLS transport credentials from the input certificate file for a client.
|
||||
func NewClientTLSFromFile(certFile, serverName string) (TransportCredentials, error) {
|
||||
b, err := ioutil.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cp := x509.NewCertPool()
|
||||
if !cp.AppendCertsFromPEM(b) {
|
||||
return nil, fmt.Errorf("credentials: failed to append certificates")
|
||||
}
|
||||
return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil
|
||||
}
|
||||
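A short usage sketch wiring these client credentials into a dial; the certificate path, server name, and address are illustrative assumptions.

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/grpclog"
)

func main() {
	// Load the CA certificate that signed the server's certificate and
	// verify the server as "example.com". Both values are examples only.
	creds, err := credentials.NewClientTLSFromFile("/etc/ssl/example-ca.pem", "example.com")
	if err != nil {
		grpclog.Fatalf("failed to load CA certificate: %v", err)
	}
	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		grpclog.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}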
|
||||
// NewServerTLSFromCert constructs TLS transport credentials from the input certificate for a server.
|
||||
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
|
||||
}
|
||||
|
||||
// NewServerTLSFromFile constructs TLS transport credentials from the input certificate file and key
|
||||
// file for a server.
|
||||
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
|
||||
}
|
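And the server-side counterpart, assuming the grpc.NewServer constructor and grpc.Creds server option from the top-level package; the certificate paths and listen address are illustrative.

package main

import (
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/grpclog"
)

func main() {
	// Certificate, key, and listen address are illustrative assumptions.
	creds, err := credentials.NewServerTLSFromFile("/etc/ssl/server.pem", "/etc/ssl/server.key")
	if err != nil {
		grpclog.Fatalf("failed to load key pair: %v", err)
	}
	lis, err := net.Listen("tcp", ":8201")
	if err != nil {
		grpclog.Fatalf("listen failed: %v", err)
	}
	s := grpc.NewServer(grpc.Creds(creds))
	// Service registration would happen here before Serve.
	if err := s.Serve(lis); err != nil {
		grpclog.Fatalf("serve failed: %v", err)
	}
}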
76
vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
generated
vendored
Normal file
|
@ -0,0 +1,76 @@
|
|||
// +build go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// cloneTLSConfig returns a shallow clone of the exported
|
||||
// fields of cfg, ignoring the unexported sync.Once, which
|
||||
// contains a mutex and must not be copied.
|
||||
//
|
||||
// If cfg is nil, a new zero tls.Config is returned.
|
||||
//
|
||||
// TODO replace this function with official clone function.
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
return &tls.Config{
|
||||
Rand: cfg.Rand,
|
||||
Time: cfg.Time,
|
||||
Certificates: cfg.Certificates,
|
||||
NameToCertificate: cfg.NameToCertificate,
|
||||
GetCertificate: cfg.GetCertificate,
|
||||
RootCAs: cfg.RootCAs,
|
||||
NextProtos: cfg.NextProtos,
|
||||
ServerName: cfg.ServerName,
|
||||
ClientAuth: cfg.ClientAuth,
|
||||
ClientCAs: cfg.ClientCAs,
|
||||
InsecureSkipVerify: cfg.InsecureSkipVerify,
|
||||
CipherSuites: cfg.CipherSuites,
|
||||
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
|
||||
SessionTicketsDisabled: cfg.SessionTicketsDisabled,
|
||||
SessionTicketKey: cfg.SessionTicketKey,
|
||||
ClientSessionCache: cfg.ClientSessionCache,
|
||||
MinVersion: cfg.MinVersion,
|
||||
MaxVersion: cfg.MaxVersion,
|
||||
CurvePreferences: cfg.CurvePreferences,
|
||||
DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
|
||||
Renegotiation: cfg.Renegotiation,
|
||||
}
|
||||
}
|
74
vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
generated
vendored
Normal file
|
@ -0,0 +1,74 @@
|
|||
// +build !go1.7
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
// cloneTLSConfig returns a shallow clone of the exported
|
||||
// fields of cfg, ignoring the unexported sync.Once, which
|
||||
// contains a mutex and must not be copied.
|
||||
//
|
||||
// If cfg is nil, a new zero tls.Config is returned.
|
||||
//
|
||||
// TODO replace this function with official clone function.
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
return &tls.Config{
|
||||
Rand: cfg.Rand,
|
||||
Time: cfg.Time,
|
||||
Certificates: cfg.Certificates,
|
||||
NameToCertificate: cfg.NameToCertificate,
|
||||
GetCertificate: cfg.GetCertificate,
|
||||
RootCAs: cfg.RootCAs,
|
||||
NextProtos: cfg.NextProtos,
|
||||
ServerName: cfg.ServerName,
|
||||
ClientAuth: cfg.ClientAuth,
|
||||
ClientCAs: cfg.ClientCAs,
|
||||
InsecureSkipVerify: cfg.InsecureSkipVerify,
|
||||
CipherSuites: cfg.CipherSuites,
|
||||
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
|
||||
SessionTicketsDisabled: cfg.SessionTicketsDisabled,
|
||||
SessionTicketKey: cfg.SessionTicketKey,
|
||||
ClientSessionCache: cfg.ClientSessionCache,
|
||||
MinVersion: cfg.MinVersion,
|
||||
MaxVersion: cfg.MaxVersion,
|
||||
CurvePreferences: cfg.CurvePreferences,
|
||||
}
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
/*
|
||||
Package grpc implements an RPC system called gRPC.
|
||||
|
||||
See www.grpc.io for more information about gRPC.
|
||||
*/
|
||||
package grpc // import "google.golang.org/grpc"
|
|
@ -0,0 +1,93 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2015, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
Package grpclog defines logging for grpc.
|
||||
*/
|
||||
package grpclog // import "google.golang.org/grpc/grpclog"
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Use golang's standard logger by default.
|
||||
// Access is not mutex-protected: do not modify except in init()
|
||||
// functions.
|
||||
var logger Logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
|
||||
// Logger mimics golang's standard Logger as an interface.
|
||||
type Logger interface {
|
||||
Fatal(args ...interface{})
|
||||
Fatalf(format string, args ...interface{})
|
||||
Fatalln(args ...interface{})
|
||||
Print(args ...interface{})
|
||||
Printf(format string, args ...interface{})
|
||||
Println(args ...interface{})
|
||||
}
|
||||
|
||||
// SetLogger sets the logger that is used in grpc. Call only from
|
||||
// init() functions.
|
||||
func SetLogger(l Logger) {
|
||||
logger = l
|
||||
}
|
||||
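A minimal sketch of installing a custom logger from an init() function, as the comment above requires; the prefix string is an arbitrary example.

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

// Route gRPC's internal logging through a custom standard-library logger.
// *log.Logger satisfies the Logger interface, and SetLogger is safe only
// when called from init().
func init() {
	grpclog.SetLogger(log.New(os.Stderr, "[grpc] ", log.LstdFlags))
}

func main() {
	grpclog.Println("logger installed")
}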
|
||||
// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.
|
||||
func Fatal(args ...interface{}) {
|
||||
logger.Fatal(args...)
|
||||
}
|
||||
|
||||
// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
logger.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
// Fatalln is equivalent to Println() followed by a call to os.Exit() with a non-zero exit code.
|
||||
func Fatalln(args ...interface{}) {
|
||||
logger.Fatalln(args...)
|
||||
}
|
||||
|
||||
// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
|
||||
func Print(args ...interface{}) {
|
||||
logger.Print(args...)
|
||||
}
|
||||
|
||||
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
logger.Printf(format, args...)
|
||||
}
|
||||
|
||||
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
|
||||
func Println(args ...interface{}) {
|
||||
logger.Println(args...)
|
||||
}
|
|
@ -0,0 +1,74 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// UnaryServerInfo consists of various information about a unary RPC on
|
||||
// the server side. All per-RPC information may be mutated by the interceptor.
|
||||
type UnaryServerInfo struct {
|
||||
// Server is the service implementation the user provides. This is read-only.
|
||||
Server interface{}
|
||||
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||
FullMethod string
|
||||
}
|
||||
|
||||
// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
|
||||
// execution of a unary RPC.
|
||||
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
|
||||
|
||||
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
|
||||
// contains all the information of this RPC the interceptor can operate on, and handler is the wrapper
|
||||
// of the service method implementation. It is the responsibility of the interceptor to invoke handler
|
||||
// to complete the RPC.
|
||||
type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
|
||||
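A hedged sketch of a logging interceptor matching this signature, installed with the grpc.UnaryInterceptor server option (assumed to be available in the top-level package of this vintage); the log format is an arbitrary example.

package main

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
)

// loggingInterceptor times each unary RPC and logs its method and outcome.
// It must call handler so the RPC actually runs.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	grpclog.Printf("method=%s duration=%s err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// Install the interceptor when constructing the server; service
	// registration and listening are omitted here.
	_ = grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
}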
|
||||
// StreamServerInfo consists of various information about a streaming RPC on
|
||||
// the server side. All per-RPC information may be mutated by the interceptor.
|
||||
type StreamServerInfo struct {
|
||||
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||
FullMethod string
|
||||
// IsClientStream indicates whether the RPC is a client streaming RPC.
|
||||
IsClientStream bool
|
||||
// IsServerStream indicates whether the RPC is a server streaming RPC.
|
||||
IsServerStream bool
|
||||
}
|
||||
|
||||
// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
|
||||
// info contains all the information of this RPC the interceptor can operate on, and handler is the
|
||||
// service method implementation. It is the responsibility of the interceptor to invoke handler to
|
||||
// complete the RPC.
|
||||
type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
|
|
@ -0,0 +1,49 @@
|
|||
/*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package internal contains gRPC-internal code for testing, to avoid polluting
|
||||
// the godoc of the top-level grpc package.
|
||||
package internal
|
||||
|
||||
// TestingCloseConns closes all existing transports but keeps
|
||||
// grpcServer.lis accepting new connections.
|
||||
//
|
||||
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||
// for circular dependency reasons.
|
||||
var TestingCloseConns func(grpcServer interface{})
|
||||
|
||||
// TestingUseHandlerImpl enables the http.Handler-based server implementation.
|
||||
// It must be called before Serve and requires TLS credentials.
|
||||
//
|
||||
// The provided grpcServer must be of type *grpc.Server. It is untyped
|
||||
// for circular dependency reasons.
|
||||
var TestingUseHandlerImpl func(grpcServer interface{})
|
|
@ -0,0 +1,140 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package metadata defines the structure of the metadata supported by the gRPC library.
|
||||
package metadata // import "google.golang.org/grpc/metadata"
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
|
||||
binHdrSuffix = "-bin"
|
||||
)
|
||||
|
||||
// encodeKeyValue encodes key and value qualified for transmission via gRPC.
|
||||
// Transmitting binary headers violates HTTP/2 spec.
|
||||
// TODO(zhaoq): Maybe check if k is ASCII also.
|
||||
func encodeKeyValue(k, v string) (string, string) {
|
||||
k = strings.ToLower(k)
|
||||
if strings.HasSuffix(k, binHdrSuffix) {
|
||||
val := base64.StdEncoding.EncodeToString([]byte(v))
|
||||
v = string(val)
|
||||
}
|
||||
return k, v
|
||||
}
|
||||
|
||||
// DecodeKeyValue returns the original key and value corresponding to the
|
||||
// encoded data in k, v.
|
||||
// If k is a binary header and v contains a comma, v is split on commas before
// being decoded, and the decoded values are rejoined with commas before being returned.
|
||||
func DecodeKeyValue(k, v string) (string, string, error) {
|
||||
if !strings.HasSuffix(k, binHdrSuffix) {
|
||||
return k, v, nil
|
||||
}
|
||||
vvs := strings.Split(v, ",")
|
||||
for i, vv := range vvs {
|
||||
val, err := base64.StdEncoding.DecodeString(vv)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
vvs[i] = string(val)
|
||||
}
|
||||
return k, strings.Join(vvs, ","), nil
|
||||
}
|
||||
|
||||
// MD is a mapping from metadata keys to values. Users should use the following
|
||||
// two convenience functions New and Pairs to generate MD.
|
||||
type MD map[string][]string
|
||||
|
||||
// New creates a MD from given key-value map.
|
||||
func New(m map[string]string) MD {
|
||||
md := MD{}
|
||||
for k, v := range m {
|
||||
key, val := encodeKeyValue(k, v)
|
||||
md[key] = append(md[key], val)
|
||||
}
|
||||
return md
|
||||
}
|
||||
|
||||
// Pairs returns an MD formed by the mapping of key, value ...
|
||||
// Pairs panics if len(kv) is odd.
|
||||
func Pairs(kv ...string) MD {
|
||||
if len(kv)%2 == 1 {
|
||||
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
|
||||
}
|
||||
md := MD{}
|
||||
var k string
|
||||
for i, s := range kv {
|
||||
if i%2 == 0 {
|
||||
k = s
|
||||
continue
|
||||
}
|
||||
key, val := encodeKeyValue(k, s)
|
||||
md[key] = append(md[key], val)
|
||||
}
|
||||
return md
|
||||
}
|
||||
|
||||
// Len returns the number of items in md.
|
||||
func (md MD) Len() int {
|
||||
return len(md)
|
||||
}
|
||||
|
||||
// Copy returns a copy of md.
|
||||
func (md MD) Copy() MD {
|
||||
out := MD{}
|
||||
for k, v := range md {
|
||||
for _, i := range v {
|
||||
out[k] = append(out[k], i)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
type mdKey struct{}
|
||||
|
||||
// NewContext creates a new context with md attached.
|
||||
func NewContext(ctx context.Context, md MD) context.Context {
|
||||
return context.WithValue(ctx, mdKey{}, md)
|
||||
}
|
||||
|
||||
// FromContext returns the MD in ctx if it exists.
|
||||
func FromContext(ctx context.Context) (md MD, ok bool) {
|
||||
md, ok = ctx.Value(mdKey{}).(MD)
|
||||
return
|
||||
}
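
A minimal usage sketch for the metadata package added above (illustrative only, not part of this commit): a caller attaches metadata to a context with Pairs/NewContext and reads it back with FromContext.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Attach request-scoped metadata to an outgoing context.
	md := metadata.Pairs("request-id", "abc123")
	ctx := metadata.NewContext(context.Background(), md)

	// Later (typically on the server side), read it back from the context.
	if got, ok := metadata.FromContext(ctx); ok {
		fmt.Println(got["request-id"]) // keys are lowercased by Pairs/New
	}
}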
|
|
@ -0,0 +1,74 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package naming defines the naming API and related data structures for gRPC.
|
||||
// The interface is EXPERIMENTAL and may be subject to change.
|
||||
package naming
|
||||
|
||||
// Operation defines the corresponding operations for a name resolution change.
|
||||
type Operation uint8
|
||||
|
||||
const (
|
||||
// Add indicates a new address is added.
|
||||
Add Operation = iota
|
||||
// Delete indicates an existing address is deleted.
|
||||
Delete
|
||||
)
|
||||
|
||||
// Update defines a name resolution update. Note that it is not valid to have
// both an empty-string Addr and nil Metadata in an Update.
|
||||
type Update struct {
|
||||
// Op indicates the operation of the update.
|
||||
Op Operation
|
||||
// Addr is the updated address. It is empty string if there is no address update.
|
||||
Addr string
|
||||
// Metadata is the updated metadata. It is nil if there is no metadata update.
|
||||
// Metadata is not required for a custom naming implementation.
|
||||
Metadata interface{}
|
||||
}
|
||||
|
||||
// Resolver creates a Watcher for a target to track its resolution changes.
|
||||
type Resolver interface {
|
||||
// Resolve creates a Watcher for target.
|
||||
Resolve(target string) (Watcher, error)
|
||||
}
|
||||
|
||||
// Watcher watches for the updates on the specified target.
|
||||
type Watcher interface {
|
||||
// Next blocks until an update or error happens. It may return one or more
|
||||
// updates. The first call should get the full set of the results. It should
|
||||
// return an error if and only if Watcher cannot recover.
|
||||
Next() ([]*Update, error)
|
||||
// Close closes the Watcher.
|
||||
Close()
|
||||
}
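
The Resolver/Watcher contract above is easiest to see with a toy implementation. The sketch below is illustrative only and not part of this commit; it assumes a hypothetical staticnaming package that reports a fixed address list once and then blocks until closed.

package staticnaming

import (
	"errors"

	"google.golang.org/grpc/naming"
)

type staticResolver struct{ addrs []string }

// NewStatic returns a naming.Resolver that always resolves a target to addrs.
func NewStatic(addrs ...string) naming.Resolver { return &staticResolver{addrs: addrs} }

func (r *staticResolver) Resolve(target string) (naming.Watcher, error) {
	return &staticWatcher{addrs: r.addrs, done: make(chan struct{})}, nil
}

type staticWatcher struct {
	addrs []string
	sent  bool
	done  chan struct{}
}

// Next returns the full address set on the first call, then blocks until Close.
func (w *staticWatcher) Next() ([]*naming.Update, error) {
	if !w.sent {
		w.sent = true
		updates := make([]*naming.Update, 0, len(w.addrs))
		for _, a := range w.addrs {
			updates = append(updates, &naming.Update{Op: naming.Add, Addr: a})
		}
		return updates, nil
	}
	<-w.done
	return nil, errors.New("staticnaming: watcher closed")
}

func (w *staticWatcher) Close() { close(w.done) }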
|
|
@ -0,0 +1,65 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package peer defines various peer information associated with RPCs and
|
||||
// corresponding utils.
|
||||
package peer
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// Peer contains the information of the peer for an RPC.
|
||||
type Peer struct {
|
||||
// Addr is the peer address.
|
||||
Addr net.Addr
|
||||
// AuthInfo is the authentication information of the transport.
|
||||
// It is nil if there is no transport security being used.
|
||||
AuthInfo credentials.AuthInfo
|
||||
}
|
||||
|
||||
type peerKey struct{}
|
||||
|
||||
// NewContext creates a new context with peer information attached.
|
||||
func NewContext(ctx context.Context, p *Peer) context.Context {
|
||||
return context.WithValue(ctx, peerKey{}, p)
|
||||
}
|
||||
|
||||
// FromContext returns the peer information in ctx if it exists.
|
||||
func FromContext(ctx context.Context) (p *Peer, ok bool) {
|
||||
p, ok = ctx.Value(peerKey{}).(*Peer)
|
||||
return
|
||||
}
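
A short, hedged example of how a handler might consume the peer information defined above; the logCaller helper and its package name are assumptions, not part of this commit.

package peerinfo

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"
)

// logCaller can be called from inside any handler to report who is calling.
func logCaller(ctx context.Context) {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return
	}
	log.Printf("grpc: request from %s", p.Addr)
	// AuthInfo is nil without transport security; with TLS it is a credentials.TLSInfo.
	if tlsInfo, ok := p.AuthInfo.(credentials.TLSInfo); ok {
		log.Printf("grpc: negotiated TLS version 0x%x", tlsInfo.State.Version)
	}
}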
|
|
@ -0,0 +1,457 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// Codec defines the interface gRPC uses to encode and decode messages.
|
||||
type Codec interface {
|
||||
// Marshal returns the wire format of v.
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
// Unmarshal parses the wire format into v.
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
// String returns the name of the Codec implementation. The returned
|
||||
// string will be used as part of content type in transmission.
|
||||
String() string
|
||||
}
|
||||
|
||||
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
|
||||
type protoCodec struct{}
|
||||
|
||||
func (protoCodec) Marshal(v interface{}) ([]byte, error) {
|
||||
return proto.Marshal(v.(proto.Message))
|
||||
}
|
||||
|
||||
func (protoCodec) Unmarshal(data []byte, v interface{}) error {
|
||||
return proto.Unmarshal(data, v.(proto.Message))
|
||||
}
|
||||
|
||||
func (protoCodec) String() string {
|
||||
return "proto"
|
||||
}
|
||||
|
||||
// Compressor defines the interface gRPC uses to compress a message.
|
||||
type Compressor interface {
|
||||
// Do compresses p into w.
|
||||
Do(w io.Writer, p []byte) error
|
||||
// Type returns the compression algorithm the Compressor uses.
|
||||
Type() string
|
||||
}
|
||||
|
||||
// NewGZIPCompressor creates a Compressor based on GZIP.
|
||||
func NewGZIPCompressor() Compressor {
|
||||
return &gzipCompressor{}
|
||||
}
|
||||
|
||||
type gzipCompressor struct {
|
||||
}
|
||||
|
||||
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
|
||||
z := gzip.NewWriter(w)
|
||||
if _, err := z.Write(p); err != nil {
|
||||
return err
|
||||
}
|
||||
return z.Close()
|
||||
}
|
||||
|
||||
func (c *gzipCompressor) Type() string {
|
||||
return "gzip"
|
||||
}
|
||||
|
||||
// Decompressor defines the interface gRPC uses to decompress a message.
|
||||
type Decompressor interface {
|
||||
// Do reads the data from r and uncompresses it.
|
||||
Do(r io.Reader) ([]byte, error)
|
||||
// Type returns the compression algorithm the Decompressor uses.
|
||||
Type() string
|
||||
}
|
||||
|
||||
type gzipDecompressor struct {
|
||||
}
|
||||
|
||||
// NewGZIPDecompressor creates a Decompressor based on GZIP.
|
||||
func NewGZIPDecompressor() Decompressor {
|
||||
return &gzipDecompressor{}
|
||||
}
|
||||
|
||||
func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
|
||||
z, err := gzip.NewReader(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer z.Close()
|
||||
return ioutil.ReadAll(z)
|
||||
}
|
||||
|
||||
func (d *gzipDecompressor) Type() string {
|
||||
return "gzip"
|
||||
}
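
The Codec interface above is the hook for swapping the wire encoding. Below is an illustrative sketch, not part of this commit, of a JSON codec that could be installed with the CustomCodec ServerOption added later in this diff.

package jsoncodec

import "encoding/json"

// Codec marshals and unmarshals messages as JSON instead of protobuf.
type Codec struct{}

func (Codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (Codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }

// String is used by gRPC as part of the content type in transmission.
func (Codec) String() string { return "json" }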
|
||||
|
||||
// callInfo contains all related configuration and information about an RPC.
|
||||
type callInfo struct {
|
||||
failFast bool
|
||||
headerMD metadata.MD
|
||||
trailerMD metadata.MD
|
||||
traceInfo traceInfo // in trace.go
|
||||
}
|
||||
|
||||
var defaultCallInfo = callInfo{failFast: true}
|
||||
|
||||
// CallOption configures a Call before it starts or extracts information from
|
||||
// a Call after it completes.
|
||||
type CallOption interface {
|
||||
// before is called before the call is sent to any server. If before
|
||||
// returns a non-nil error, the RPC fails with that error.
|
||||
before(*callInfo) error
|
||||
|
||||
// after is called after the call has completed. after cannot return an
|
||||
// error, so any failures should be reported via output parameters.
|
||||
after(*callInfo)
|
||||
}
|
||||
|
||||
type beforeCall func(c *callInfo) error
|
||||
|
||||
func (o beforeCall) before(c *callInfo) error { return o(c) }
|
||||
func (o beforeCall) after(c *callInfo) {}
|
||||
|
||||
type afterCall func(c *callInfo)
|
||||
|
||||
func (o afterCall) before(c *callInfo) error { return nil }
|
||||
func (o afterCall) after(c *callInfo) { o(c) }
|
||||
|
||||
// Header returns a CallOption that retrieves the header metadata
|
||||
// for a unary RPC.
|
||||
func Header(md *metadata.MD) CallOption {
|
||||
return afterCall(func(c *callInfo) {
|
||||
*md = c.headerMD
|
||||
})
|
||||
}
|
||||
|
||||
// Trailer returns a CallOption that retrieves the trailer metadata
|
||||
// for a unary RPC.
|
||||
func Trailer(md *metadata.MD) CallOption {
|
||||
return afterCall(func(c *callInfo) {
|
||||
*md = c.trailerMD
|
||||
})
|
||||
}
|
||||
|
||||
// FailFast configures the action to take when an RPC is attempted on broken
|
||||
// connections or unreachable servers. If failfast is true, the RPC will fail
|
||||
// immediately. Otherwise, the RPC client will block the call until a
|
||||
// connection is available (or the call is canceled or times out) and will retry
|
||||
// the call if it fails due to a transient error. Please refer to
|
||||
// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md
|
||||
func FailFast(failFast bool) CallOption {
|
||||
return beforeCall(func(c *callInfo) error {
|
||||
c.failFast = failFast
|
||||
return nil
|
||||
})
|
||||
}
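
A hedged sketch of how the Header, Trailer and FailFast options compose on a unary call; the EchoClient stub and EchoRequest type are assumptions standing in for protoc-generated code, and the usual imports (context, log, grpc, metadata) are assumed to be in scope.

func callWithMetadata(ctx context.Context, client EchoClient) error {
	var header, trailer metadata.MD
	_, err := client.Echo(ctx, &EchoRequest{Msg: "ping"},
		grpc.Header(&header),   // filled with the server's header metadata after the call
		grpc.Trailer(&trailer), // filled with the server's trailer metadata after the call
		grpc.FailFast(false),   // wait for a connection instead of failing immediately
	)
	if err != nil {
		return err
	}
	log.Printf("request-id header: %v", header["request-id"])
	return nil
}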
|
||||
|
||||
// The format of the payload: compressed or not?
|
||||
type payloadFormat uint8
|
||||
|
||||
const (
|
||||
compressionNone payloadFormat = iota // no compression
|
||||
compressionMade
|
||||
)
|
||||
|
||||
// parser reads complete gRPC messages from the underlying reader.
|
||||
type parser struct {
|
||||
// r is the underlying reader.
|
||||
// See the comment on recvMsg for the permissible
|
||||
// error types.
|
||||
r io.Reader
|
||||
|
||||
// The header of a gRPC message. Find more detail
|
||||
// at http://www.grpc.io/docs/guides/wire.html.
|
||||
header [5]byte
|
||||
}
|
||||
|
||||
// recvMsg reads a complete gRPC message from the stream.
|
||||
//
|
||||
// It returns the message and its payload (compression/encoding)
|
||||
// format. The caller owns the returned msg memory.
|
||||
//
|
||||
// If there is an error, possible values are:
|
||||
// * io.EOF, when no messages remain
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
|
||||
// * of type transport.StreamError
|
||||
// No other error values or types must be returned, which also means
|
||||
// that the underlying io.Reader must not return an incompatible
|
||||
// error.
|
||||
func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) {
|
||||
if _, err := io.ReadFull(p.r, p.header[:]); err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
pf = payloadFormat(p.header[0])
|
||||
length := binary.BigEndian.Uint32(p.header[1:])
|
||||
|
||||
if length == 0 {
|
||||
return pf, nil, nil
|
||||
}
|
||||
if length > uint32(maxMsgSize) {
|
||||
return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize)
|
||||
}
|
||||
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
|
||||
// of making it for each message:
|
||||
msg = make([]byte, int(length))
|
||||
if _, err := io.ReadFull(p.r, msg); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return 0, nil, err
|
||||
}
|
||||
return pf, msg, nil
|
||||
}
|
||||
|
||||
// encode serializes msg and prepends the message header. If msg is nil, it
|
||||
// generates the message header of 0 message length.
|
||||
func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) {
|
||||
var b []byte
|
||||
var length uint
|
||||
if msg != nil {
|
||||
var err error
|
||||
// TODO(zhaoq): optimize to reduce memory alloc and copying.
|
||||
b, err = c.Marshal(msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cp != nil {
|
||||
if err := cp.Do(cbuf, b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = cbuf.Bytes()
|
||||
}
|
||||
length = uint(len(b))
|
||||
}
|
||||
if length > math.MaxUint32 {
|
||||
return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length)
|
||||
}
|
||||
|
||||
const (
|
||||
payloadLen = 1
|
||||
sizeLen = 4
|
||||
)
|
||||
|
||||
var buf = make([]byte, payloadLen+sizeLen+len(b))
|
||||
|
||||
// Write payload format
|
||||
if cp == nil {
|
||||
buf[0] = byte(compressionNone)
|
||||
} else {
|
||||
buf[0] = byte(compressionMade)
|
||||
}
|
||||
// Write length of b into buf
|
||||
binary.BigEndian.PutUint32(buf[1:], uint32(length))
|
||||
// Copy encoded msg to buf
|
||||
copy(buf[5:], b)
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
|
||||
switch pf {
|
||||
case compressionNone:
|
||||
case compressionMade:
|
||||
if dc == nil || recvCompress != dc.Type() {
|
||||
return transport.StreamErrorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
|
||||
}
|
||||
default:
|
||||
return transport.StreamErrorf(codes.Internal, "grpc: received unexpected payload format %d", pf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int) error {
|
||||
pf, d, err := p.recvMsg(maxMsgSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
|
||||
return err
|
||||
}
|
||||
if pf == compressionMade {
|
||||
d, err = dc.Do(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
}
|
||||
if len(d) > maxMsgSize {
|
||||
// TODO: Revisit the error code. Currently keep it consistent with java
|
||||
// implementation.
|
||||
return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize)
|
||||
}
|
||||
if err := c.Unmarshal(d, m); err != nil {
|
||||
return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// rpcError defines the status from an RPC.
|
||||
type rpcError struct {
|
||||
code codes.Code
|
||||
desc string
|
||||
}
|
||||
|
||||
func (e *rpcError) Error() string {
|
||||
return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc)
|
||||
}
|
||||
|
||||
// Code returns the error code for err if it was produced by the rpc system.
|
||||
// Otherwise, it returns codes.Unknown.
|
||||
func Code(err error) codes.Code {
|
||||
if err == nil {
|
||||
return codes.OK
|
||||
}
|
||||
if e, ok := err.(*rpcError); ok {
|
||||
return e.code
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
||||
// ErrorDesc returns the error description of err if it was produced by the rpc system.
|
||||
// Otherwise, it returns err.Error() or empty string when err is nil.
|
||||
func ErrorDesc(err error) string {
|
||||
if err == nil {
|
||||
return ""
|
||||
}
|
||||
if e, ok := err.(*rpcError); ok {
|
||||
return e.desc
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
// Errorf returns an error containing an error code and a description;
|
||||
// Errorf returns nil if c is OK.
|
||||
func Errorf(c codes.Code, format string, a ...interface{}) error {
|
||||
if c == codes.OK {
|
||||
return nil
|
||||
}
|
||||
return &rpcError{
|
||||
code: c,
|
||||
desc: fmt.Sprintf(format, a...),
|
||||
}
|
||||
}
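
An illustrative sketch (not part of this commit) of the error helpers above: a function returns a coded error via Errorf and a caller inspects it with Code and ErrorDesc. The lookup helper and its key space are assumptions.

package rpcerrs

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func lookupValue(store map[string]string, key string) (string, error) {
	v, ok := store[key]
	if !ok {
		return "", grpc.Errorf(codes.NotFound, "no value for key %q", key)
	}
	return v, nil
}

// describe maps a gRPC error back to a short human-readable string.
func describe(err error) string {
	if err == nil {
		return "ok"
	}
	if grpc.Code(err) == codes.NotFound {
		return "not found: " + grpc.ErrorDesc(err)
	}
	return grpc.ErrorDesc(err)
}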
|
||||
|
||||
// toRPCErr converts an error into a rpcError.
|
||||
func toRPCErr(err error) error {
|
||||
switch e := err.(type) {
|
||||
case *rpcError:
|
||||
return err
|
||||
case transport.StreamError:
|
||||
return &rpcError{
|
||||
code: e.Code,
|
||||
desc: e.Desc,
|
||||
}
|
||||
case transport.ConnectionError:
|
||||
return &rpcError{
|
||||
code: codes.Internal,
|
||||
desc: e.Desc,
|
||||
}
|
||||
default:
|
||||
switch err {
|
||||
case context.DeadlineExceeded:
|
||||
return &rpcError{
|
||||
code: codes.DeadlineExceeded,
|
||||
desc: err.Error(),
|
||||
}
|
||||
case context.Canceled:
|
||||
return &rpcError{
|
||||
code: codes.Canceled,
|
||||
desc: err.Error(),
|
||||
}
|
||||
case ErrClientConnClosing:
|
||||
return &rpcError{
|
||||
code: codes.FailedPrecondition,
|
||||
desc: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return Errorf(codes.Unknown, "%v", err)
|
||||
}
|
||||
|
||||
// convertCode converts a standard Go error into its canonical code. Note that
|
||||
// this is only used to translate the error returned by the server applications.
|
||||
func convertCode(err error) codes.Code {
|
||||
switch err {
|
||||
case nil:
|
||||
return codes.OK
|
||||
case io.EOF:
|
||||
return codes.OutOfRange
|
||||
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
|
||||
return codes.FailedPrecondition
|
||||
case os.ErrInvalid:
|
||||
return codes.InvalidArgument
|
||||
case context.Canceled:
|
||||
return codes.Canceled
|
||||
case context.DeadlineExceeded:
|
||||
return codes.DeadlineExceeded
|
||||
}
|
||||
switch {
|
||||
case os.IsExist(err):
|
||||
return codes.AlreadyExists
|
||||
case os.IsNotExist(err):
|
||||
return codes.NotFound
|
||||
case os.IsPermission(err):
|
||||
return codes.PermissionDenied
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
||||
// SupportPackageIsVersion3 is referenced from generated protocol buffer files
|
||||
// to assert that the generated code is compatible with this version of the grpc package.
|
||||
//
|
||||
// This constant may be renamed in the future if a change in the generated code
|
||||
// requires a synchronised update of grpc-go and protoc-gen-go. This constant
|
||||
// should not be referenced from any other code.
|
||||
const SupportPackageIsVersion3 = true
|
|
@ -0,0 +1,894 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
|
||||
|
||||
// MethodDesc represents an RPC service's method specification.
|
||||
type MethodDesc struct {
|
||||
MethodName string
|
||||
Handler methodHandler
|
||||
}
|
||||
|
||||
// ServiceDesc represents an RPC service's specification.
|
||||
type ServiceDesc struct {
|
||||
ServiceName string
|
||||
// The pointer to the service interface. Used to check whether the user
|
||||
// provided implementation satisfies the interface requirements.
|
||||
HandlerType interface{}
|
||||
Methods []MethodDesc
|
||||
Streams []StreamDesc
|
||||
Metadata interface{}
|
||||
}
|
||||
|
||||
// service consists of the information of the server serving this service and
|
||||
// the methods in this service.
|
||||
type service struct {
|
||||
server interface{} // the server for service methods
|
||||
md map[string]*MethodDesc
|
||||
sd map[string]*StreamDesc
|
||||
mdata interface{}
|
||||
}
|
||||
|
||||
// Server is a gRPC server to serve RPC requests.
|
||||
type Server struct {
|
||||
opts options
|
||||
|
||||
mu sync.Mutex // guards following
|
||||
lis map[net.Listener]bool
|
||||
conns map[io.Closer]bool
|
||||
drain bool
|
||||
// A CondVar to let GracefulStop() block until all the pending RPCs are finished
// and all the transports go away.
|
||||
cv *sync.Cond
|
||||
m map[string]*service // service name -> service info
|
||||
events trace.EventLog
|
||||
}
|
||||
|
||||
type options struct {
|
||||
creds credentials.TransportCredentials
|
||||
codec Codec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
maxMsgSize int
|
||||
unaryInt UnaryServerInterceptor
|
||||
streamInt StreamServerInterceptor
|
||||
maxConcurrentStreams uint32
|
||||
useHandlerImpl bool // use http.Handler-based server
|
||||
}
|
||||
|
||||
var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit
|
||||
|
||||
// A ServerOption sets options.
|
||||
type ServerOption func(*options)
|
||||
|
||||
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
|
||||
func CustomCodec(codec Codec) ServerOption {
|
||||
return func(o *options) {
|
||||
o.codec = codec
|
||||
}
|
||||
}
|
||||
|
||||
// RPCCompressor returns a ServerOption that sets a compressor for outbound messages.
|
||||
func RPCCompressor(cp Compressor) ServerOption {
|
||||
return func(o *options) {
|
||||
o.cp = cp
|
||||
}
|
||||
}
|
||||
|
||||
// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages.
|
||||
func RPCDecompressor(dc Decompressor) ServerOption {
|
||||
return func(o *options) {
|
||||
o.dc = dc
|
||||
}
|
||||
}
|
||||
|
||||
// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound messages.
|
||||
// If this is not set, gRPC uses the default 4MB.
|
||||
func MaxMsgSize(m int) ServerOption {
|
||||
return func(o *options) {
|
||||
o.maxMsgSize = m
|
||||
}
|
||||
}
|
||||
|
||||
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
|
||||
// of concurrent streams to each ServerTransport.
|
||||
func MaxConcurrentStreams(n uint32) ServerOption {
|
||||
return func(o *options) {
|
||||
o.maxConcurrentStreams = n
|
||||
}
|
||||
}
|
||||
|
||||
// Creds returns a ServerOption that sets credentials for server connections.
|
||||
func Creds(c credentials.TransportCredentials) ServerOption {
|
||||
return func(o *options) {
|
||||
o.creds = c
|
||||
}
|
||||
}
|
||||
|
||||
// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
|
||||
// server. Only one unary interceptor can be installed. The construction of multiple
|
||||
// interceptors (e.g., chaining) can be implemented at the caller.
|
||||
func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
|
||||
return func(o *options) {
|
||||
if o.unaryInt != nil {
|
||||
panic("The unary server interceptor has been set.")
|
||||
}
|
||||
o.unaryInt = i
|
||||
}
|
||||
}
|
||||
|
||||
// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
|
||||
// server. Only one stream interceptor can be installed.
|
||||
func StreamInterceptor(i StreamServerInterceptor) ServerOption {
|
||||
return func(o *options) {
|
||||
if o.streamInt != nil {
|
||||
panic("The stream server interceptor has been set.")
|
||||
}
|
||||
o.streamInt = i
|
||||
}
|
||||
}
|
||||
|
||||
// NewServer creates a gRPC server which has no service registered and has not
|
||||
// started to accept requests yet.
|
||||
func NewServer(opt ...ServerOption) *Server {
|
||||
var opts options
|
||||
opts.maxMsgSize = defaultMaxMsgSize
|
||||
for _, o := range opt {
|
||||
o(&opts)
|
||||
}
|
||||
if opts.codec == nil {
|
||||
// Set the default codec.
|
||||
opts.codec = protoCodec{}
|
||||
}
|
||||
s := &Server{
|
||||
lis: make(map[net.Listener]bool),
|
||||
opts: opts,
|
||||
conns: make(map[io.Closer]bool),
|
||||
m: make(map[string]*service),
|
||||
}
|
||||
s.cv = sync.NewCond(&s.mu)
|
||||
if EnableTracing {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
|
||||
}
|
||||
return s
|
||||
}
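
For orientation, a hedged sketch of wiring the ServerOptions above into a server. The echoServer type, the RegisterEchoServer helper and the certificate paths are assumptions standing in for generated code and deployment-specific files; the usual imports (log, net, grpc, credentials) are assumed to be in scope.

func main() {
	creds, err := credentials.NewServerTLSFromFile("server.crt", "server.key")
	if err != nil {
		log.Fatalf("failed to load TLS credentials: %v", err)
	}
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer(
		grpc.Creds(creds),
		grpc.MaxMsgSize(16*1024*1024), // raise the 4MB default for larger payloads
		grpc.MaxConcurrentStreams(250),
		grpc.RPCCompressor(grpc.NewGZIPCompressor()),
		grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
	)
	RegisterEchoServer(s, &echoServer{})
	if err := s.Serve(lis); err != nil {
		log.Fatalf("grpc: Serve: %v", err)
	}
}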
|
||||
|
||||
// printf records an event in s's event log, unless s has been stopped.
|
||||
// REQUIRES s.mu is held.
|
||||
func (s *Server) printf(format string, a ...interface{}) {
|
||||
if s.events != nil {
|
||||
s.events.Printf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// errorf records an error in s's event log, unless s has been stopped.
|
||||
// REQUIRES s.mu is held.
|
||||
func (s *Server) errorf(format string, a ...interface{}) {
|
||||
if s.events != nil {
|
||||
s.events.Errorf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterService registers a service and its implementation to the gRPC
|
||||
// server. Called from the IDL generated code. This must be called before
|
||||
// invoking Serve.
|
||||
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
|
||||
ht := reflect.TypeOf(sd.HandlerType).Elem()
|
||||
st := reflect.TypeOf(ss)
|
||||
if !st.Implements(ht) {
|
||||
grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
|
||||
}
|
||||
s.register(sd, ss)
|
||||
}
|
||||
|
||||
func (s *Server) register(sd *ServiceDesc, ss interface{}) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.printf("RegisterService(%q)", sd.ServiceName)
|
||||
if _, ok := s.m[sd.ServiceName]; ok {
|
||||
grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
|
||||
}
|
||||
srv := &service{
|
||||
server: ss,
|
||||
md: make(map[string]*MethodDesc),
|
||||
sd: make(map[string]*StreamDesc),
|
||||
mdata: sd.Metadata,
|
||||
}
|
||||
for i := range sd.Methods {
|
||||
d := &sd.Methods[i]
|
||||
srv.md[d.MethodName] = d
|
||||
}
|
||||
for i := range sd.Streams {
|
||||
d := &sd.Streams[i]
|
||||
srv.sd[d.StreamName] = d
|
||||
}
|
||||
s.m[sd.ServiceName] = srv
|
||||
}
|
||||
|
||||
// MethodInfo contains the information of an RPC including its method name and type.
|
||||
type MethodInfo struct {
|
||||
// Name is the method name only, without the service name or package name.
|
||||
Name string
|
||||
// IsClientStream indicates whether the RPC is a client streaming RPC.
|
||||
IsClientStream bool
|
||||
// IsServerStream indicates whether the RPC is a server streaming RPC.
|
||||
IsServerStream bool
|
||||
}
|
||||
|
||||
// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
|
||||
type ServiceInfo struct {
|
||||
Methods []MethodInfo
|
||||
// Metadata is the metadata specified in ServiceDesc when registering service.
|
||||
Metadata interface{}
|
||||
}
|
||||
|
||||
// GetServiceInfo returns a map from service names to ServiceInfo.
|
||||
// Service names include the package names, in the form of <package>.<service>.
|
||||
func (s *Server) GetServiceInfo() map[string]ServiceInfo {
|
||||
ret := make(map[string]ServiceInfo)
|
||||
for n, srv := range s.m {
|
||||
methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd))
|
||||
for m := range srv.md {
|
||||
methods = append(methods, MethodInfo{
|
||||
Name: m,
|
||||
IsClientStream: false,
|
||||
IsServerStream: false,
|
||||
})
|
||||
}
|
||||
for m, d := range srv.sd {
|
||||
methods = append(methods, MethodInfo{
|
||||
Name: m,
|
||||
IsClientStream: d.ClientStreams,
|
||||
IsServerStream: d.ServerStreams,
|
||||
})
|
||||
}
|
||||
|
||||
ret[n] = ServiceInfo{
|
||||
Methods: methods,
|
||||
Metadata: srv.mdata,
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrServerStopped indicates that the operation is now illegal because of
|
||||
// the server being stopped.
|
||||
ErrServerStopped = errors.New("grpc: the server has been stopped")
|
||||
)
|
||||
|
||||
func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
|
||||
if s.opts.creds == nil {
|
||||
return rawConn, nil, nil
|
||||
}
|
||||
return s.opts.creds.ServerHandshake(rawConn)
|
||||
}
|
||||
|
||||
// Serve accepts incoming connections on the listener lis, creating a new
|
||||
// ServerTransport and service goroutine for each. The service goroutines
|
||||
// read gRPC requests and then call the registered handlers to reply to them.
|
||||
// Serve returns when lis.Accept fails. lis will be closed when
|
||||
// this method returns.
|
||||
func (s *Server) Serve(lis net.Listener) error {
|
||||
s.mu.Lock()
|
||||
s.printf("serving")
|
||||
if s.lis == nil {
|
||||
s.mu.Unlock()
|
||||
lis.Close()
|
||||
return ErrServerStopped
|
||||
}
|
||||
s.lis[lis] = true
|
||||
s.mu.Unlock()
|
||||
defer func() {
|
||||
s.mu.Lock()
|
||||
if s.lis != nil && s.lis[lis] {
|
||||
lis.Close()
|
||||
delete(s.lis, lis)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}()
|
||||
for {
|
||||
rawConn, err := lis.Accept()
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.printf("done serving; Accept = %v", err)
|
||||
s.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
// Start a new goroutine to deal with rawConn
|
||||
// so we don't stall this Accept loop goroutine.
|
||||
go s.handleRawConn(rawConn)
|
||||
}
|
||||
}
|
||||
|
||||
// handleRawConn is run in its own goroutine and handles a just-accepted
|
||||
// connection that has not had any I/O performed on it yet.
|
||||
func (s *Server) handleRawConn(rawConn net.Conn) {
|
||||
conn, authInfo, err := s.useTransportAuthenticator(rawConn)
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
|
||||
s.mu.Unlock()
|
||||
grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
|
||||
rawConn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
if s.conns == nil {
|
||||
s.mu.Unlock()
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
if s.opts.useHandlerImpl {
|
||||
s.serveUsingHandler(conn)
|
||||
} else {
|
||||
s.serveNewHTTP2Transport(conn, authInfo)
|
||||
}
|
||||
}
|
||||
|
||||
// serveNewHTTP2Transport sets up a new http/2 transport (using the
|
||||
// gRPC http2 server transport in transport/http2_server.go) and
|
||||
// serves streams on it.
|
||||
// This is run in its own goroutine (it does network I/O in
|
||||
// transport.NewServerTransport).
|
||||
func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
|
||||
st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
|
||||
s.mu.Unlock()
|
||||
c.Close()
|
||||
grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
|
||||
return
|
||||
}
|
||||
if !s.addConn(st) {
|
||||
st.Close()
|
||||
return
|
||||
}
|
||||
s.serveStreams(st)
|
||||
}
|
||||
|
||||
func (s *Server) serveStreams(st transport.ServerTransport) {
|
||||
defer s.removeConn(st)
|
||||
defer st.Close()
|
||||
var wg sync.WaitGroup
|
||||
st.HandleStreams(func(stream *transport.Stream) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
s.handleStream(st, stream, s.traceInfo(st, stream))
|
||||
}()
|
||||
})
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
var _ http.Handler = (*Server)(nil)
|
||||
|
||||
// serveUsingHandler is called from handleRawConn when s is configured
|
||||
// to handle requests via the http.Handler interface. It sets up a
|
||||
// net/http.Server to handle the just-accepted conn. The http.Server
|
||||
// is configured to route all incoming requests (all HTTP/2 streams)
|
||||
// to ServeHTTP, which creates a new ServerTransport for each stream.
|
||||
// serveUsingHandler blocks until conn closes.
|
||||
//
|
||||
// This codepath is only used when Server.TestingUseHandlerImpl has
|
||||
// been configured. This lets the end2end tests exercise the ServeHTTP
|
||||
// method as one of the environment types.
|
||||
//
|
||||
// conn is the *tls.Conn that's already been authenticated.
|
||||
func (s *Server) serveUsingHandler(conn net.Conn) {
|
||||
if !s.addConn(conn) {
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
defer s.removeConn(conn)
|
||||
h2s := &http2.Server{
|
||||
MaxConcurrentStreams: s.opts.maxConcurrentStreams,
|
||||
}
|
||||
h2s.ServeConn(conn, &http2.ServeConnOpts{
|
||||
Handler: s,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
st, err := transport.NewServerHandlerTransport(w, r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if !s.addConn(st) {
|
||||
st.Close()
|
||||
return
|
||||
}
|
||||
defer s.removeConn(st)
|
||||
s.serveStreams(st)
|
||||
}
|
||||
|
||||
// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
|
||||
// If tracing is not enabled, it returns nil.
|
||||
func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
|
||||
if !EnableTracing {
|
||||
return nil
|
||||
}
|
||||
trInfo = &traceInfo{
|
||||
tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()),
|
||||
}
|
||||
trInfo.firstLine.client = false
|
||||
trInfo.firstLine.remoteAddr = st.RemoteAddr()
|
||||
stream.TraceContext(trInfo.tr)
|
||||
if dl, ok := stream.Context().Deadline(); ok {
|
||||
trInfo.firstLine.deadline = dl.Sub(time.Now())
|
||||
}
|
||||
return trInfo
|
||||
}
|
||||
|
||||
func (s *Server) addConn(c io.Closer) bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.conns == nil || s.drain {
|
||||
return false
|
||||
}
|
||||
s.conns[c] = true
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Server) removeConn(c io.Closer) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.conns != nil {
|
||||
delete(s.conns, c)
|
||||
s.cv.Signal()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
|
||||
var cbuf *bytes.Buffer
|
||||
if cp != nil {
|
||||
cbuf = new(bytes.Buffer)
|
||||
}
|
||||
p, err := encode(s.opts.codec, msg, cp, cbuf)
|
||||
if err != nil {
|
||||
// This typically indicates a fatal issue (e.g., memory
|
||||
// corruption or hardware faults) that the application program
|
||||
// cannot handle.
|
||||
//
|
||||
// TODO(zhaoq): There exist other options also such as only closing the
|
||||
// faulty stream locally and remotely (Other streams can keep going). Find
|
||||
// the optimal option.
|
||||
grpclog.Fatalf("grpc: Server failed to encode response %v", err)
|
||||
}
|
||||
return t.Write(stream, p, opts)
|
||||
}
|
||||
|
||||
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
|
||||
if trInfo != nil {
|
||||
defer trInfo.tr.Finish()
|
||||
trInfo.firstLine.client = false
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
}()
|
||||
}
|
||||
if s.opts.cp != nil {
|
||||
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
|
||||
stream.SetSendCompress(s.opts.cp.Type())
|
||||
}
|
||||
p := &parser{r: stream}
|
||||
for {
|
||||
pf, req, err := p.recvMsg(s.opts.maxMsgSize)
|
||||
if err == io.EOF {
|
||||
// The entire stream is done (for unary RPC only).
|
||||
return err
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"}
|
||||
}
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case *rpcError:
|
||||
if err := t.WriteStatus(stream, err.code, err.desc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
case transport.ConnectionError:
|
||||
// Nothing to do here.
|
||||
case transport.StreamError:
|
||||
if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
|
||||
switch err := err.(type) {
|
||||
case transport.StreamError:
|
||||
if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
default:
|
||||
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
|
||||
}
|
||||
return err
|
||||
}
|
||||
statusCode := codes.OK
|
||||
statusDesc := ""
|
||||
df := func(v interface{}) error {
|
||||
if pf == compressionMade {
|
||||
var err error
|
||||
req, err = s.opts.dc.Do(bytes.NewReader(req))
|
||||
if err != nil {
|
||||
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(req) > s.opts.maxMsgSize {
|
||||
// TODO: Revisit the error code. Currently keep it consistent with
|
||||
// java implementation.
|
||||
statusCode = codes.Internal
|
||||
statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize)
|
||||
}
|
||||
if err := s.opts.codec.Unmarshal(req, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
|
||||
if appErr != nil {
|
||||
if err, ok := appErr.(*rpcError); ok {
|
||||
statusCode = err.code
|
||||
statusDesc = err.desc
|
||||
} else {
|
||||
statusCode = convertCode(appErr)
|
||||
statusDesc = appErr.Error()
|
||||
}
|
||||
if trInfo != nil && statusCode != codes.OK {
|
||||
trInfo.tr.LazyLog(stringer(statusDesc), true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(stringer("OK"), false)
|
||||
}
|
||||
opts := &transport.Options{
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
|
||||
switch err := err.(type) {
|
||||
case transport.ConnectionError:
|
||||
// Nothing to do here.
|
||||
case transport.StreamError:
|
||||
statusCode = err.Code
|
||||
statusDesc = err.Desc
|
||||
default:
|
||||
statusCode = codes.Unknown
|
||||
statusDesc = err.Error()
|
||||
}
|
||||
return err
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
|
||||
}
|
||||
return t.WriteStatus(stream, statusCode, statusDesc)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
|
||||
if s.opts.cp != nil {
|
||||
stream.SetSendCompress(s.opts.cp.Type())
|
||||
}
|
||||
ss := &serverStream{
|
||||
t: t,
|
||||
s: stream,
|
||||
p: &parser{r: stream},
|
||||
codec: s.opts.codec,
|
||||
cp: s.opts.cp,
|
||||
dc: s.opts.dc,
|
||||
maxMsgSize: s.opts.maxMsgSize,
|
||||
trInfo: trInfo,
|
||||
}
|
||||
if ss.cp != nil {
|
||||
ss.cbuf = new(bytes.Buffer)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
defer func() {
|
||||
ss.mu.Lock()
|
||||
if err != nil && err != io.EOF {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
ss.trInfo.tr.Finish()
|
||||
ss.trInfo.tr = nil
|
||||
ss.mu.Unlock()
|
||||
}()
|
||||
}
|
||||
var appErr error
|
||||
if s.opts.streamInt == nil {
|
||||
appErr = sd.Handler(srv.server, ss)
|
||||
} else {
|
||||
info := &StreamServerInfo{
|
||||
FullMethod: stream.Method(),
|
||||
IsClientStream: sd.ClientStreams,
|
||||
IsServerStream: sd.ServerStreams,
|
||||
}
|
||||
appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler)
|
||||
}
|
||||
if appErr != nil {
|
||||
if err, ok := appErr.(*rpcError); ok {
|
||||
ss.statusCode = err.code
|
||||
ss.statusDesc = err.desc
|
||||
} else if err, ok := appErr.(transport.StreamError); ok {
|
||||
ss.statusCode = err.Code
|
||||
ss.statusDesc = err.Desc
|
||||
} else {
|
||||
ss.statusCode = convertCode(appErr)
|
||||
ss.statusDesc = appErr.Error()
|
||||
}
|
||||
}
|
||||
if trInfo != nil {
|
||||
ss.mu.Lock()
|
||||
if ss.statusCode != codes.OK {
|
||||
ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
|
||||
ss.trInfo.tr.SetError()
|
||||
} else {
|
||||
ss.trInfo.tr.LazyLog(stringer("OK"), false)
|
||||
}
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
|
||||
|
||||
}
|
||||
|
||||
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
|
||||
sm := stream.Method()
|
||||
if sm != "" && sm[0] == '/' {
|
||||
sm = sm[1:]
|
||||
}
|
||||
pos := strings.LastIndex(sm, "/")
|
||||
if pos == -1 {
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil {
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.Finish()
|
||||
}
|
||||
return
|
||||
}
|
||||
service := sm[:pos]
|
||||
method := sm[pos+1:]
|
||||
srv, ok := s.m[service]
|
||||
if !ok {
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil {
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.Finish()
|
||||
}
|
||||
return
|
||||
}
|
||||
// Unary RPC or Streaming RPC?
|
||||
if md, ok := srv.md[method]; ok {
|
||||
s.processUnaryRPC(t, stream, srv, md, trInfo)
|
||||
return
|
||||
}
|
||||
if sd, ok := srv.sd[method]; ok {
|
||||
s.processStreamingRPC(t, stream, srv, sd, trInfo)
|
||||
return
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil {
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.Finish()
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops the gRPC server. It immediately closes all open
|
||||
// connections and listeners.
|
||||
// It cancels all active RPCs on the server side and the corresponding
|
||||
// pending RPCs on the client side will get notified by connection
|
||||
// errors.
|
||||
func (s *Server) Stop() {
|
||||
s.mu.Lock()
|
||||
listeners := s.lis
|
||||
s.lis = nil
|
||||
st := s.conns
|
||||
s.conns = nil
|
||||
// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
|
||||
s.cv.Signal()
|
||||
s.mu.Unlock()
|
||||
|
||||
for lis := range listeners {
|
||||
lis.Close()
|
||||
}
|
||||
for c := range st {
|
||||
c.Close()
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
if s.events != nil {
|
||||
s.events.Finish()
|
||||
s.events = nil
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// GracefulStop stops the gRPC server gracefully. It stops the server from accepting new
|
||||
// connections and RPCs and blocks until all the pending RPCs are finished.
|
||||
func (s *Server) GracefulStop() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.drain == true || s.conns == nil {
|
||||
return
|
||||
}
|
||||
s.drain = true
|
||||
for lis := range s.lis {
|
||||
lis.Close()
|
||||
}
|
||||
s.lis = nil
|
||||
for c := range s.conns {
|
||||
c.(transport.ServerTransport).Drain()
|
||||
}
|
||||
for len(s.conns) != 0 {
|
||||
s.cv.Wait()
|
||||
}
|
||||
s.conns = nil
|
||||
if s.events != nil {
|
||||
s.events.Finish()
|
||||
s.events = nil
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
internal.TestingCloseConns = func(arg interface{}) {
|
||||
arg.(*Server).testingCloseConns()
|
||||
}
|
||||
internal.TestingUseHandlerImpl = func(arg interface{}) {
|
||||
arg.(*Server).opts.useHandlerImpl = true
|
||||
}
|
||||
}
|
||||
|
||||
// testingCloseConns closes all existing transports but keeps s.lis
|
||||
// accepting new connections.
|
||||
func (s *Server) testingCloseConns() {
|
||||
s.mu.Lock()
|
||||
for c := range s.conns {
|
||||
c.Close()
|
||||
delete(s.conns, c)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// SendHeader sends header metadata. It may be called at most once from a unary
|
||||
// RPC handler. The ctx is the RPC handler's Context or one derived from it.
|
||||
func SendHeader(ctx context.Context, md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
stream, ok := transport.StreamFromContext(ctx)
|
||||
if !ok {
|
||||
return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx)
|
||||
}
|
||||
t := stream.ServerTransport()
|
||||
if t == nil {
|
||||
grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
|
||||
}
|
||||
return t.WriteHeader(stream, md)
|
||||
}
|
||||
|
||||
// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
|
||||
// It may be called at most once from a unary RPC handler. The ctx is the RPC
|
||||
// handler's Context or one derived from it.
|
||||
func SetTrailer(ctx context.Context, md metadata.MD) error {
|
||||
if md.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
stream, ok := transport.StreamFromContext(ctx)
|
||||
if !ok {
|
||||
return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx)
|
||||
}
|
||||
return stream.SetTrailer(md)
|
||||
}
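
A hedged sketch of SendHeader and SetTrailer from inside a unary handler; the echo types are assumptions standing in for protoc-generated code, and the "cache" and "elapsed-ms" keys are arbitrary examples.

func (s *echoServer) Echo(ctx context.Context, req *EchoRequest) (*EchoReply, error) {
	// Push header metadata to the client before the reply is sent.
	if err := grpc.SendHeader(ctx, metadata.Pairs("cache", "miss")); err != nil {
		return nil, err
	}
	// Trailer metadata is delivered together with the final RPC status.
	grpc.SetTrailer(ctx, metadata.Pairs("elapsed-ms", "42"))
	return &EchoReply{Msg: req.Msg}, nil
}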
|
|
@ -0,0 +1,493 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// StreamHandler defines the handler called by gRPC server to complete the
|
||||
// execution of a streaming RPC.
|
||||
type StreamHandler func(srv interface{}, stream ServerStream) error
|
||||
|
||||
// StreamDesc represents a streaming RPC service's method specification.
|
||||
type StreamDesc struct {
|
||||
StreamName string
|
||||
Handler StreamHandler
|
||||
|
||||
// At least one of these is true.
|
||||
ServerStreams bool
|
||||
ClientStreams bool
|
||||
}
|
||||
|
||||
// Stream defines the common interface a client or server stream has to satisfy.
|
||||
type Stream interface {
|
||||
// Context returns the context for this stream.
|
||||
Context() context.Context
|
||||
// SendMsg blocks until it sends m, the stream is done or the stream
|
||||
// breaks.
|
||||
// On error, it aborts the stream and returns an RPC status on client
|
||||
// side. On server side, it simply returns the error to the caller.
|
||||
// SendMsg is called by generated code. Users can also call SendMsg
// directly when their use case requires it.
|
||||
SendMsg(m interface{}) error
|
||||
// RecvMsg blocks until it receives a message or the stream is
|
||||
// done. On client side, it returns io.EOF when the stream is done. On
|
||||
// any other error, it aborts the stream and returns an RPC status. On
|
||||
// server side, it simply returns the error to the caller.
|
||||
RecvMsg(m interface{}) error
|
||||
}
|
||||
|
||||
// ClientStream defines the interface a client stream has to satisfy.
|
||||
type ClientStream interface {
|
||||
// Header returns the header metadata received from the server if there
|
||||
// is any. It blocks if the metadata is not ready to read.
|
||||
Header() (metadata.MD, error)
|
||||
// Trailer returns the trailer metadata from the server, if there is any.
|
||||
// It must only be called after stream.CloseAndRecv has returned, or
|
||||
// stream.Recv has returned a non-nil error (including io.EOF).
|
||||
Trailer() metadata.MD
|
||||
// CloseSend closes the send direction of the stream. It closes the stream
|
||||
// when non-nil error is met.
|
||||
CloseSend() error
|
||||
Stream
|
||||
}
|
||||
|
||||
// NewClientStream creates a new Stream for the client side. This is called
|
||||
// by generated code.
|
||||
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
||||
var (
|
||||
t transport.ClientTransport
|
||||
s *transport.Stream
|
||||
put func()
|
||||
)
|
||||
c := defaultCallInfo
|
||||
for _, o := range opts {
|
||||
if err := o.before(&c); err != nil {
|
||||
return nil, toRPCErr(err)
|
||||
}
|
||||
}
|
||||
callHdr := &transport.CallHdr{
|
||||
Host: cc.authority,
|
||||
Method: method,
|
||||
Flush: desc.ServerStreams && desc.ClientStreams,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
}
|
||||
var trInfo traceInfo
|
||||
if EnableTracing {
|
||||
trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
|
||||
trInfo.firstLine.client = true
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
trInfo.firstLine.deadline = deadline.Sub(time.Now())
|
||||
}
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
ctx = trace.NewContext(ctx, trInfo.tr)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// Need to call tr.Finish() if an error is returned,
// because tr will not be returned to the caller.
|
||||
trInfo.tr.LazyPrintf("RPC: [%v]", err)
|
||||
trInfo.tr.SetError()
|
||||
trInfo.tr.Finish()
|
||||
}
|
||||
}()
|
||||
}
|
||||
gopts := BalancerGetOptions{
|
||||
BlockingWait: !c.failFast,
|
||||
}
|
||||
for {
|
||||
t, put, err = cc.getTransport(ctx, gopts)
|
||||
if err != nil {
|
||||
// TODO(zhaoq): Probably revisit the error handling.
|
||||
if _, ok := err.(*rpcError); ok {
|
||||
return nil, err
|
||||
}
|
||||
if err == errConnClosing || err == errConnUnavailable {
|
||||
if c.failFast {
|
||||
return nil, Errorf(codes.Unavailable, "%v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// All the other errors are treated as Internal errors.
|
||||
return nil, Errorf(codes.Internal, "%v", err)
|
||||
}
|
||||
|
||||
s, err = t.NewStream(ctx, callHdr)
|
||||
if err != nil {
|
||||
if put != nil {
|
||||
put()
|
||||
put = nil
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
|
||||
if c.failFast {
|
||||
return nil, toRPCErr(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
return nil, toRPCErr(err)
|
||||
}
|
||||
break
|
||||
}
|
||||
cs := &clientStream{
|
||||
opts: opts,
|
||||
c: c,
|
||||
desc: desc,
|
||||
codec: cc.dopts.codec,
|
||||
cp: cc.dopts.cp,
|
||||
dc: cc.dopts.dc,
|
||||
|
||||
put: put,
|
||||
t: t,
|
||||
s: s,
|
||||
p: &parser{r: s},
|
||||
|
||||
tracing: EnableTracing,
|
||||
trInfo: trInfo,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
cs.cbuf = new(bytes.Buffer)
|
||||
}
|
||||
// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
|
||||
// when there is no pending I/O operations on this stream.
|
||||
go func() {
|
||||
select {
|
||||
case <-t.Error():
|
||||
// Incur transport error, simply exit.
|
||||
case <-s.Done():
|
||||
// TODO: The trace of the RPC is terminated here when there is no pending
|
||||
// I/O, which is probably not the optimal solution.
|
||||
if s.StatusCode() == codes.OK {
|
||||
cs.finish(nil)
|
||||
} else {
|
||||
cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
|
||||
}
|
||||
cs.closeTransportStream(nil)
|
||||
case <-s.GoAway():
|
||||
cs.finish(errConnDrain)
|
||||
cs.closeTransportStream(errConnDrain)
|
||||
case <-s.Context().Done():
|
||||
err := s.Context().Err()
|
||||
cs.finish(err)
|
||||
cs.closeTransportStream(transport.ContextErr(err))
|
||||
}
|
||||
}()
|
||||
return cs, nil
|
||||
}
|
||||
|
||||
// clientStream implements a client side Stream.
|
||||
type clientStream struct {
|
||||
opts []CallOption
|
||||
c callInfo
|
||||
t transport.ClientTransport
|
||||
s *transport.Stream
|
||||
p *parser
|
||||
desc *StreamDesc
|
||||
codec Codec
|
||||
cp Compressor
|
||||
cbuf *bytes.Buffer
|
||||
dc Decompressor
|
||||
|
||||
tracing bool // set to EnableTracing when the clientStream is created.
|
||||
|
||||
mu sync.Mutex
|
||||
put func()
|
||||
closed bool
|
||||
// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
|
||||
// and is set to nil when the clientStream's finish method is called.
|
||||
trInfo traceInfo
|
||||
}
|
||||
|
||||
func (cs *clientStream) Context() context.Context {
|
||||
return cs.s.Context()
|
||||
}
|
||||
|
||||
func (cs *clientStream) Header() (metadata.MD, error) {
|
||||
m, err := cs.s.Header()
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
}
|
||||
return m, err
|
||||
}
|
||||
|
||||
func (cs *clientStream) Trailer() metadata.MD {
|
||||
return cs.s.Trailer()
|
||||
}
|
||||
|
||||
func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
||||
if cs.tracing {
|
||||
cs.mu.Lock()
|
||||
if cs.trInfo.tr != nil {
|
||||
cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
||||
}
|
||||
cs.mu.Unlock()
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
}
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
if err == io.EOF {
|
||||
// Specialize the process for server streaming. SendMsg is only called
// once when creating the stream object. io.EOF needs to be skipped when
// the RPC finishes early (before the stream object is created).
// TODO: It is probably better to move this into the generated code.
|
||||
if !cs.desc.ClientStreams && cs.desc.ServerStreams {
|
||||
err = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
err = toRPCErr(err)
|
||||
}()
|
||||
out, err := encode(cs.codec, m, cs.cp, cs.cbuf)
|
||||
defer func() {
|
||||
if cs.cbuf != nil {
|
||||
cs.cbuf.Reset()
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
return transport.StreamErrorf(codes.Internal, "grpc: %v", err)
|
||||
}
|
||||
return cs.t.Write(cs.s, out, &transport.Options{Last: false})
|
||||
}
|
||||
|
||||
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32)
|
||||
defer func() {
|
||||
// err != nil indicates the termination of the stream.
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
}
|
||||
}()
|
||||
if err == nil {
|
||||
if cs.tracing {
|
||||
cs.mu.Lock()
|
||||
if cs.trInfo.tr != nil {
|
||||
cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
||||
}
|
||||
cs.mu.Unlock()
|
||||
}
|
||||
if !cs.desc.ClientStreams || cs.desc.ServerStreams {
|
||||
return
|
||||
}
|
||||
// Special handling for client streaming rpc.
|
||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32)
|
||||
cs.closeTransportStream(err)
|
||||
if err == nil {
|
||||
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
|
||||
}
|
||||
if err == io.EOF {
|
||||
if cs.s.StatusCode() == codes.OK {
|
||||
cs.finish(err)
|
||||
return nil
|
||||
}
|
||||
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
if err == io.EOF {
|
||||
if cs.s.StatusCode() == codes.OK {
|
||||
// Returns io.EOF to indicate the end of the stream.
|
||||
return
|
||||
}
|
||||
return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
|
||||
func (cs *clientStream) CloseSend() (err error) {
|
||||
err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
}
|
||||
}()
|
||||
if err == nil || err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
err = toRPCErr(err)
|
||||
return
|
||||
}
|
||||
|
||||
func (cs *clientStream) closeTransportStream(err error) {
|
||||
cs.mu.Lock()
|
||||
if cs.closed {
|
||||
cs.mu.Unlock()
|
||||
return
|
||||
}
|
||||
cs.closed = true
|
||||
cs.mu.Unlock()
|
||||
cs.t.CloseStream(cs.s, err)
|
||||
}
|
||||
|
||||
func (cs *clientStream) finish(err error) {
|
||||
cs.mu.Lock()
|
||||
defer cs.mu.Unlock()
|
||||
for _, o := range cs.opts {
|
||||
o.after(&cs.c)
|
||||
}
|
||||
if cs.put != nil {
|
||||
cs.put()
|
||||
cs.put = nil
|
||||
}
|
||||
if !cs.tracing {
|
||||
return
|
||||
}
|
||||
if cs.trInfo.tr != nil {
|
||||
if err == nil || err == io.EOF {
|
||||
cs.trInfo.tr.LazyPrintf("RPC: [OK]")
|
||||
} else {
|
||||
cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
|
||||
cs.trInfo.tr.SetError()
|
||||
}
|
||||
cs.trInfo.tr.Finish()
|
||||
cs.trInfo.tr = nil
|
||||
}
|
||||
}
|
||||
|
||||
// ServerStream defines the interface a server stream has to satisfy.
|
||||
type ServerStream interface {
|
||||
// SendHeader sends the header metadata. It should not be called
|
||||
// after SendProto. It fails if called multiple times or if
|
||||
// called after SendProto.
|
||||
SendHeader(metadata.MD) error
|
||||
// SetTrailer sets the trailer metadata which will be sent with the
|
||||
// RPC status.
|
||||
SetTrailer(metadata.MD)
|
||||
Stream
|
||||
}
|
||||
|
||||
// serverStream implements a server side Stream.
|
||||
type serverStream struct {
|
||||
t transport.ServerTransport
|
||||
s *transport.Stream
|
||||
p *parser
|
||||
codec Codec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
cbuf *bytes.Buffer
|
||||
maxMsgSize int
|
||||
statusCode codes.Code
|
||||
statusDesc string
|
||||
trInfo *traceInfo
|
||||
|
||||
mu sync.Mutex // protects trInfo.tr after the service handler runs.
|
||||
}
|
||||
|
||||
func (ss *serverStream) Context() context.Context {
|
||||
return ss.s.Context()
|
||||
}
|
||||
|
||||
func (ss *serverStream) SendHeader(md metadata.MD) error {
|
||||
return ss.t.WriteHeader(ss.s, md)
|
||||
}
|
||||
|
||||
func (ss *serverStream) SetTrailer(md metadata.MD) {
|
||||
if md.Len() == 0 {
|
||||
return
|
||||
}
|
||||
ss.s.SetTrailer(md)
|
||||
return
|
||||
}
|
||||
|
||||
func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
||||
defer func() {
|
||||
if ss.trInfo != nil {
|
||||
ss.mu.Lock()
|
||||
if ss.trInfo.tr != nil {
|
||||
if err == nil {
|
||||
ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
||||
} else {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
}
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
out, err := encode(ss.codec, m, ss.cp, ss.cbuf)
|
||||
defer func() {
|
||||
if ss.cbuf != nil {
|
||||
ss.cbuf.Reset()
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
err = transport.StreamErrorf(codes.Internal, "grpc: %v", err)
|
||||
return err
|
||||
}
|
||||
return ss.t.Write(ss.s, out, &transport.Options{Last: false})
|
||||
}
|
||||
|
||||
func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
||||
defer func() {
|
||||
if ss.trInfo != nil {
|
||||
ss.mu.Lock()
|
||||
if ss.trInfo.tr != nil {
|
||||
if err == nil {
|
||||
ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
||||
} else if err != io.EOF {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
}
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
return recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize)
|
||||
}
|
|
@ -0,0 +1,119 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2015, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/trace"
|
||||
)
|
||||
|
||||
// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
|
||||
// This should only be set before any RPCs are sent or received by this program.
|
||||
var EnableTracing = true
|
||||
|
||||
// methodFamily returns the trace family for the given method.
// It turns "/pkg.Service/GetFoo" into "pkg.Service".
func methodFamily(m string) string {
	m = strings.TrimPrefix(m, "/") // remove leading slash
	if i := strings.Index(m, "/"); i >= 0 {
		m = m[:i] // remove everything from second slash
	}
	if i := strings.LastIndex(m, "."); i >= 0 {
		m = m[i+1:] // cut down to last dotted component
	}
	return m
}
|
||||
|
||||
// traceInfo contains tracing information for an RPC.
|
||||
type traceInfo struct {
|
||||
tr trace.Trace
|
||||
firstLine firstLine
|
||||
}
|
||||
|
||||
// firstLine is the first line of an RPC trace.
|
||||
type firstLine struct {
|
||||
client bool // whether this is a client (outgoing) RPC
|
||||
remoteAddr net.Addr
|
||||
deadline time.Duration // may be zero
|
||||
}
|
||||
|
||||
func (f *firstLine) String() string {
|
||||
var line bytes.Buffer
|
||||
io.WriteString(&line, "RPC: ")
|
||||
if f.client {
|
||||
io.WriteString(&line, "to")
|
||||
} else {
|
||||
io.WriteString(&line, "from")
|
||||
}
|
||||
fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
|
||||
if f.deadline != 0 {
|
||||
fmt.Fprint(&line, f.deadline)
|
||||
} else {
|
||||
io.WriteString(&line, "none")
|
||||
}
|
||||
return line.String()
|
||||
}
|
||||
|
||||
// payload represents an RPC request or response payload.
|
||||
type payload struct {
|
||||
sent bool // whether this is an outgoing payload
|
||||
msg interface{} // e.g. a proto.Message
|
||||
// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
|
||||
}
|
||||
|
||||
func (p payload) String() string {
|
||||
if p.sent {
|
||||
return fmt.Sprintf("sent: %v", p.msg)
|
||||
}
|
||||
return fmt.Sprintf("recv: %v", p.msg)
|
||||
}
|
||||
|
||||
type fmtStringer struct {
|
||||
format string
|
||||
a []interface{}
|
||||
}
|
||||
|
||||
func (f *fmtStringer) String() string {
|
||||
return fmt.Sprintf(f.format, f.a...)
|
||||
}
|
||||
|
||||
type stringer string
|
||||
|
||||
func (s stringer) String() string { return string(s) }
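
For context (not part of this diff), a small sketch of how these traces are usually surfaced; the listen addresses are arbitrary examples, and it relies on golang.org/x/net/trace registering /debug/requests on the default mux when it is imported (which importing grpc does transitively).

// Illustrative only: expose the per-RPC traces collected via golang.org/x/net/trace.
package main

import (
	"net"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	grpc.EnableTracing = true // default is true; must be set before any RPCs

	// golang.org/x/net/trace registers /debug/requests and /debug/events on
	// http.DefaultServeMux, so serving it makes the traces browsable.
	go http.ListenAndServe("127.0.0.1:8080", nil)

	lis, err := net.Listen("tcp", "127.0.0.1:50051")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	s.Serve(lis)
}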
|
|
@ -0,0 +1,215 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
)
|
||||
|
||||
const (
|
||||
// The default value of flow control window size in HTTP2 spec.
|
||||
defaultWindowSize = 65535
|
||||
// The initial window size for flow control.
|
||||
initialWindowSize = defaultWindowSize // for an RPC
|
||||
initialConnWindowSize = defaultWindowSize * 16 // for a connection
|
||||
)
|
||||
|
||||
// The following defines various control items which could flow through
|
||||
// the control buffer of transport. They represent different aspects of
|
||||
// control tasks, e.g., flow control, settings, streaming resetting, etc.
|
||||
type windowUpdate struct {
|
||||
streamID uint32
|
||||
increment uint32
|
||||
}
|
||||
|
||||
func (*windowUpdate) item() {}
|
||||
|
||||
type settings struct {
|
||||
ack bool
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
func (*settings) item() {}
|
||||
|
||||
type resetStream struct {
|
||||
streamID uint32
|
||||
code http2.ErrCode
|
||||
}
|
||||
|
||||
func (*resetStream) item() {}
|
||||
|
||||
type goAway struct {
|
||||
}
|
||||
|
||||
func (*goAway) item() {}
|
||||
|
||||
type flushIO struct {
|
||||
}
|
||||
|
||||
func (*flushIO) item() {}
|
||||
|
||||
type ping struct {
|
||||
ack bool
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
func (*ping) item() {}
|
||||
|
||||
// quotaPool is a pool which accumulates the quota and sends it to acquire()
|
||||
// when it is available.
|
||||
type quotaPool struct {
|
||||
c chan int
|
||||
|
||||
mu sync.Mutex
|
||||
quota int
|
||||
}
|
||||
|
||||
// newQuotaPool creates a quotaPool which has quota q available to consume.
func newQuotaPool(q int) *quotaPool {
	qb := &quotaPool{
		c: make(chan int, 1),
	}
	if q > 0 {
		qb.c <- q
	} else {
		qb.quota = q
	}
	return qb
}
|
||||
|
||||
// add adds n to the available quota and tries to send it on acquire.
|
||||
func (qb *quotaPool) add(n int) {
|
||||
qb.mu.Lock()
|
||||
defer qb.mu.Unlock()
|
||||
qb.quota += n
|
||||
if qb.quota <= 0 {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case qb.c <- qb.quota:
|
||||
qb.quota = 0
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// cancel cancels the pending quota sent on acquire, if any.
|
||||
func (qb *quotaPool) cancel() {
|
||||
qb.mu.Lock()
|
||||
defer qb.mu.Unlock()
|
||||
select {
|
||||
case n := <-qb.c:
|
||||
qb.quota += n
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// reset cancels the pending quota sent on acquire, increments it by v, and
// sends it back on acquire.
|
||||
func (qb *quotaPool) reset(v int) {
|
||||
qb.mu.Lock()
|
||||
defer qb.mu.Unlock()
|
||||
select {
|
||||
case n := <-qb.c:
|
||||
qb.quota += n
|
||||
default:
|
||||
}
|
||||
qb.quota += v
|
||||
if qb.quota <= 0 {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case qb.c <- qb.quota:
|
||||
qb.quota = 0
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// acquire returns the channel on which available quota amounts are sent.
|
||||
func (qb *quotaPool) acquire() <-chan int {
|
||||
return qb.c
|
||||
}
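
For context (not part of this diff), an in-package sketch of the acquire/consume/return cycle the HTTP/2 transports follow when spending send quota; takeQuota is a hypothetical helper, not code from this commit.

// Illustrative only: take up to want bytes of quota, returning the excess so
// other writers can proceed; callers loop until their payload is fully written.
func takeQuota(qb *quotaPool, want int, shutdown <-chan struct{}) (int, error) {
	select {
	case q := <-qb.acquire():
		if q > want {
			// Return the excess to the pool.
			qb.add(q - want)
			q = want
		}
		return q, nil // caller writes q bytes now and retries for the rest
	case <-shutdown:
		return 0, ErrConnClosing
	}
}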
|
||||
|
||||
// inFlow deals with inbound flow control
|
||||
type inFlow struct {
|
||||
// The inbound flow control limit for pending data.
|
||||
limit uint32
|
||||
|
||||
mu sync.Mutex
|
||||
// pendingData is the overall data which has been received but not yet
// consumed by the application.
pendingData uint32
// pendingUpdate is the amount of data the application has consumed but for
// which grpc has not yet sent a window update. Used to reduce window update
// frequency.
pendingUpdate uint32
|
||||
}
|
||||
|
||||
// onData is invoked when some data frame is received. It updates pendingData.
|
||||
func (f *inFlow) onData(n uint32) error {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
f.pendingData += n
|
||||
if f.pendingData+f.pendingUpdate > f.limit {
|
||||
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// onRead is invoked when the application reads the data. It returns the window size
|
||||
// to be sent to the peer.
|
||||
func (f *inFlow) onRead(n uint32) uint32 {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
if f.pendingData == 0 {
|
||||
return 0
|
||||
}
|
||||
f.pendingData -= n
|
||||
f.pendingUpdate += n
|
||||
if f.pendingUpdate >= f.limit/4 {
|
||||
wu := f.pendingUpdate
|
||||
f.pendingUpdate = 0
|
||||
return wu
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *inFlow) resetPendingData() uint32 {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
n := f.pendingData
|
||||
f.pendingData = 0
|
||||
return n
|
||||
}
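
For context (not part of this diff), an in-package sketch of how onData and onRead pair up on the receive path; illustrateInFlow is a hypothetical helper built on the constants defined above.

// Illustrative only: account for an incoming frame, then decide whether a
// WINDOW_UPDATE should be sent once the application has read the bytes.
func illustrateInFlow() {
	f := &inFlow{limit: initialWindowSize}

	// A 16 KiB data frame arrives; it counts against the limit until it is read.
	if err := f.onData(16 * 1024); err != nil {
		// The peer overran the advertised window; the connection would be closed.
		return
	}

	// The application consumes the bytes. Once at least a quarter of the window
	// has been read, onRead returns a non-zero increment to send back to the peer.
	if w := f.onRead(16 * 1024); w > 0 {
		_ = w // would be queued as a windowUpdate on the transport's controlBuf
	}
}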
|
|
@ -0,0 +1,46 @@
|
|||
// +build go1.6,!go1.7
|
||||
|
||||
/*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// dialContext connects to the address on the named network.
|
||||
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
|
||||
}
|
|
@ -0,0 +1,46 @@
|
|||
// +build go1.7
|
||||
|
||||
/*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// dialContext connects to the address on the named network.
|
||||
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
return (&net.Dialer{}).DialContext(ctx, network, address)
|
||||
}
|
|
@ -0,0 +1,397 @@
|
|||
/*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
// This file is the implementation of a gRPC server using HTTP/2 which
|
||||
// uses the standard Go http2 Server implementation (via the
|
||||
// http.Handler interface), rather than speaking low-level HTTP/2
|
||||
// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
)
|
||||
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC
|
||||
// from inside an http.Handler. It requires that the http Server
|
||||
// supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
|
||||
if r.ProtoMajor != 2 {
|
||||
return nil, errors.New("gRPC requires HTTP/2")
|
||||
}
|
||||
if r.Method != "POST" {
|
||||
return nil, errors.New("invalid gRPC request method")
|
||||
}
|
||||
if !validContentType(r.Header.Get("Content-Type")) {
|
||||
return nil, errors.New("invalid gRPC request content-type")
|
||||
}
|
||||
if _, ok := w.(http.Flusher); !ok {
|
||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
|
||||
}
|
||||
if _, ok := w.(http.CloseNotifier); !ok {
|
||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
|
||||
}
|
||||
|
||||
st := &serverHandlerTransport{
|
||||
rw: w,
|
||||
req: r,
|
||||
closedCh: make(chan struct{}),
|
||||
writes: make(chan func()),
|
||||
}
|
||||
|
||||
if v := r.Header.Get("grpc-timeout"); v != "" {
|
||||
to, err := decodeTimeout(v)
|
||||
if err != nil {
|
||||
return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err)
|
||||
}
|
||||
st.timeoutSet = true
|
||||
st.timeout = to
|
||||
}
|
||||
|
||||
var metakv []string
|
||||
if r.Host != "" {
|
||||
metakv = append(metakv, ":authority", r.Host)
|
||||
}
|
||||
for k, vv := range r.Header {
|
||||
k = strings.ToLower(k)
|
||||
if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
if k == "user-agent" {
|
||||
// user-agent is special. Copying logic of http_util.go.
|
||||
if i := strings.LastIndex(v, " "); i == -1 {
|
||||
// There is no application user agent string being set
|
||||
continue
|
||||
} else {
|
||||
v = v[:i]
|
||||
}
|
||||
}
|
||||
metakv = append(metakv, k, v)
|
||||
}
|
||||
}
|
||||
st.headerMD = metadata.Pairs(metakv...)
|
||||
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// serverHandlerTransport is an implementation of ServerTransport
|
||||
// which replies to exactly one gRPC request (exactly one HTTP request),
|
||||
// using the net/http.Handler interface. This http.Handler is guaranteed
|
||||
// at this point to be speaking over HTTP/2, so it's able to speak valid
|
||||
// gRPC.
|
||||
type serverHandlerTransport struct {
|
||||
rw http.ResponseWriter
|
||||
req *http.Request
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
didCommonHeaders bool
|
||||
|
||||
headerMD metadata.MD
|
||||
|
||||
closeOnce sync.Once
|
||||
closedCh chan struct{} // closed on Close
|
||||
|
||||
// writes is a channel of code to run serialized in the
|
||||
// ServeHTTP (HandleStreams) goroutine. The channel is closed
|
||||
// when WriteStatus is called.
|
||||
writes chan func()
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Close() error {
|
||||
ht.closeOnce.Do(ht.closeCloseChanOnce)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
|
||||
|
||||
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
|
||||
|
||||
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
|
||||
// the empty string if unknown.
|
||||
type strAddr string
|
||||
|
||||
func (a strAddr) Network() string {
|
||||
if a != "" {
|
||||
// Per the documentation on net/http.Request.RemoteAddr, if this is
|
||||
// set, it's set to the IP:port of the peer (hence, TCP):
|
||||
// https://golang.org/pkg/net/http/#Request
|
||||
//
|
||||
// If we want to support Unix sockets later, we can
|
||||
// add our own grpc-specific convention within the
|
||||
// grpc codebase to set RemoteAddr to a different
|
||||
// format, or probably better: we can attach it to the
|
||||
// context and use that from serverHandlerTransport.RemoteAddr.
|
||||
return "tcp"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (a strAddr) String() string { return string(a) }
|
||||
|
||||
// do runs fn in the ServeHTTP goroutine.
|
||||
func (ht *serverHandlerTransport) do(fn func()) error {
|
||||
select {
|
||||
case ht.writes <- fn:
|
||||
return nil
|
||||
case <-ht.closedCh:
|
||||
return ErrConnClosing
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
|
||||
err := ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
|
||||
// And flush, in case no header or body has been sent yet.
|
||||
// This forces a separation of headers and trailers if this is the
|
||||
// first call (for example, in end2end tests's TestNoService).
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
|
||||
h := ht.rw.Header()
|
||||
h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode))
|
||||
if statusDesc != "" {
|
||||
h.Set("Grpc-Message", encodeGrpcMessage(statusDesc))
|
||||
}
|
||||
if md := s.Trailer(); len(md) > 0 {
|
||||
for k, vv := range md {
|
||||
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
// http2 ResponseWriter mechanism to
|
||||
// send undeclared Trailers after the
|
||||
// headers have possibly been written.
|
||||
h.Add(http2.TrailerPrefix+k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
close(ht.writes)
|
||||
return err
|
||||
}
|
||||
|
||||
// writeCommonHeaders sets common headers on the first write
|
||||
// call (Write, WriteHeader, or WriteStatus).
|
||||
func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
|
||||
if ht.didCommonHeaders {
|
||||
return
|
||||
}
|
||||
ht.didCommonHeaders = true
|
||||
|
||||
h := ht.rw.Header()
|
||||
h["Date"] = nil // suppress Date to make tests happy; TODO: restore
|
||||
h.Set("Content-Type", "application/grpc")
|
||||
|
||||
// Predeclare trailers we'll set later in WriteStatus (after the body).
|
||||
// This is a SHOULD in the HTTP RFC, and the way you add (known)
|
||||
// Trailers per the net/http.ResponseWriter contract.
|
||||
// See https://golang.org/pkg/net/http/#ResponseWriter
|
||||
// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
|
||||
h.Add("Trailer", "Grpc-Status")
|
||||
h.Add("Trailer", "Grpc-Message")
|
||||
|
||||
if s.sendCompress != "" {
|
||||
h.Set("Grpc-Encoding", s.sendCompress)
|
||||
}
|
||||
}
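
For context (not part of this diff), the plain net/http pattern writeCommonHeaders depends on, sketched as a hypothetical handler: declared trailers are listed in the "Trailer" header before the body is written and set afterwards. Assumes only the standard "net/http" package.

// Illustrative only: predeclare trailers, write the body, then set the trailers.
func trailerHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Trailer", "Grpc-Status")
	w.Header().Add("Trailer", "Grpc-Message")
	w.Header().Set("Content-Type", "application/grpc")

	w.WriteHeader(http.StatusOK)
	w.Write([]byte("body bytes go here"))

	// Set after the body; net/http emits these as HTTP trailers.
	w.Header().Set("Grpc-Status", "0")
	w.Header().Set("Grpc-Message", "")
}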
|
||||
|
||||
func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error {
|
||||
return ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
ht.rw.Write(data)
|
||||
if !opts.Delay {
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
return ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
h := ht.rw.Header()
|
||||
for k, vv := range md {
|
||||
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
h.Add(k, v)
|
||||
}
|
||||
}
|
||||
ht.rw.WriteHeader(200)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
|
||||
// With this transport type there will be exactly 1 stream: this HTTP request.
|
||||
|
||||
var ctx context.Context
|
||||
var cancel context.CancelFunc
|
||||
if ht.timeoutSet {
|
||||
ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
|
||||
} else {
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
}
|
||||
|
||||
// requestOver is closed when either the request's context is done
|
||||
// or the status has been written via WriteStatus.
|
||||
requestOver := make(chan struct{})
|
||||
|
||||
// clientGone receives a single value if peer is gone, either
|
||||
// because the underlying connection is dead or because the
|
||||
// peer sends an http2 RST_STREAM.
|
||||
clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
|
||||
go func() {
|
||||
select {
|
||||
case <-requestOver:
|
||||
return
|
||||
case <-ht.closedCh:
|
||||
case <-clientGone:
|
||||
}
|
||||
cancel()
|
||||
}()
|
||||
|
||||
req := ht.req
|
||||
|
||||
s := &Stream{
|
||||
id: 0, // irrelevant
|
||||
windowHandler: func(int) {}, // nothing
|
||||
cancel: cancel,
|
||||
buf: newRecvBuffer(),
|
||||
st: ht,
|
||||
method: req.URL.Path,
|
||||
recvCompress: req.Header.Get("grpc-encoding"),
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: ht.RemoteAddr(),
|
||||
}
|
||||
if req.TLS != nil {
|
||||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
|
||||
}
|
||||
ctx = metadata.NewContext(ctx, ht.headerMD)
|
||||
ctx = peer.NewContext(ctx, pr)
|
||||
s.ctx = newContextWithStream(ctx, s)
|
||||
s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}
|
||||
|
||||
// readerDone is closed when the Body.Read-ing goroutine exits.
|
||||
readerDone := make(chan struct{})
|
||||
go func() {
|
||||
defer close(readerDone)
|
||||
|
||||
// TODO: minimize garbage, optimize recvBuffer code/ownership
|
||||
const readSize = 8196
|
||||
for buf := make([]byte, readSize); ; {
|
||||
n, err := req.Body.Read(buf)
|
||||
if n > 0 {
|
||||
s.buf.put(&recvMsg{data: buf[:n:n]})
|
||||
buf = buf[n:]
|
||||
}
|
||||
if err != nil {
|
||||
s.buf.put(&recvMsg{err: mapRecvMsgError(err)})
|
||||
return
|
||||
}
|
||||
if len(buf) == 0 {
|
||||
buf = make([]byte, readSize)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// startStream is provided by the *grpc.Server's serveStreams.
|
||||
// It starts a goroutine serving s and exits immediately.
|
||||
// The goroutine that is started is the one that then calls
|
||||
// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
|
||||
startStream(s)
|
||||
|
||||
ht.runStream()
|
||||
close(requestOver)
|
||||
|
||||
// Wait for reading goroutine to finish.
|
||||
req.Body.Close()
|
||||
<-readerDone
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) runStream() {
|
||||
for {
|
||||
select {
|
||||
case fn, ok := <-ht.writes:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
fn()
|
||||
case <-ht.closedCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Drain() {
|
||||
panic("Drain() is not implemented")
|
||||
}
|
||||
|
||||
// mapRecvMsgError maps the non-nil err into the appropriate
// error value as expected by callers of *grpc.parser.recvMsg.
// In particular, it can only be:
//   * io.EOF
//   * io.ErrUnexpectedEOF
//   * of type transport.ConnectionError
//   * of type transport.StreamError
|
||||
func mapRecvMsgError(err error) error {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
return err
|
||||
}
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
if code, ok := http2ErrConvTab[se.Code]; ok {
|
||||
return StreamError{
|
||||
Code: code,
|
||||
Desc: se.Error(),
|
||||
}
|
||||
}
|
||||
}
|
||||
return ConnectionError{Desc: err.Error()}
|
||||
}
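
For context (not part of this diff), a hypothetical in-package example of the mapping this function performs for a stream reset from the peer.

// Illustrative only: a RST_STREAM with CANCEL from the peer surfaces to
// recvMsg callers as a StreamError carrying codes.Canceled.
func exampleMapping() error {
	err := http2.StreamError{StreamID: 1, Code: http2.ErrCodeCancel}
	return mapRecvMsgError(err)
}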
|
File diff suppressed because it is too large
|
@ -0,0 +1,774 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
)
|
||||
|
||||
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
|
||||
// the stream's state.
|
||||
var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
|
||||
|
||||
// http2Server implements the ServerTransport interface with HTTP2.
|
||||
type http2Server struct {
|
||||
conn net.Conn
|
||||
maxStreamID uint32 // max stream ID ever seen
|
||||
authInfo credentials.AuthInfo // auth info about the connection
|
||||
// writableChan synchronizes write access to the transport.
|
||||
// A writer acquires the write lock by receiving a value on writableChan
|
||||
// and releases it by sending on writableChan.
|
||||
writableChan chan int
|
||||
// shutdownChan is closed when Close is called.
|
||||
// Blocking operations should select on shutdownChan to avoid
|
||||
// blocking forever after Close.
|
||||
shutdownChan chan struct{}
|
||||
framer *framer
|
||||
hBuf *bytes.Buffer // the buffer for HPACK encoding
|
||||
hEnc *hpack.Encoder // HPACK encoder
|
||||
|
||||
// The max number of concurrent streams.
|
||||
maxStreams uint32
|
||||
// controlBuf delivers all the control related tasks (e.g., window
|
||||
// updates, reset streams, and various settings) to the controller.
|
||||
controlBuf *recvBuffer
|
||||
fc *inFlow
|
||||
// sendQuotaPool provides flow control to outbound message.
|
||||
sendQuotaPool *quotaPool
|
||||
|
||||
mu sync.Mutex // guard the following
|
||||
state transportState
|
||||
activeStreams map[uint32]*Stream
|
||||
// the per-stream outbound flow control window size set by the peer.
|
||||
streamSendQuota uint32
|
||||
}
|
||||
|
||||
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
|
||||
// returned if something goes wrong.
|
||||
func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) {
|
||||
framer := newFramer(conn)
|
||||
// Send initial settings as connection preface to client.
|
||||
var settings []http2.Setting
|
||||
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
|
||||
// permitted in the HTTP2 spec.
|
||||
if maxStreams == 0 {
|
||||
maxStreams = math.MaxUint32
|
||||
} else {
|
||||
settings = append(settings, http2.Setting{
|
||||
ID: http2.SettingMaxConcurrentStreams,
|
||||
Val: maxStreams,
|
||||
})
|
||||
}
|
||||
if initialWindowSize != defaultWindowSize {
|
||||
settings = append(settings, http2.Setting{
|
||||
ID: http2.SettingInitialWindowSize,
|
||||
Val: uint32(initialWindowSize)})
|
||||
}
|
||||
if err := framer.writeSettings(true, settings...); err != nil {
|
||||
return nil, ConnectionErrorf(true, err, "transport: %v", err)
|
||||
}
|
||||
// Adjust the connection flow control window if needed.
|
||||
if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
|
||||
if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
|
||||
return nil, ConnectionErrorf(true, err, "transport: %v", err)
|
||||
}
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
t := &http2Server{
|
||||
conn: conn,
|
||||
authInfo: authInfo,
|
||||
framer: framer,
|
||||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
maxStreams: maxStreams,
|
||||
controlBuf: newRecvBuffer(),
|
||||
fc: &inFlow{limit: initialConnWindowSize},
|
||||
sendQuotaPool: newQuotaPool(defaultWindowSize),
|
||||
state: reachable,
|
||||
writableChan: make(chan int, 1),
|
||||
shutdownChan: make(chan struct{}),
|
||||
activeStreams: make(map[uint32]*Stream),
|
||||
streamSendQuota: defaultWindowSize,
|
||||
}
|
||||
go t.controller()
|
||||
t.writableChan <- 0
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// operateHeader takes action on the decoded headers.
|
||||
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) (close bool) {
|
||||
buf := newRecvBuffer()
|
||||
s := &Stream{
|
||||
id: frame.Header().StreamID,
|
||||
st: t,
|
||||
buf: buf,
|
||||
fc: &inFlow{limit: initialWindowSize},
|
||||
}
|
||||
|
||||
var state decodeState
|
||||
for _, hf := range frame.Fields {
|
||||
state.processHeaderField(hf)
|
||||
}
|
||||
if err := state.err; err != nil {
|
||||
if se, ok := err.(StreamError); ok {
|
||||
t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if frame.StreamEnded() {
|
||||
// s is just created by the caller. No lock needed.
|
||||
s.state = streamReadDone
|
||||
}
|
||||
s.recvCompress = state.encoding
|
||||
if state.timeoutSet {
|
||||
s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout)
|
||||
} else {
|
||||
s.ctx, s.cancel = context.WithCancel(context.TODO())
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: t.conn.RemoteAddr(),
|
||||
}
|
||||
// Attach Auth info if there is any.
|
||||
if t.authInfo != nil {
|
||||
pr.AuthInfo = t.authInfo
|
||||
}
|
||||
s.ctx = peer.NewContext(s.ctx, pr)
|
||||
// Cache the current stream to the context so that the server application
|
||||
// can find out. Required when the server wants to send some metadata
|
||||
// back to the client (unary call only).
|
||||
s.ctx = newContextWithStream(s.ctx, s)
|
||||
// Attach the received metadata to the context.
|
||||
if len(state.mdata) > 0 {
|
||||
s.ctx = metadata.NewContext(s.ctx, state.mdata)
|
||||
}
|
||||
|
||||
s.dec = &recvBufferReader{
|
||||
ctx: s.ctx,
|
||||
recv: s.buf,
|
||||
}
|
||||
s.recvCompress = state.encoding
|
||||
s.method = state.method
|
||||
t.mu.Lock()
|
||||
if t.state != reachable {
|
||||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if uint32(len(t.activeStreams)) >= t.maxStreams {
|
||||
t.mu.Unlock()
|
||||
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
|
||||
return
|
||||
}
|
||||
if s.id%2 != 1 || s.id <= t.maxStreamID {
|
||||
t.mu.Unlock()
|
||||
// illegal gRPC stream id.
|
||||
grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id)
|
||||
return true
|
||||
}
|
||||
t.maxStreamID = s.id
|
||||
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
|
||||
t.activeStreams[s.id] = s
|
||||
t.mu.Unlock()
|
||||
s.windowHandler = func(n int) {
|
||||
t.updateWindow(s, uint32(n))
|
||||
}
|
||||
handle(s)
|
||||
return
|
||||
}
|
||||
|
||||
// HandleStreams receives incoming streams using the given handler. This is
|
||||
// typically run in a separate goroutine.
|
||||
func (t *http2Server) HandleStreams(handle func(*Stream)) {
|
||||
// Check the validity of client preface.
|
||||
preface := make([]byte, len(clientPreface))
|
||||
if _, err := io.ReadFull(t.conn, preface); err != nil {
|
||||
grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(preface, clientPreface) {
|
||||
grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
|
||||
frame, err := t.framer.readFrame()
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
sf, ok := frame.(*http2.SettingsFrame)
|
||||
if !ok {
|
||||
grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
t.handleSettings(sf)
|
||||
|
||||
for {
|
||||
frame, err := t.framer.readFrame()
|
||||
if err != nil {
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
t.mu.Lock()
|
||||
s := t.activeStreams[se.StreamID]
|
||||
t.mu.Unlock()
|
||||
if s != nil {
|
||||
t.closeStream(s)
|
||||
}
|
||||
t.controlBuf.put(&resetStream{se.StreamID, se.Code})
|
||||
continue
|
||||
}
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
switch frame := frame.(type) {
|
||||
case *http2.MetaHeadersFrame:
|
||||
if t.operateHeaders(frame, handle) {
|
||||
t.Close()
|
||||
break
|
||||
}
|
||||
case *http2.DataFrame:
|
||||
t.handleData(frame)
|
||||
case *http2.RSTStreamFrame:
|
||||
t.handleRSTStream(frame)
|
||||
case *http2.SettingsFrame:
|
||||
t.handleSettings(frame)
|
||||
case *http2.PingFrame:
|
||||
t.handlePing(frame)
|
||||
case *http2.WindowUpdateFrame:
|
||||
t.handleWindowUpdate(frame)
|
||||
case *http2.GoAwayFrame:
|
||||
// TODO: Handle GoAway from the client appropriately.
|
||||
default:
|
||||
grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.activeStreams == nil {
|
||||
// The transport is closing.
|
||||
return nil, false
|
||||
}
|
||||
s, ok := t.activeStreams[f.Header().StreamID]
|
||||
if !ok {
|
||||
// The stream is already done.
|
||||
return nil, false
|
||||
}
|
||||
return s, true
|
||||
}
|
||||
|
||||
// updateWindow adjusts the inbound quota for the stream and the transport.
|
||||
// Window updates will be delivered to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
|
||||
func (t *http2Server) updateWindow(s *Stream, n uint32) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.state == streamDone {
|
||||
return
|
||||
}
|
||||
if w := t.fc.onRead(n); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, w})
|
||||
}
|
||||
if w := s.fc.onRead(n); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{s.id, w})
|
||||
}
|
||||
}
|
||||
|
||||
func (t *http2Server) handleData(f *http2.DataFrame) {
|
||||
size := len(f.Data())
|
||||
if err := t.fc.onData(uint32(size)); err != nil {
|
||||
grpclog.Printf("transport: http2Server %v", err)
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
// Select the right stream to dispatch.
|
||||
s, ok := t.getStream(f)
|
||||
if !ok {
|
||||
if w := t.fc.onRead(uint32(size)); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, w})
|
||||
}
|
||||
return
|
||||
}
|
||||
if size > 0 {
|
||||
s.mu.Lock()
|
||||
if s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
// The stream has been closed. Release the corresponding quota.
|
||||
if w := t.fc.onRead(uint32(size)); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, w})
|
||||
}
|
||||
return
|
||||
}
|
||||
if err := s.fc.onData(uint32(size)); err != nil {
|
||||
s.mu.Unlock()
|
||||
t.closeStream(s)
|
||||
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
|
||||
return
|
||||
}
|
||||
s.mu.Unlock()
|
||||
// TODO(bradfitz, zhaoq): A copy is required here because there is no
|
||||
// guarantee f.Data() is consumed before the arrival of next frame.
|
||||
// Can this copy be eliminated?
|
||||
data := make([]byte, size)
|
||||
copy(data, f.Data())
|
||||
s.write(recvMsg{data: data})
|
||||
}
|
||||
if f.Header().Flags.Has(http2.FlagDataEndStream) {
|
||||
// Received the end of stream from the client.
|
||||
s.mu.Lock()
|
||||
if s.state != streamDone {
|
||||
s.state = streamReadDone
|
||||
}
|
||||
s.mu.Unlock()
|
||||
s.write(recvMsg{err: io.EOF})
|
||||
}
|
||||
}
|
||||
|
||||
func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
|
||||
s, ok := t.getStream(f)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
t.closeStream(s)
|
||||
}
|
||||
|
||||
func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
|
||||
if f.IsAck() {
|
||||
return
|
||||
}
|
||||
var ss []http2.Setting
|
||||
f.ForeachSetting(func(s http2.Setting) error {
|
||||
ss = append(ss, s)
|
||||
return nil
|
||||
})
|
||||
// The settings will be applied once the ack is sent.
|
||||
t.controlBuf.put(&settings{ack: true, ss: ss})
|
||||
}
|
||||
|
||||
func (t *http2Server) handlePing(f *http2.PingFrame) {
|
||||
pingAck := &ping{ack: true}
|
||||
copy(pingAck.data[:], f.Data[:])
|
||||
t.controlBuf.put(pingAck)
|
||||
}
|
||||
|
||||
func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
|
||||
id := f.Header().StreamID
|
||||
incr := f.Increment
|
||||
if id == 0 {
|
||||
t.sendQuotaPool.add(int(incr))
|
||||
return
|
||||
}
|
||||
if s, ok := t.getStream(f); ok {
|
||||
s.sendQuotaPool.add(int(incr))
|
||||
}
|
||||
}
|
||||
|
||||
func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {
|
||||
first := true
|
||||
endHeaders := false
|
||||
var err error
|
||||
// Sends the headers in a single batch.
|
||||
for !endHeaders {
|
||||
size := t.hBuf.Len()
|
||||
if size > http2MaxFrameLen {
|
||||
size = http2MaxFrameLen
|
||||
} else {
|
||||
endHeaders = true
|
||||
}
|
||||
if first {
|
||||
p := http2.HeadersFrameParam{
|
||||
StreamID: s.id,
|
||||
BlockFragment: b.Next(size),
|
||||
EndStream: endStream,
|
||||
EndHeaders: endHeaders,
|
||||
}
|
||||
err = t.framer.writeHeaders(endHeaders, p)
|
||||
first = false
|
||||
} else {
|
||||
err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
|
||||
}
|
||||
if err != nil {
|
||||
t.Close()
|
||||
return ConnectionErrorf(true, err, "transport: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteHeader sends the header metadata md back to the client.
|
||||
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
s.mu.Lock()
|
||||
if s.headerOk || s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
return ErrIllegalHeaderWrite
|
||||
}
|
||||
s.headerOk = true
|
||||
s.mu.Unlock()
|
||||
if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
|
||||
return err
|
||||
}
|
||||
t.hBuf.Reset()
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
|
||||
if s.sendCompress != "" {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
|
||||
}
|
||||
for k, v := range md {
|
||||
if isReservedHeader(k) {
|
||||
// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
|
||||
continue
|
||||
}
|
||||
for _, entry := range v {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
|
||||
}
|
||||
}
|
||||
if err := t.writeHeaders(s, t.hBuf, false); err != nil {
|
||||
return err
|
||||
}
|
||||
t.writableChan <- 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteStatus sends stream status to the client and terminates the stream.
|
||||
// No further I/O operations can be performed on this stream.
|
||||
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
|
||||
// OK is adopted.
|
||||
func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
|
||||
var headersSent bool
|
||||
s.mu.Lock()
|
||||
if s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
if s.headerOk {
|
||||
headersSent = true
|
||||
}
|
||||
s.mu.Unlock()
|
||||
if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
|
||||
return err
|
||||
}
|
||||
t.hBuf.Reset()
|
||||
if !headersSent {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
|
||||
}
|
||||
t.hEnc.WriteField(
|
||||
hpack.HeaderField{
|
||||
Name: "grpc-status",
|
||||
Value: strconv.Itoa(int(statusCode)),
|
||||
})
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)})
|
||||
// Attach the trailer metadata.
|
||||
for k, v := range s.trailer {
|
||||
// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, entry := range v {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
|
||||
}
|
||||
}
|
||||
if err := t.writeHeaders(s, t.hBuf, true); err != nil {
|
||||
t.Close()
|
||||
return err
|
||||
}
|
||||
t.closeStream(s)
|
||||
t.writableChan <- 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write converts the data into an HTTP2 data frame and sends it out. A non-nil error
|
||||
// is returned if it fails (e.g., framing error, transport error).
|
||||
func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
|
||||
// TODO(zhaoq): Support multi-writers for a single stream.
|
||||
var writeHeaderFrame bool
|
||||
s.mu.Lock()
|
||||
if s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
return StreamErrorf(codes.Unknown, "the stream has been done")
|
||||
}
|
||||
if !s.headerOk {
|
||||
writeHeaderFrame = true
|
||||
s.headerOk = true
|
||||
}
|
||||
s.mu.Unlock()
|
||||
if writeHeaderFrame {
|
||||
if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
|
||||
return err
|
||||
}
|
||||
t.hBuf.Reset()
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
|
||||
if s.sendCompress != "" {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
|
||||
}
|
||||
p := http2.HeadersFrameParam{
|
||||
StreamID: s.id,
|
||||
BlockFragment: t.hBuf.Bytes(),
|
||||
EndHeaders: true,
|
||||
}
|
||||
if err := t.framer.writeHeaders(false, p); err != nil {
|
||||
t.Close()
|
||||
return ConnectionErrorf(true, err, "transport: %v", err)
|
||||
}
|
||||
t.writableChan <- 0
|
||||
}
|
||||
r := bytes.NewBuffer(data)
|
||||
for {
|
||||
if r.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
size := http2MaxFrameLen
|
||||
s.sendQuotaPool.add(0)
|
||||
// Wait until the stream has some quota to send the data.
|
||||
sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.sendQuotaPool.add(0)
|
||||
// Wait until the transport has some quota to send the data.
|
||||
tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire())
|
||||
if err != nil {
|
||||
if _, ok := err.(StreamError); ok {
|
||||
t.sendQuotaPool.cancel()
|
||||
}
|
||||
return err
|
||||
}
|
||||
if sq < size {
|
||||
size = sq
|
||||
}
|
||||
if tq < size {
|
||||
size = tq
|
||||
}
|
||||
p := r.Next(size)
|
||||
ps := len(p)
|
||||
if ps < sq {
|
||||
// Overbooked stream quota. Return it back.
|
||||
s.sendQuotaPool.add(sq - ps)
|
||||
}
|
||||
if ps < tq {
|
||||
// Overbooked transport quota. Return it back.
|
||||
t.sendQuotaPool.add(tq - ps)
|
||||
}
|
||||
t.framer.adjustNumWriters(1)
|
||||
// Got some quota. Try to acquire writing privilege on the
|
||||
// transport.
|
||||
if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
|
||||
if _, ok := err.(StreamError); ok {
|
||||
// Return the connection quota back.
|
||||
t.sendQuotaPool.add(ps)
|
||||
}
|
||||
if t.framer.adjustNumWriters(-1) == 0 {
|
||||
// This writer is the last one in this batch and has the
|
||||
// responsibility to flush the buffered frames. It queues
|
||||
// a flush request to controlBuf instead of flushing directly
|
||||
// in order to avoid the race with other writing or flushing.
|
||||
t.controlBuf.put(&flushIO{})
|
||||
}
|
||||
return err
|
||||
}
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
t.sendQuotaPool.add(ps)
|
||||
if t.framer.adjustNumWriters(-1) == 0 {
|
||||
t.controlBuf.put(&flushIO{})
|
||||
}
|
||||
t.writableChan <- 0
|
||||
return ContextErr(s.ctx.Err())
|
||||
default:
|
||||
}
|
||||
var forceFlush bool
|
||||
if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {
|
||||
forceFlush = true
|
||||
}
|
||||
if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {
|
||||
t.Close()
|
||||
return ConnectionErrorf(true, err, "transport: %v", err)
|
||||
}
|
||||
if t.framer.adjustNumWriters(-1) == 0 {
|
||||
t.framer.flushWrite()
|
||||
}
|
||||
t.writableChan <- 0
|
||||
}
|
||||
|
||||
}
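
The quota handling in Write above is easiest to see in isolation. A minimal standalone sketch (not part of the vendored package; names are illustrative): each DATA frame is capped by the minimum of the stream quota, the transport quota and http2MaxFrameLen, and any quota acquired beyond the bytes actually sent is returned to the pools.

package main

import "fmt"

// chunk mirrors the sizing logic in Write: cap the frame at the smaller
// of the two quotas and the max frame length, then report how much
// over-acquired quota must be handed back.
func chunk(streamQ, transportQ, maxFrame, pending int) (send, backToStream, backToTransport int) {
	size := maxFrame
	if streamQ < size {
		size = streamQ
	}
	if transportQ < size {
		size = transportQ
	}
	if pending < size {
		size = pending
	}
	return size, streamQ - size, transportQ - size
}

func main() {
	// 500 bytes pending against 10000/70000 quota: send 500 now and
	// return 9500 to the stream pool and 69500 to the transport pool.
	fmt.Println(chunk(10000, 70000, 16384, 500))
}
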
|
||||
|
||||
func (t *http2Server) applySettings(ss []http2.Setting) {
|
||||
for _, s := range ss {
|
||||
if s.ID == http2.SettingInitialWindowSize {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
for _, stream := range t.activeStreams {
|
||||
stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
|
||||
}
|
||||
t.streamSendQuota = s.Val
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// controller running in a separate goroutine takes charge of sending control
|
||||
// frames (e.g., window update, reset stream, setting, etc.) to the client.
|
||||
func (t *http2Server) controller() {
|
||||
for {
|
||||
select {
|
||||
case i := <-t.controlBuf.get():
|
||||
t.controlBuf.load()
|
||||
select {
|
||||
case <-t.writableChan:
|
||||
switch i := i.(type) {
|
||||
case *windowUpdate:
|
||||
t.framer.writeWindowUpdate(true, i.streamID, i.increment)
|
||||
case *settings:
|
||||
if i.ack {
|
||||
t.framer.writeSettingsAck(true)
|
||||
t.applySettings(i.ss)
|
||||
} else {
|
||||
t.framer.writeSettings(true, i.ss...)
|
||||
}
|
||||
case *resetStream:
|
||||
t.framer.writeRSTStream(true, i.streamID, i.code)
|
||||
case *goAway:
|
||||
t.mu.Lock()
|
||||
if t.state == closing {
|
||||
t.mu.Unlock()
|
||||
// The transport is closing.
|
||||
return
|
||||
}
|
||||
sid := t.maxStreamID
|
||||
t.state = draining
|
||||
t.mu.Unlock()
|
||||
t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil)
|
||||
case *flushIO:
|
||||
t.framer.flushWrite()
|
||||
case *ping:
|
||||
t.framer.writePing(true, i.ack, i.data)
|
||||
default:
|
||||
grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i)
|
||||
}
|
||||
t.writableChan <- 0
|
||||
continue
|
||||
case <-t.shutdownChan:
|
||||
return
|
||||
}
|
||||
case <-t.shutdownChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close starts shutting down the http2Server transport.
|
||||
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
|
||||
// could cause some resource issue. Revisit this later.
|
||||
func (t *http2Server) Close() (err error) {
|
||||
t.mu.Lock()
|
||||
if t.state == closing {
|
||||
t.mu.Unlock()
|
||||
return errors.New("transport: Close() was already called")
|
||||
}
|
||||
t.state = closing
|
||||
streams := t.activeStreams
|
||||
t.activeStreams = nil
|
||||
t.mu.Unlock()
|
||||
close(t.shutdownChan)
|
||||
err = t.conn.Close()
|
||||
// Cancel all active streams.
|
||||
for _, s := range streams {
|
||||
s.cancel()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// closeStream clears the footprint of a stream when the stream is not needed
|
||||
// any more.
|
||||
func (t *http2Server) closeStream(s *Stream) {
|
||||
t.mu.Lock()
|
||||
delete(t.activeStreams, s.id)
|
||||
if t.state == draining && len(t.activeStreams) == 0 {
|
||||
defer t.Close()
|
||||
}
|
||||
t.mu.Unlock()
|
||||
// In case stream sending and receiving are invoked in separate
|
||||
// goroutines (e.g., bi-directional streaming), cancel needs to be
|
||||
// called to interrupt the potential blocking on other goroutines.
|
||||
s.cancel()
|
||||
s.mu.Lock()
|
||||
if q := s.fc.resetPendingData(); q > 0 {
|
||||
if w := t.fc.onRead(q); w > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, w})
|
||||
}
|
||||
}
|
||||
if s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
s.state = streamDone
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (t *http2Server) RemoteAddr() net.Addr {
|
||||
return t.conn.RemoteAddr()
|
||||
}
|
||||
|
||||
func (t *http2Server) Drain() {
|
||||
t.controlBuf.put(&goAway{})
|
||||
}
|
|
@ -0,0 +1,510 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
const (
|
||||
// The primary user agent
|
||||
primaryUA = "grpc-go/0.11"
|
||||
// http2MaxFrameLen specifies the max length of an HTTP2 frame.
|
||||
http2MaxFrameLen = 16384 // 16KB frame
|
||||
// http://http2.github.io/http2-spec/#SettingValues
|
||||
http2InitHeaderTableSize = 4096
|
||||
// http2IOBufSize specifies the buffer size for sending frames.
|
||||
http2IOBufSize = 32 * 1024
|
||||
)
|
||||
|
||||
var (
|
||||
clientPreface = []byte(http2.ClientPreface)
|
||||
http2ErrConvTab = map[http2.ErrCode]codes.Code{
|
||||
http2.ErrCodeNo: codes.Internal,
|
||||
http2.ErrCodeProtocol: codes.Internal,
|
||||
http2.ErrCodeInternal: codes.Internal,
|
||||
http2.ErrCodeFlowControl: codes.ResourceExhausted,
|
||||
http2.ErrCodeSettingsTimeout: codes.Internal,
|
||||
http2.ErrCodeStreamClosed: codes.Internal,
|
||||
http2.ErrCodeFrameSize: codes.Internal,
|
||||
http2.ErrCodeRefusedStream: codes.Unavailable,
|
||||
http2.ErrCodeCancel: codes.Canceled,
|
||||
http2.ErrCodeCompression: codes.Internal,
|
||||
http2.ErrCodeConnect: codes.Internal,
|
||||
http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
|
||||
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
|
||||
http2.ErrCodeHTTP11Required: codes.FailedPrecondition,
|
||||
}
|
||||
statusCodeConvTab = map[codes.Code]http2.ErrCode{
|
||||
codes.Internal: http2.ErrCodeInternal,
|
||||
codes.Canceled: http2.ErrCodeCancel,
|
||||
codes.Unavailable: http2.ErrCodeRefusedStream,
|
||||
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
|
||||
codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
|
||||
}
|
||||
)
|
||||
|
||||
// Records the states during HPACK decoding. Must be reset once the
|
||||
// decoding of the entire headers is finished.
|
||||
type decodeState struct {
|
||||
err error // first error encountered decoding
|
||||
|
||||
encoding string
|
||||
// statusCode caches the stream status received from the trailer
|
||||
// the server sent. Client side only.
|
||||
statusCode codes.Code
|
||||
statusDesc string
|
||||
// Server side only fields.
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
method string
|
||||
// key-value metadata map from the peer.
|
||||
mdata map[string][]string
|
||||
}
|
||||
|
||||
// isReservedHeader checks whether hdr belongs to HTTP2 headers
|
||||
// reserved by gRPC protocol. Any other headers are classified as the
|
||||
// user-specified metadata.
|
||||
func isReservedHeader(hdr string) bool {
|
||||
if hdr != "" && hdr[0] == ':' {
|
||||
return true
|
||||
}
|
||||
switch hdr {
|
||||
case "content-type",
|
||||
"grpc-message-type",
|
||||
"grpc-encoding",
|
||||
"grpc-message",
|
||||
"grpc-status",
|
||||
"grpc-timeout",
|
||||
"te":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders
|
||||
// that should be propagated into metadata visible to users.
|
||||
func isWhitelistedPseudoHeader(hdr string) bool {
|
||||
switch hdr {
|
||||
case ":authority":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decodeState) setErr(err error) {
|
||||
if d.err == nil {
|
||||
d.err = err
|
||||
}
|
||||
}
|
||||
|
||||
func validContentType(t string) bool {
|
||||
e := "application/grpc"
|
||||
if !strings.HasPrefix(t, e) {
|
||||
return false
|
||||
}
|
||||
// Support variations on the content-type
|
||||
// (e.g. "application/grpc+blah", "application/grpc;blah").
|
||||
if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
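
A quick standalone illustration of the check above (not part of the vendored file): the bare content type and "+"/";"-suffixed variants pass, anything else is rejected.

package main

import (
	"fmt"
	"strings"
)

// valid reproduces validContentType: "application/grpc" plus optional
// "+subtype" or ";parameter" suffixes are accepted.
func valid(t string) bool {
	const e = "application/grpc"
	if !strings.HasPrefix(t, e) {
		return false
	}
	return len(t) == len(e) || t[len(e)] == '+' || t[len(e)] == ';'
}

func main() {
	fmt.Println(valid("application/grpc"))       // true
	fmt.Println(valid("application/grpc+proto")) // true
	fmt.Println(valid("application/json"))       // false
}
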
|
||||
|
||||
func (d *decodeState) processHeaderField(f hpack.HeaderField) {
|
||||
switch f.Name {
|
||||
case "content-type":
|
||||
if !validContentType(f.Value) {
|
||||
d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value))
|
||||
return
|
||||
}
|
||||
case "grpc-encoding":
|
||||
d.encoding = f.Value
|
||||
case "grpc-status":
|
||||
code, err := strconv.Atoi(f.Value)
|
||||
if err != nil {
|
||||
d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err))
|
||||
return
|
||||
}
|
||||
d.statusCode = codes.Code(code)
|
||||
case "grpc-message":
|
||||
d.statusDesc = decodeGrpcMessage(f.Value)
|
||||
case "grpc-timeout":
|
||||
d.timeoutSet = true
|
||||
var err error
|
||||
d.timeout, err = decodeTimeout(f.Value)
|
||||
if err != nil {
|
||||
d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err))
|
||||
return
|
||||
}
|
||||
case ":path":
|
||||
d.method = f.Value
|
||||
default:
|
||||
if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) {
|
||||
if f.Name == "user-agent" {
|
||||
i := strings.LastIndex(f.Value, " ")
|
||||
if i == -1 {
|
||||
// There is no application user agent string being set.
|
||||
return
|
||||
}
|
||||
// Extract the application user agent string.
|
||||
f.Value = f.Value[:i]
|
||||
}
|
||||
if d.mdata == nil {
|
||||
d.mdata = make(map[string][]string)
|
||||
}
|
||||
k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
|
||||
if err != nil {
|
||||
grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
|
||||
return
|
||||
}
|
||||
d.mdata[k] = append(d.mdata[k], v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type timeoutUnit uint8
|
||||
|
||||
const (
|
||||
hour timeoutUnit = 'H'
|
||||
minute timeoutUnit = 'M'
|
||||
second timeoutUnit = 'S'
|
||||
millisecond timeoutUnit = 'm'
|
||||
microsecond timeoutUnit = 'u'
|
||||
nanosecond timeoutUnit = 'n'
|
||||
)
|
||||
|
||||
func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
|
||||
switch u {
|
||||
case hour:
|
||||
return time.Hour, true
|
||||
case minute:
|
||||
return time.Minute, true
|
||||
case second:
|
||||
return time.Second, true
|
||||
case millisecond:
|
||||
return time.Millisecond, true
|
||||
case microsecond:
|
||||
return time.Microsecond, true
|
||||
case nanosecond:
|
||||
return time.Nanosecond, true
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const maxTimeoutValue int64 = 100000000 - 1
|
||||
|
||||
// div does integer division and rounds up the result. Note that this is
|
||||
// equivalent to (d+r-1)/r but has less chance to overflow.
|
||||
func div(d, r time.Duration) int64 {
|
||||
if m := d % r; m > 0 {
|
||||
return int64(d/r + 1)
|
||||
}
|
||||
return int64(d / r)
|
||||
}
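
The rounding behaviour matters for timeout encoding: rounding up guarantees the encoded deadline is never shorter than the caller's. A standalone sketch (not part of the vendored file):

package main

import (
	"fmt"
	"time"
)

// div rounds the quotient up, exactly as above.
func div(d, r time.Duration) int64 {
	if m := d % r; m > 0 {
		return int64(d/r + 1)
	}
	return int64(d / r)
}

func main() {
	fmt.Println(div(2500*time.Microsecond, time.Millisecond)) // 3, not 2
	fmt.Println(div(2000*time.Microsecond, time.Millisecond)) // 2
}
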
|
||||
|
||||
// TODO(zhaoq): It is simplistic and not bandwidth efficient. Improve it.
|
||||
func encodeTimeout(t time.Duration) string {
|
||||
if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "n"
|
||||
}
|
||||
if d := div(t, time.Microsecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "u"
|
||||
}
|
||||
if d := div(t, time.Millisecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "m"
|
||||
}
|
||||
if d := div(t, time.Second); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "S"
|
||||
}
|
||||
if d := div(t, time.Minute); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "M"
|
||||
}
|
||||
// Note that maxTimeoutValue * time.Hour > MaxInt64.
|
||||
return strconv.FormatInt(div(t, time.Hour), 10) + "H"
|
||||
}
|
||||
|
||||
func decodeTimeout(s string) (time.Duration, error) {
|
||||
size := len(s)
|
||||
if size < 2 {
|
||||
return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
|
||||
}
|
||||
unit := timeoutUnit(s[size-1])
|
||||
d, ok := timeoutUnitToDuration(unit)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
|
||||
}
|
||||
t, err := strconv.ParseInt(s[:size-1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return d * time.Duration(t), nil
|
||||
}
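
For reference, a standalone sketch of how a grpc-timeout header value maps to a duration, using the unit letters defined in the const block above (this is illustrative, not part of the vendored file):

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// "100m" means 100 milliseconds on the wire.
	v := "100m"
	units := map[byte]time.Duration{
		'H': time.Hour, 'M': time.Minute, 'S': time.Second,
		'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
	}
	n, err := strconv.ParseInt(v[:len(v)-1], 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(n) * units[v[len(v)-1]]) // 100ms
}
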
|
||||
|
||||
const (
|
||||
spaceByte = ' '
|
||||
tildaByte = '~'
|
||||
percentByte = '%'
|
||||
)
|
||||
|
||||
// encodeGrpcMessage is used to encode status code in header field
|
||||
// "grpc-message".
|
||||
// It checks to see if each individual byte in msg is an
|
||||
// allowable byte, and then either percent encodes it or passes it through.
|
||||
// When percent encoding, the byte is converted into hexadecimal notation
|
||||
// with a '%' prepended.
|
||||
func encodeGrpcMessage(msg string) string {
|
||||
if msg == "" {
|
||||
return ""
|
||||
}
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if !(c >= spaceByte && c < tildaByte && c != percentByte) {
|
||||
return encodeGrpcMessageUnchecked(msg)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func encodeGrpcMessageUnchecked(msg string) string {
|
||||
var buf bytes.Buffer
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if c >= spaceByte && c < tildaByte && c != percentByte {
|
||||
buf.WriteByte(c)
|
||||
} else {
|
||||
buf.WriteString(fmt.Sprintf("%%%02X", c))
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
|
||||
func decodeGrpcMessage(msg string) string {
|
||||
if msg == "" {
|
||||
return ""
|
||||
}
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
if msg[i] == percentByte && i+2 < lenMsg {
|
||||
return decodeGrpcMessageUnchecked(msg)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func decodeGrpcMessageUnchecked(msg string) string {
|
||||
var buf bytes.Buffer
|
||||
lenMsg := len(msg)
|
||||
for i := 0; i < lenMsg; i++ {
|
||||
c := msg[i]
|
||||
if c == percentByte && i+2 < lenMsg {
|
||||
parsed, err := strconv.ParseInt(msg[i+1:i+3], 16, 8)
|
||||
if err != nil {
|
||||
buf.WriteByte(c)
|
||||
} else {
|
||||
buf.WriteByte(byte(parsed))
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
buf.WriteByte(c)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
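
A standalone sketch of the grpc-message escaping described above (not part of the vendored file): bytes outside the printable ASCII range, plus '%' itself, are written as %XX, and everything else passes through unchanged.

package main

import (
	"bytes"
	"fmt"
)

// encode mirrors encodeGrpcMessageUnchecked.
func encode(msg string) string {
	var buf bytes.Buffer
	for i := 0; i < len(msg); i++ {
		c := msg[i]
		if c >= ' ' && c < '~' && c != '%' {
			buf.WriteByte(c)
		} else {
			fmt.Fprintf(&buf, "%%%02X", c)
		}
	}
	return buf.String()
}

func main() {
	fmt.Println(encode("café 100%")) // caf%C3%A9 100%25
}
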
|
||||
|
||||
type framer struct {
|
||||
numWriters int32
|
||||
reader io.Reader
|
||||
writer *bufio.Writer
|
||||
fr *http2.Framer
|
||||
}
|
||||
|
||||
func newFramer(conn net.Conn) *framer {
|
||||
f := &framer{
|
||||
reader: bufio.NewReaderSize(conn, http2IOBufSize),
|
||||
writer: bufio.NewWriterSize(conn, http2IOBufSize),
|
||||
}
|
||||
f.fr = http2.NewFramer(f.writer, f.reader)
|
||||
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *framer) adjustNumWriters(i int32) int32 {
|
||||
return atomic.AddInt32(&f.numWriters, i)
|
||||
}
|
||||
|
||||
// The following writeXXX functions can only be called when the caller gets
|
||||
// unblocked from writableChan channel (i.e., owns the privilege to write).
|
||||
|
||||
func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
|
||||
if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error {
|
||||
if err := f.fr.WriteData(streamID, endStream, data); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error {
|
||||
if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error {
|
||||
if err := f.fr.WriteHeaders(p); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error {
|
||||
if err := f.fr.WritePing(ack, data); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error {
|
||||
if err := f.fr.WritePriority(streamID, p); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error {
|
||||
if err := f.fr.WritePushPromise(p); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error {
|
||||
if err := f.fr.WriteRSTStream(streamID, code); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error {
|
||||
if err := f.fr.WriteSettings(settings...); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeSettingsAck(forceFlush bool) error {
|
||||
if err := f.fr.WriteSettingsAck(); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error {
|
||||
if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil {
|
||||
return err
|
||||
}
|
||||
if forceFlush {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *framer) flushWrite() error {
|
||||
return f.writer.Flush()
|
||||
}
|
||||
|
||||
func (f *framer) readFrame() (http2.Frame, error) {
|
||||
return f.fr.ReadFrame()
|
||||
}
|
||||
|
||||
func (f *framer) errorDetail() error {
|
||||
return f.fr.ErrorDetail()
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
// +build !go1.6
|
||||
|
||||
/*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// dialContext connects to the address on the named network.
|
||||
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
||||
var dialer net.Dialer
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
dialer.Timeout = deadline.Sub(time.Now())
|
||||
}
|
||||
return dialer.Dial(network, address)
|
||||
}
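
This file only carries the pre-Go 1.6 fallback, which can honour the context deadline but not cancellation. On Go 1.7 and later, (&net.Dialer{}).DialContext can be used directly and also honours cancellation; a standalone sketch of that approach (an assumption about the newer standard-library API, not taken from this commit):

package main

import (
	"context"
	"net"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var d net.Dialer
	conn, err := d.DialContext(ctx, "tcp", "example.com:80") // cancellable dial
	if err != nil {
		return
	}
	conn.Close()
}
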
|
|
@ -0,0 +1,578 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
Package transport defines and implements a message-oriented communication channel
|
||||
to complete various transactions (e.g., an RPC).
|
||||
*/
|
||||
package transport // import "google.golang.org/grpc/transport"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// recvMsg represents the received msg from the transport. All transport
|
||||
// protocol specific info has been removed.
|
||||
type recvMsg struct {
|
||||
data []byte
|
||||
// nil: received some data
|
||||
// io.EOF: stream is completed. data is nil.
|
||||
// other non-nil error: transport failure. data is nil.
|
||||
err error
|
||||
}
|
||||
|
||||
func (*recvMsg) item() {}
|
||||
|
||||
// All items in and out of a recvBuffer should be the same type.
|
||||
type item interface {
|
||||
item()
|
||||
}
|
||||
|
||||
// recvBuffer is an unbounded channel of item.
|
||||
type recvBuffer struct {
|
||||
c chan item
|
||||
mu sync.Mutex
|
||||
backlog []item
|
||||
}
|
||||
|
||||
func newRecvBuffer() *recvBuffer {
|
||||
b := &recvBuffer{
|
||||
c: make(chan item, 1),
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *recvBuffer) put(r item) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- r:
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, r)
|
||||
}
|
||||
|
||||
func (b *recvBuffer) load() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// get returns the channel that receives an item in the buffer.
|
||||
//
|
||||
// Upon receipt of an item, the caller should call load to send another
|
||||
// item onto the channel if there is any.
|
||||
func (b *recvBuffer) get() <-chan item {
|
||||
return b.c
|
||||
}
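
The put/get/load trio above implements an unbounded queue on top of a one-element channel. A standalone sketch of the consumer-side protocol (illustrative types, not part of the vendored file): receive from get(), then immediately call load() so the next backlogged item is promoted onto the channel.

package main

import (
	"fmt"
	"sync"
)

type buffer struct {
	c       chan int
	mu      sync.Mutex
	backlog []int
}

func (b *buffer) put(v int) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- v: // fast path: channel slot is free
			return
		default:
		}
	}
	b.backlog = append(b.backlog, v) // otherwise queue it
}

func (b *buffer) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func main() {
	b := &buffer{c: make(chan int, 1)}
	for i := 1; i <= 3; i++ {
		b.put(i) // producer never blocks
	}
	for i := 0; i < 3; i++ {
		v := <-b.c
		b.load() // promote the next backlogged item
		fmt.Println(v)
	}
}
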
|
||||
|
||||
// recvBufferReader implements io.Reader interface to read the data from
|
||||
// recvBuffer.
|
||||
type recvBufferReader struct {
|
||||
ctx context.Context
|
||||
goAway chan struct{}
|
||||
recv *recvBuffer
|
||||
last *bytes.Reader // Stores the remaining data in the previous calls.
|
||||
err error
|
||||
}
|
||||
|
||||
// Read reads the next len(p) bytes from last. If last is drained, it tries to
|
||||
// read additional data from recv. It blocks if there is no additional data available
|
||||
// in recv. If Read returns any non-nil error, it will continue to return that error.
|
||||
func (r *recvBufferReader) Read(p []byte) (n int, err error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
defer func() { r.err = err }()
|
||||
if r.last != nil && r.last.Len() > 0 {
|
||||
// Read remaining data left in last call.
|
||||
return r.last.Read(p)
|
||||
}
|
||||
select {
|
||||
case <-r.ctx.Done():
|
||||
return 0, ContextErr(r.ctx.Err())
|
||||
case <-r.goAway:
|
||||
return 0, ErrStreamDrain
|
||||
case i := <-r.recv.get():
|
||||
r.recv.load()
|
||||
m := i.(*recvMsg)
|
||||
if m.err != nil {
|
||||
return 0, m.err
|
||||
}
|
||||
r.last = bytes.NewReader(m.data)
|
||||
return r.last.Read(p)
|
||||
}
|
||||
}
|
||||
|
||||
type streamState uint8
|
||||
|
||||
const (
|
||||
streamActive streamState = iota
|
||||
streamWriteDone // EndStream sent
|
||||
streamReadDone // EndStream received
|
||||
streamDone // the entire stream is finished.
|
||||
)
|
||||
|
||||
// Stream represents an RPC in the transport layer.
|
||||
type Stream struct {
|
||||
id uint32
|
||||
// nil for client side Stream.
|
||||
st ServerTransport
|
||||
// ctx is the associated context of the stream.
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
// done is closed when the final status arrives.
|
||||
done chan struct{}
|
||||
// goAway is closed when the server sent a GoAway signal before this stream was initiated.
|
||||
goAway chan struct{}
|
||||
// method records the associated RPC method of the stream.
|
||||
method string
|
||||
recvCompress string
|
||||
sendCompress string
|
||||
buf *recvBuffer
|
||||
dec io.Reader
|
||||
fc *inFlow
|
||||
recvQuota uint32
|
||||
// The accumulated inbound quota pending for window update.
|
||||
updateQuota uint32
|
||||
// The handler to control the window update procedure for both this
|
||||
// particular stream and the associated transport.
|
||||
windowHandler func(int)
|
||||
|
||||
sendQuotaPool *quotaPool
|
||||
// Close headerChan to indicate the end of reception of header metadata.
|
||||
headerChan chan struct{}
|
||||
// header caches the received header metadata.
|
||||
header metadata.MD
|
||||
// The key-value map of trailer metadata.
|
||||
trailer metadata.MD
|
||||
|
||||
mu sync.RWMutex // guard the following
|
||||
// headerOk becomes true when the first header is about to be sent.
|
||||
headerOk bool
|
||||
state streamState
|
||||
// true iff headerChan is closed. Used to avoid closing headerChan
|
||||
// multiple times.
|
||||
headerDone bool
|
||||
// the status received from the server.
|
||||
statusCode codes.Code
|
||||
statusDesc string
|
||||
}
|
||||
|
||||
// RecvCompress returns the compression algorithm applied to the inbound
|
||||
// message. It is empty string if there is no compression applied.
|
||||
func (s *Stream) RecvCompress() string {
|
||||
return s.recvCompress
|
||||
}
|
||||
|
||||
// SetSendCompress sets the compression algorithm to the stream.
|
||||
func (s *Stream) SetSendCompress(str string) {
|
||||
s.sendCompress = str
|
||||
}
|
||||
|
||||
// Done returns a channel which is closed when it receives the final status
|
||||
// from the server.
|
||||
func (s *Stream) Done() <-chan struct{} {
|
||||
return s.done
|
||||
}
|
||||
|
||||
// GoAway returns a channel which is closed when the server sent a GoAway signal
|
||||
// before this stream was initiated.
|
||||
func (s *Stream) GoAway() <-chan struct{} {
|
||||
return s.goAway
|
||||
}
|
||||
|
||||
// Header acquires the key-value pairs of header metadata once it
|
||||
// is available. It blocks until i) the metadata is ready or ii) there is no
|
||||
// header metadata or iii) the stream is cancelled/expired.
|
||||
func (s *Stream) Header() (metadata.MD, error) {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return nil, ContextErr(s.ctx.Err())
|
||||
case <-s.goAway:
|
||||
return nil, ErrStreamDrain
|
||||
case <-s.headerChan:
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Trailer returns the cached trailer metadata. Note that if it is not called
|
||||
// after the entire stream is done, it could return an empty MD. Client
|
||||
// side only.
|
||||
func (s *Stream) Trailer() metadata.MD {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.trailer.Copy()
|
||||
}
|
||||
|
||||
// ServerTransport returns the underlying ServerTransport for the stream.
|
||||
// The client side stream always returns nil.
|
||||
func (s *Stream) ServerTransport() ServerTransport {
|
||||
return s.st
|
||||
}
|
||||
|
||||
// Context returns the context of the stream.
|
||||
func (s *Stream) Context() context.Context {
|
||||
return s.ctx
|
||||
}
|
||||
|
||||
// TraceContext recreates the context of s with a trace.Trace.
|
||||
func (s *Stream) TraceContext(tr trace.Trace) {
|
||||
s.ctx = trace.NewContext(s.ctx, tr)
|
||||
}
|
||||
|
||||
// Method returns the method for the stream.
|
||||
func (s *Stream) Method() string {
|
||||
return s.method
|
||||
}
|
||||
|
||||
// StatusCode returns statusCode received from the server.
|
||||
func (s *Stream) StatusCode() codes.Code {
|
||||
return s.statusCode
|
||||
}
|
||||
|
||||
// StatusDesc returns statusDesc received from the server.
|
||||
func (s *Stream) StatusDesc() string {
|
||||
return s.statusDesc
|
||||
}
|
||||
|
||||
// ErrIllegalTrailerSet indicates that the trailer has already been set or it
|
||||
// is too late to do so.
|
||||
var ErrIllegalTrailerSet = errors.New("transport: trailer has been set")
|
||||
|
||||
// SetTrailer sets the trailer metadata which will be sent with the RPC status
|
||||
// by the server. This can only be called at most once. Server side only.
|
||||
func (s *Stream) SetTrailer(md metadata.MD) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.trailer != nil {
|
||||
return ErrIllegalTrailerSet
|
||||
}
|
||||
s.trailer = md.Copy()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Stream) write(m recvMsg) {
|
||||
s.buf.put(&m)
|
||||
}
|
||||
|
||||
// Read reads all the data available for this Stream from the transport and
|
||||
// passes them into the decoder, which converts them into a gRPC message stream.
|
||||
// The error is io.EOF when the stream is done or another non-nil error if
|
||||
// the stream broke.
|
||||
func (s *Stream) Read(p []byte) (n int, err error) {
|
||||
n, err = s.dec.Read(p)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
s.windowHandler(n)
|
||||
return
|
||||
}
|
||||
|
||||
// The key to save transport.Stream in the context.
|
||||
type streamKey struct{}
|
||||
|
||||
// newContextWithStream creates a new context from ctx and attaches stream
|
||||
// to it.
|
||||
func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
|
||||
return context.WithValue(ctx, streamKey{}, stream)
|
||||
}
|
||||
|
||||
// StreamFromContext returns the stream saved in ctx.
|
||||
func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
|
||||
s, ok = ctx.Value(streamKey{}).(*Stream)
|
||||
return
|
||||
}
|
||||
|
||||
// state of transport
|
||||
type transportState int
|
||||
|
||||
const (
|
||||
reachable transportState = iota
|
||||
unreachable
|
||||
closing
|
||||
draining
|
||||
)
|
||||
|
||||
// NewServerTransport creates a ServerTransport with conn or non-nil error
|
||||
// if it fails.
|
||||
func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) {
|
||||
return newHTTP2Server(conn, maxStreams, authInfo)
|
||||
}
|
||||
|
||||
// ConnectOptions covers all relevant options for dialing a server.
|
||||
type ConnectOptions struct {
|
||||
// UserAgent is the application user agent.
|
||||
UserAgent string
|
||||
// Dialer specifies how to dial a network address.
|
||||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
|
||||
PerRPCCredentials []credentials.PerRPCCredentials
|
||||
// TransportCredentials stores the Authenticator required to setup a client connection.
|
||||
TransportCredentials credentials.TransportCredentials
|
||||
}
|
||||
|
||||
// NewClientTransport establishes the transport with the required ConnectOptions
|
||||
// and returns it to the caller.
|
||||
func NewClientTransport(ctx context.Context, target string, opts ConnectOptions) (ClientTransport, error) {
|
||||
return newHTTP2Client(ctx, target, opts)
|
||||
}
|
||||
|
||||
// Options provides additional hints and information for message
|
||||
// transmission.
|
||||
type Options struct {
|
||||
// Last indicates whether this write is the last piece for
|
||||
// this stream.
|
||||
Last bool
|
||||
|
||||
// Delay is a hint to the transport implementation for whether
|
||||
// the data could be buffered for a batching write. The
|
||||
// Transport implementation may ignore the hint.
|
||||
Delay bool
|
||||
}
|
||||
|
||||
// CallHdr carries the information of a particular RPC.
|
||||
type CallHdr struct {
|
||||
// Host specifies the peer's host.
|
||||
Host string
|
||||
|
||||
// Method specifies the operation to perform.
|
||||
Method string
|
||||
|
||||
// RecvCompress specifies the compression algorithm applied on
|
||||
// inbound messages.
|
||||
RecvCompress string
|
||||
|
||||
// SendCompress specifies the compression algorithm applied on
|
||||
// outbound message.
|
||||
SendCompress string
|
||||
|
||||
// Flush indicates whether a new stream command should be sent
|
||||
// to the peer without waiting for the first data. This is
|
||||
// only a hint. The transport may modify the flush decision
|
||||
// for performance purposes.
|
||||
Flush bool
|
||||
}
|
||||
|
||||
// ClientTransport is the common interface for all gRPC client-side transport
|
||||
// implementations.
|
||||
type ClientTransport interface {
|
||||
// Close tears down this transport. Once it returns, the transport
|
||||
// should not be accessed any more. The caller must make sure this
|
||||
// is called only once.
|
||||
Close() error
|
||||
|
||||
// GracefulClose starts to tear down the transport. It stops accepting
|
||||
// new RPCs and waits for the completion of the pending RPCs.
|
||||
GracefulClose() error
|
||||
|
||||
// Write sends the data for the given stream. A nil stream indicates
|
||||
// the write is to be performed on the transport as a whole.
|
||||
Write(s *Stream, data []byte, opts *Options) error
|
||||
|
||||
// NewStream creates a Stream for an RPC.
|
||||
NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
|
||||
|
||||
// CloseStream clears the footprint of a stream when the stream is
|
||||
// not needed any more. The err indicates the error incurred when
|
||||
// CloseStream is called. Must be called when a stream is finished
|
||||
// unless the associated transport is closing.
|
||||
CloseStream(stream *Stream, err error)
|
||||
|
||||
// Error returns a channel that is closed when some I/O error
|
||||
// happens. Typically the caller should have a goroutine to monitor
|
||||
// this in order to take action (e.g., close the current transport
|
||||
// and create a new one) in error case. It should not return nil
|
||||
// once the transport is initiated.
|
||||
Error() <-chan struct{}
|
||||
|
||||
// GoAway returns a channel that is closed when ClientTransport
|
||||
// receives the draining signal from the server (e.g., GOAWAY frame in
|
||||
// HTTP/2).
|
||||
GoAway() <-chan struct{}
|
||||
}
|
||||
|
||||
// ServerTransport is the common interface for all gRPC server-side transport
|
||||
// implementations.
|
||||
//
|
||||
// Methods may be called concurrently from multiple goroutines, but
|
||||
// Write methods for a given Stream will be called serially.
|
||||
type ServerTransport interface {
|
||||
// HandleStreams receives incoming streams using the given handler.
|
||||
HandleStreams(func(*Stream))
|
||||
|
||||
// WriteHeader sends the header metadata for the given stream.
|
||||
// WriteHeader may not be called on all streams.
|
||||
WriteHeader(s *Stream, md metadata.MD) error
|
||||
|
||||
// Write sends the data for the given stream.
|
||||
// Write may not be called on all streams.
|
||||
Write(s *Stream, data []byte, opts *Options) error
|
||||
|
||||
// WriteStatus sends the status of a stream to the client.
|
||||
// WriteStatus is the final call made on a stream and always
|
||||
// occurs.
|
||||
WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
|
||||
|
||||
// Close tears down the transport. Once it is called, the transport
|
||||
// should not be accessed any more. All the pending streams and their
|
||||
// handlers will be terminated asynchronously.
|
||||
Close() error
|
||||
|
||||
// RemoteAddr returns the remote network address.
|
||||
RemoteAddr() net.Addr
|
||||
|
||||
// Drain notifies the client this ServerTransport stops accepting new RPCs.
|
||||
Drain()
|
||||
}
|
||||
|
||||
// StreamErrorf creates a StreamError with the specified error code and description.
|
||||
func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
|
||||
return StreamError{
|
||||
Code: c,
|
||||
Desc: fmt.Sprintf(format, a...),
|
||||
}
|
||||
}
|
||||
|
||||
// ConnectionErrorf creates a ConnectionError with the specified error description.
|
||||
func ConnectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
|
||||
return ConnectionError{
|
||||
Desc: fmt.Sprintf(format, a...),
|
||||
temp: temp,
|
||||
err: e,
|
||||
}
|
||||
}
|
||||
|
||||
// ConnectionError is an error that results in the termination of the
|
||||
// entire connection and the retry of all the active streams.
|
||||
type ConnectionError struct {
|
||||
Desc string
|
||||
temp bool
|
||||
err error
|
||||
}
|
||||
|
||||
func (e ConnectionError) Error() string {
|
||||
return fmt.Sprintf("connection error: desc = %q", e.Desc)
|
||||
}
|
||||
|
||||
// Temporary indicates if this connection error is temporary or fatal.
|
||||
func (e ConnectionError) Temporary() bool {
|
||||
return e.temp
|
||||
}
|
||||
|
||||
// Origin returns the original error of this connection error.
|
||||
func (e ConnectionError) Origin() error {
|
||||
// Never return nil error here.
|
||||
// If the original error is nil, return itself.
|
||||
if e.err == nil {
|
||||
return e
|
||||
}
|
||||
return e.err
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrConnClosing indicates that the transport is closing.
|
||||
ErrConnClosing = ConnectionError{Desc: "transport is closing", temp: true}
|
||||
// ErrStreamDrain indicates that the stream is rejected by the server because
|
||||
// the server stops accepting new RPCs.
|
||||
ErrStreamDrain = StreamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
|
||||
)
|
||||
|
||||
// StreamError is an error that only affects one stream within a connection.
|
||||
type StreamError struct {
|
||||
Code codes.Code
|
||||
Desc string
|
||||
}
|
||||
|
||||
func (e StreamError) Error() string {
|
||||
return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc)
|
||||
}
|
||||
|
||||
// ContextErr converts the error from context package into a StreamError.
|
||||
func ContextErr(err error) StreamError {
|
||||
switch err {
|
||||
case context.DeadlineExceeded:
|
||||
return StreamErrorf(codes.DeadlineExceeded, "%v", err)
|
||||
case context.Canceled:
|
||||
return StreamErrorf(codes.Canceled, "%v", err)
|
||||
}
|
||||
panic(fmt.Sprintf("Unexpected error from context packet: %v", err))
|
||||
}
|
||||
|
||||
// wait blocks until it can receive from ctx.Done, done, goAway, closing, or proceed.
|
||||
// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
|
||||
// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise
|
||||
// it returns the StreamError for ctx.Err.
|
||||
// If it receives from goAway, it returns 0, ErrStreamDrain.
|
||||
// If it receives from closing, it returns 0, ErrConnClosing.
|
||||
// If it receives from proceed, it returns the received integer, nil.
|
||||
func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ContextErr(ctx.Err())
|
||||
case <-done:
|
||||
// User cancellation has precedence.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ContextErr(ctx.Err())
|
||||
default:
|
||||
}
|
||||
return 0, io.EOF
|
||||
case <-goAway:
|
||||
return 0, ErrStreamDrain
|
||||
case <-closing:
|
||||
return 0, ErrConnClosing
|
||||
case i := <-proceed:
|
||||
return i, nil
|
||||
}
|
||||
}
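
A standalone sketch of the typical calling pattern for wait (simplified: the done and goAway channels are omitted, and the standard library context is used for brevity; not part of the vendored file): writableChan holds a single token that serializes writers, and wait lets a writer bail out on cancellation or shutdown instead of blocking on the token forever.

package main

import (
	"context"
	"errors"
	"fmt"
)

func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case <-closing:
		return 0, errors.New("transport is closing")
	case i := <-proceed:
		return i, nil
	}
}

func main() {
	writable := make(chan int, 1)
	writable <- 0 // the single write token
	shutdown := make(chan struct{})

	if _, err := wait(context.Background(), shutdown, writable); err != nil {
		fmt.Println("no write privilege:", err)
		return
	}
	fmt.Println("acquired write privilege")
	writable <- 0 // hand the token back when done
}
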
|
|
@ -831,8 +831,8 @@
|
|||
{
|
||||
"checksumSHA1": "9jjO5GjLa0XF/nfWihF02RoH4qc=",
|
||||
"path": "golang.org/x/net/context",
|
||||
"revision": "9f2c271364b418388d150f9c12ecbf12794095c1",
|
||||
"revisionTime": "2016-07-26T08:08:57Z"
|
||||
"revision": "07b51741c1d6423d4a6abab1c49940ec09cb1aaf",
|
||||
"revisionTime": "2016-08-11T10:50:59Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "g4lA8FNetHh8as26V8cLl5EGy78=",
|
||||
|
@ -846,12 +846,24 @@
|
|||
"revision": "07b51741c1d6423d4a6abab1c49940ec09cb1aaf",
|
||||
"revisionTime": "2016-08-11T10:50:59Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "/k7k6eJDkxXx6K9Zpo/OwNm58XM=",
|
||||
"path": "golang.org/x/net/internal/timeseries",
|
||||
"revision": "07b51741c1d6423d4a6abab1c49940ec09cb1aaf",
|
||||
"revisionTime": "2016-08-11T10:50:59Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "yhndhWXMs/VSEDLks4dNyFMQStA=",
|
||||
"path": "golang.org/x/net/lex/httplex",
|
||||
"revision": "07b51741c1d6423d4a6abab1c49940ec09cb1aaf",
|
||||
"revisionTime": "2016-08-11T10:50:59Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "WpST9lFOHvWrjDVy0GJNqHe+R3E=",
|
||||
"path": "golang.org/x/net/trace",
|
||||
"revision": "07b51741c1d6423d4a6abab1c49940ec09cb1aaf",
|
||||
"revisionTime": "2016-08-11T10:50:59Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "8GUYZXntrAzpG1cThk8/TtR3kj4=",
|
||||
"path": "golang.org/x/oauth2",
|
||||
|
@ -912,6 +924,60 @@
|
|||
"revision": "267c27e7492265b84fc6719503b14a1e17975d79",
|
||||
"revisionTime": "2016-06-21T05:59:22Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "hdOtcPZaCh15JIjxJi3aAD/aLDI=",
|
||||
"path": "google.golang.org/grpc",
|
||||
"revision": "8a81ddda27e5fdd3b56f3ce841b37dbf7387dd26",
|
||||
"revisionTime": "2016-08-17T18:18:27Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "08icuA15HRkdYCt6H+Cs90RPQsY=",
|
||||
"path": "google.golang.org/grpc/codes",
|
||||
"revision": "8a81ddda27e5fdd3b56f3ce841b37dbf7387dd26",
|
||||
"revisionTime": "2016-08-17T18:18:27Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "4Wx8hSn+xOJAk2iBVFquOdijGOU=",
|
||||
"path": "google.golang.org/grpc/credentials",
|
||||
"revision": "8a81ddda27e5fdd3b56f3ce841b37dbf7387dd26",
|
||||
"revisionTime": "2016-08-17T18:18:27Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "3Lt5hNAG8qJAYSsNghR5uA1zQns=",
|
||||
"path": "google.golang.org/grpc/grpclog",
|
||||
"revision": "8a81ddda27e5fdd3b56f3ce841b37dbf7387dd26",
|
||||
"revisionTime": "2016-08-17T18:18:27Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "T3Q0p8kzvXFnRkMaK/G8mCv6mc0=",
|
||||
"path": "google.golang.org/grpc/internal",
|
||||
"revision": "8a81ddda27e5fdd3b56f3ce841b37dbf7387dd26",
|
||||
"revisionTime": "2016-08-17T18:18:27Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "rxtneb9jDjKPHNZhC5obfG++6bI=",
|
||||
"path": "google.golang.org/grpc/metadata",
|
||||
"revision": "8a81ddda27e5fdd3b56f3ce841b37dbf7387dd26",
|
||||
"revisionTime": "2016-08-17T18:18:27Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "4GSUFhOQ0kdFlBH4D5OTeKy78z0=",
|
||||
"path": "google.golang.org/grpc/naming",
|
||||
"revision": "8a81ddda27e5fdd3b56f3ce841b37dbf7387dd26",
|
||||
"revisionTime": "2016-08-17T18:18:27Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "3RRoLeH6X2//7tVClOVzxW2bY+E=",
|
||||
"path": "google.golang.org/grpc/peer",
|
||||
"revision": "8a81ddda27e5fdd3b56f3ce841b37dbf7387dd26",
|
||||
"revisionTime": "2016-08-17T18:18:27Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "h/4MgY+Mz/K20Hce+QAD64y4Y58=",
|
||||
"path": "google.golang.org/grpc/transport",
|
||||
"revision": "8a81ddda27e5fdd3b56f3ce841b37dbf7387dd26",
|
||||
"revisionTime": "2016-08-17T18:18:27Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "wSu8owMAP7GixsYoSZ4CmKUVhnU=",
|
||||
"path": "gopkg.in/asn1-ber.v1",
|
||||
|
|