Update deps (#5520)

This commit is contained in:
Brian Kassouf 2018-10-15 14:36:55 -07:00 committed by GitHub
parent f52cd4950f
commit d987a3c230
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
262 changed files with 7323 additions and 10063 deletions


@ -18,7 +18,6 @@ package spanner
import (
"fmt"
"log"
"regexp"
"sync/atomic"
"time"
@ -255,26 +254,12 @@ func (c *Client) BatchReadOnlyTransaction(ctx context.Context, tb TimestampBound
)
defer func() {
if err != nil && sh != nil {
e := runRetryable(ctx, func(ctx context.Context) error {
_, e := s.client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: s.getID()})
return e
})
if e != nil {
log.Printf("Failed to delete session %v. Error: %v", s.getID(), e)
}
s.delete(ctx)
}
}()
// create session
sc := c.rrNext()
err = runRetryable(ctx, func(ctx context.Context) error {
sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{Database: c.database, Session: &sppb.Session{Labels: c.sessionLabels}})
if e != nil {
return e
}
// If no error, construct the new session.
s = &session{valid: true, client: sc, id: sid.Name, createTime: time.Now(), md: c.md}
return nil
})
s, err = createSession(ctx, sc, c.database, c.sessionLabels, c.md)
if err != nil {
return nil, err
}


@ -306,6 +306,19 @@ mutations, which will all be executed at the end of the transaction:
return nil
})
DML and Partitioned DML
Spanner supports DML statements like INSERT, UPDATE and DELETE. Use
ReadWriteTransaction.Update to run DML statements. It returns the number of rows
affected. (You can call ReadWriteTransaction.Query with a DML statement. The first
call to Next on the resulting RowIterator will return iterator.Done, and the RowCount
field of the iterator will hold the number of affected rows.)
For large databases, it may be more efficient to partition the DML statement. Use
Client.PartitionedUpdate to run a DML statement in this way. Not all DML statements
can be partitioned.
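As an illustration of the Query path, a minimal sketch that runs DML through Query and reads the affected-row count from the iterator once Next returns iterator.Done; the client variable, table, and column names are assumptions, and the imports (context, log, cloud.google.com/go/spanner, google.golang.org/api/iterator) are implied:

func deleteSinger(ctx context.Context, client *spanner.Client) error {
	_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
		iter := txn.Query(ctx, spanner.Statement{SQL: `DELETE FROM Singers WHERE SingerId = 1`})
		defer iter.Stop()
		// A DML statement yields no rows, so the first Next returns iterator.Done;
		// anything else is a real error.
		if _, err := iter.Next(); err != iterator.Done {
			return err
		}
		log.Printf("rows affected: %d", iter.RowCount)
		return nil
	})
	return err
}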
Tracing
This client has been instrumented to use OpenCensus tracing (http://opencensus.io).


@ -67,6 +67,10 @@ type RowIterator struct {
// if QueryWithStats was called.
QueryStats map[string]interface{}
// For a DML statement, the number of rows affected. For PDML, this is a lower bound.
// Available for DML statements after RowIterator.Next returns iterator.Done.
RowCount int64
streamd *resumableStreamDecoder
rowd *partialResultSetDecoder
setTimestamp func(time.Time)
@ -74,6 +78,7 @@ type RowIterator struct {
cancel func()
err error
rows []*Row
sawStats bool
}
// Next returns the next result. Its second return value is iterator.Done if
@ -86,8 +91,16 @@ func (r *RowIterator) Next() (*Row, error) {
for len(r.rows) == 0 && r.streamd.next() {
prs := r.streamd.get()
if prs.Stats != nil {
r.sawStats = true
r.QueryPlan = prs.Stats.QueryPlan
r.QueryStats = protostruct.DecodeToMap(prs.Stats.QueryStats)
if prs.Stats.RowCount != nil {
rc, err := extractRowCount(prs.Stats)
if err != nil {
return nil, err
}
r.RowCount = rc
}
}
r.rows, r.err = r.rowd.add(prs)
if r.err != nil {
@ -113,6 +126,20 @@ func (r *RowIterator) Next() (*Row, error) {
return nil, r.err
}
func extractRowCount(stats *sppb.ResultSetStats) (int64, error) {
if stats.RowCount == nil {
return 0, spannerErrorf(codes.Internal, "missing RowCount")
}
switch rc := stats.RowCount.(type) {
case *sppb.ResultSetStats_RowCountExact:
return rc.RowCountExact, nil
case *sppb.ResultSetStats_RowCountLowerBound:
return rc.RowCountLowerBound, nil
default:
return 0, spannerErrorf(codes.Internal, "unknown RowCount type %T", stats.RowCount)
}
}
// Do calls the provided function once in sequence for each row in the iteration. If the
// function returns a non-nil error, Do immediately returns that error.
//


@ -260,6 +260,11 @@ func (s *session) destroy(isExpire bool) bool {
// Remove s from Cloud Spanner service.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
s.delete(ctx)
return true
}
func (s *session) delete(ctx context.Context) {
// Ignore the error returned by runRetryable because even if we fail to explicitly destroy the session,
// it will eventually be garbage collected by Cloud Spanner.
err := runRetryable(ctx, func(ctx context.Context) error {
@ -269,7 +274,6 @@ func (s *session) destroy(isExpire bool) bool {
if err != nil {
log.Printf("Failed to delete session %v. Error: %v", s.getID(), err)
}
return true
}
// prepareForWrite prepares the session for write if it is not already in that state.
@ -464,29 +468,38 @@ func (p *sessionPool) createSession(ctx context.Context) (*session, error) {
doneCreate(false)
return nil, err
}
var s *session
err = runRetryable(ctx, func(ctx context.Context) error {
sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{
Database: p.db,
Session: &sppb.Session{Labels: p.sessionLabels},
})
if e != nil {
return e
}
// If no error, construct the new session.
s = &session{valid: true, client: sc, id: sid.Name, pool: p, createTime: time.Now(), md: p.md}
p.hc.register(s)
return nil
})
s, err := createSession(ctx, sc, p.db, p.sessionLabels, p.md)
if err != nil {
doneCreate(false)
// Should return error directly because of the previous retries on CreateSession RPC.
return nil, err
}
s.pool = p
p.hc.register(s)
doneCreate(true)
return s, nil
}
func createSession(ctx context.Context, sc sppb.SpannerClient, db string, labels map[string]string, md metadata.MD) (*session, error) {
var s *session
err := runRetryable(ctx, func(ctx context.Context) error {
sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{
Database: db,
Session: &sppb.Session{Labels: labels},
})
if e != nil {
return e
}
// If no error, construct the new session.
s = &session{valid: true, client: sc, id: sid.Name, createTime: time.Now(), md: md}
return nil
})
if err != nil {
return nil, err
}
return s, nil
}
func (p *sessionPool) isHealthy(s *session) bool {
if s.getNextCheck().Add(2 * p.hc.getInterval()).Before(time.Now()) {
// TODO: figure out if we need to schedule a new healthcheck worker here.


@ -18,6 +18,7 @@ package spanner
import (
"sync"
"sync/atomic"
"time"
"golang.org/x/net/context"
@ -46,6 +47,8 @@ type txReadEnv interface {
type txReadOnly struct {
// read-transaction environment for performing transactional read operations.
txReadEnv
sequenceNumber int64 // Atomic. Only needed for DML statements, but used for all.
}
// errSessionClosed returns error for using a recycled/destroyed session
@ -159,7 +162,7 @@ func (t *txReadOnly) Query(ctx context.Context, statement Statement) *RowIterato
return t.query(ctx, statement, sppb.ExecuteSqlRequest_NORMAL)
}
// Query executes a query against the database. It returns a RowIterator
// Query executes a SQL statement against the database. It returns a RowIterator
// for retrieving the resulting rows. The RowIterator will also be populated
// with a query plan and execution statistics.
func (t *txReadOnly) QueryWithStats(ctx context.Context, statement Statement) *RowIterator {
@ -188,29 +191,11 @@ func (t *txReadOnly) AnalyzeQuery(ctx context.Context, statement Statement) (*sp
func (t *txReadOnly) query(ctx context.Context, statement Statement, mode sppb.ExecuteSqlRequest_QueryMode) (ri *RowIterator) {
ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.Query")
defer func() { traceEndSpan(ctx, ri.err) }()
var (
sh *sessionHandle
ts *sppb.TransactionSelector
err error
)
if sh, ts, err = t.acquire(ctx); err != nil {
return &RowIterator{err: err}
}
// Cloud Spanner will return "Session not found" on bad sessions.
sid, client := sh.getID(), sh.getClient()
if sid == "" || client == nil {
// Might happen if the transaction is closed in the middle of an API call.
return &RowIterator{err: errSessionClosed(sh)}
}
req := &sppb.ExecuteSqlRequest{
Session: sid,
Transaction: ts,
Sql: statement.SQL,
QueryMode: mode,
}
if err := statement.bindParams(req); err != nil {
req, sh, err := t.prepareExecuteSql(ctx, statement, mode)
if err != nil {
return &RowIterator{err: err}
}
client := sh.getClient()
return stream(
contextWithOutgoingMetadata(ctx, sh.getMetadata()),
func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
@ -221,6 +206,31 @@ func (t *txReadOnly) query(ctx context.Context, statement Statement, mode sppb.E
t.release)
}
func (t *txReadOnly) prepareExecuteSql(ctx context.Context, stmt Statement, mode sppb.ExecuteSqlRequest_QueryMode) (
*sppb.ExecuteSqlRequest, *sessionHandle, error) {
sh, ts, err := t.acquire(ctx)
if err != nil {
return nil, nil, err
}
// Cloud Spanner will return "Session not found" on bad sessions.
sid := sh.getID()
if sid == "" {
// Might happen if the transaction is closed in the middle of an API call.
return nil, nil, errSessionClosed(sh)
}
req := &sppb.ExecuteSqlRequest{
Session: sid,
Transaction: ts,
Sql: stmt.SQL,
QueryMode: mode,
Seqno: atomic.AddInt64(&t.sequenceNumber, 1),
}
if err := stmt.bindParams(req); err != nil {
return nil, nil, err
}
return req, sh, nil
}
// txState is the status of a transaction.
type txState int
@ -648,6 +658,27 @@ func (t *ReadWriteTransaction) BufferWrite(ms []*Mutation) error {
return nil
}
// Update executes a DML statement against the database. It returns the number of
// affected rows.
// Update returns an error if the statement is a query. However, the
// query is executed, and any data read will be validated upon commit.
func (t *ReadWriteTransaction) Update(ctx context.Context, stmt Statement) (rowCount int64, err error) {
ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.Update")
defer func() { traceEndSpan(ctx, err) }()
req, sh, err := t.prepareExecuteSql(ctx, stmt, sppb.ExecuteSqlRequest_NORMAL)
if err != nil {
return 0, err
}
resultSet, err := sh.getClient().ExecuteSql(ctx, req)
if err != nil {
return 0, err
}
if resultSet.Stats == nil {
return 0, spannerErrorf(codes.InvalidArgument, "query passed to Update: %q", stmt.SQL)
}
return extractRowCount(resultSet.Stats)
}
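A hedged usage sketch for the new Update method; wrapping it in client.ReadWriteTransaction follows the documented pattern above, while the table and predicate are placeholders:

_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
	count, err := txn.Update(ctx, spanner.Statement{SQL: `UPDATE Singers SET Status = 'active' WHERE SingerId = 1`})
	if err != nil {
		return err
	}
	log.Printf("%d row(s) updated", count)
	return nil
})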
// acquire implements txReadEnv.acquire.
func (t *ReadWriteTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
ts := &sppb.TransactionSelector{


@ -169,7 +169,7 @@ func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
params = addTimeout(params, options.Timeout)
headers = mergeHeaders(headers, headersFromStruct(*options))
}
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
if err != nil {


@ -18,4 +18,4 @@ package version
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// Number contains the semantic version of this SDK.
const Number = "v21.1.0"
const Number = "v21.2.0"


@ -19,10 +19,6 @@ import (
"net/url"
)
const (
activeDirectoryAPIVersion = "1.0"
)
// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
@ -46,11 +42,25 @@ func validateStringParam(param, name string) error {
// NewOAuthConfig returns an OAuthConfig with tenant specific urls
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
apiVer := "1.0"
return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
}
// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
return nil, err
}
api := ""
// it's legal for tenantID to be empty so don't validate it
const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s"
if apiVersion != nil {
if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
return nil, err
}
api = fmt.Sprintf("?api-version=%s", *apiVersion)
}
const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
u, err := url.Parse(activeDirectoryEndpoint)
if err != nil {
return nil, err
@ -59,15 +69,15 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err
if err != nil {
return nil, err
}
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion))
authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
if err != nil {
return nil, err
}
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion))
tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
if err != nil {
return nil, err
}
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion))
deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
if err != nil {
return nil, err
}
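A hedged sketch of the old and new constructors from the adal package; the endpoint and tenant values are placeholders:

func oauthConfigs() (*adal.OAuthConfig, *adal.OAuthConfig, error) {
	// NewOAuthConfig keeps the previous behavior and appends api-version=1.0.
	withVer, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
	if err != nil {
		return nil, nil, err
	}
	// Passing nil for apiVersion omits the query parameter from the generated URLs.
	noVer, err := adal.NewOAuthConfigWithAPIVersion("https://login.microsoftonline.com/", "my-tenant-id", nil)
	if err != nil {
		return nil, nil, err
	}
	return withVer, noVer, nil
}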


@ -226,6 +226,8 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo
token := jwt.New(jwt.SigningMethodRS256)
token.Header["x5t"] = thumbprint
x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)}
token.Header["x5c"] = x5c
token.Claims = jwt.MapClaims{
"aud": spt.inner.OauthConfig.TokenEndpoint.String(),
"iss": spt.inner.ClientID,


@ -31,6 +31,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/azure/cli"
"github.com/dimchansky/utfbom"
"golang.org/x/crypto/pkcs12"
)
@ -167,6 +168,35 @@ func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, er
return autorest.NewBearerAuthorizer(spToken), nil
}
// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
func NewAuthorizerFromCLI() (autorest.Authorizer, error) {
settings, err := getAuthenticationSettings()
if err != nil {
return nil, err
}
if settings.resource == "" {
settings.resource = settings.environment.ResourceManagerEndpoint
}
return NewAuthorizerFromCLIWithResource(settings.resource)
}
// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) {
token, err := cli.GetTokenFromCLI(resource)
if err != nil {
return nil, err
}
adalToken, err := token.ToADALToken()
if err != nil {
return nil, err
}
return autorest.NewBearerAuthorizer(&adalToken), nil
}
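A hedged sketch of the CLI-based flow for local development, using the azure/auth package shown above; it assumes `az login` has already been run, and the client the authorizer is attached to is hypothetical:

// Uses the Azure CLI's cached token; the resource defaults to the ARM endpoint.
authorizer, err := auth.NewAuthorizerFromCLI()
if err != nil {
	log.Fatalf("getting CLI authorizer: %v", err)
}
// Attach to any generated client, e.g.:
// groupsClient.Authorizer = authorizer
_ = authorizer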
func getAuthFile() (*file, error) {
fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
if fileLocation == "" {


@ -20,7 +20,7 @@ import (
)
// Number contains the semantic version of this SDK.
const Number = "v11.0.0"
const Number = "v11.1.0"
var (
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",


@ -20,12 +20,11 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"github.com/satori/go.uuid"
"net/url"
"reflect"
"strconv"
"time"
"github.com/satori/go.uuid"
)
// if you use Go 1.10 or higher, you can use the following to avoid "TimeZone.zip not found" on Windows


@ -23,7 +23,7 @@
package fdb
/*
#define FDB_API_VERSION 600
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
*/
import "C"


@ -23,7 +23,7 @@
package fdb
/*
#define FDB_API_VERSION 600
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
*/
import "C"


@ -46,7 +46,7 @@ A basic interaction with the FoundationDB API is demonstrated below:
func main() {
// Different API versions may expose different runtime behaviors.
fdb.MustAPIVersion(600)
fdb.MustAPIVersion(610)
// Open the default database from the system cluster
db := fdb.MustOpenDefault()


@ -23,7 +23,7 @@
package fdb
/*
#define FDB_API_VERSION 600
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
*/
import "C"


@ -23,7 +23,7 @@
package fdb
/*
#define FDB_API_VERSION 600
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
#include <stdlib.h>
*/
@ -109,7 +109,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
// library, an error will be returned. APIVersion must be called prior to any
// other functions in the fdb package.
//
// Currently, this package supports API versions 200 through 600.
// Currently, this package supports API versions 200 through 610.
//
// Warning: When using the multi-version client API, setting an API version that
// is not supported by a particular client library will prevent that client from
@ -117,7 +117,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
// the API version of your application after upgrading your client until the
// cluster has also been upgraded.
func APIVersion(version int) error {
headerVersion := 600
headerVersion := 610
networkMutex.Lock()
defer networkMutex.Unlock()
@ -129,7 +129,7 @@ func APIVersion(version int) error {
return errAPIVersionAlreadySet
}
if version < 200 || version > 600 {
if version < 200 || version > 610 {
return errAPIVersionNotSupported
}


@ -24,7 +24,7 @@ package fdb
/*
#cgo LDFLAGS: -lfdb_c -lm
#define FDB_API_VERSION 600
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
#include <string.h>


@ -23,7 +23,7 @@
package fdb
/*
#define FDB_API_VERSION 600
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
*/
import "C"


@ -23,7 +23,7 @@
package fdb
/*
#define FDB_API_VERSION 600
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
*/
import "C"


@ -48,4 +48,6 @@ type metric struct {
DNSLatency *int `json:"DnsLatency,omitempty"`
TCPLatency *int `json:"TcpLatency,omitempty"`
SSLLatency *int `json:"SslLatency,omitempty"`
MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
}


@ -112,15 +112,16 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
now := time.Now()
m := metric{
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Region: r.Config.Region,
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Region: r.Config.Region,
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
}
// TODO: Probably want to figure something out for logging dropped
@ -230,3 +231,12 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
}
// boolIntValue returns 1 for true and 0 for false.
func boolIntValue(b bool) int {
if b {
return 1
}
return 0
}


@ -24,6 +24,7 @@ import (
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
// A Defaults provides a collection of default values for SDK clients.
@ -114,7 +115,6 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro
const (
httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
)
// RemoteCredProvider returns a credentials provider for the default remote
@ -124,8 +124,8 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P
return localHTTPCredProvider(cfg, handlers, u)
}
if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
u := fmt.Sprintf("http://169.254.170.2%s", uri)
if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
return httpCredProvider(cfg, handlers, u)
}


@ -17,6 +17,10 @@ const (
ParamMinValueErrCode = "ParamMinValueError"
// ParamMinLenErrCode is the error code for fields without enough elements.
ParamMinLenErrCode = "ParamMinLenError"
// ParamFormatErrCode is the error code for a field with invalid
// format or characters.
ParamFormatErrCode = "ParamFormatInvalidError"
)
// Validator provides a way for types to perform validation logic on their
@ -232,3 +236,26 @@ func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
func (e *ErrParamMinLen) MinLen() int {
return e.min
}
// An ErrParamFormat represents an invalid format parameter error.
type ErrParamFormat struct {
errInvalidParam
format string
}
// NewErrParamFormat creates a new invalid format parameter error.
func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
return &ErrParamFormat{
errInvalidParam: errInvalidParam{
code: ParamFormatErrCode,
field: field,
msg: fmt.Sprintf("format %v, %v", format, value),
},
format: format,
}
}
// Format returns the field's required format.
func (e *ErrParamFormat) Format() string {
return e.format
}
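A hedged sketch of how a hand-written Validate method could surface the new error type through the request package's existing ErrInvalidParams collector; the Input shape and the 12-digit account-ID rule are assumptions, not SDK code (imports: regexp, github.com/aws/aws-sdk-go/aws/request):

var accountIDRe = regexp.MustCompile(`^[0-9]{12}$`)

// Input is a hypothetical request shape.
type Input struct{ AccountID string }

func (i *Input) Validate() error {
	invalid := request.ErrInvalidParams{Context: "Input"}
	if !accountIDRe.MatchString(i.AccountID) {
		invalid.Add(request.NewErrParamFormat("AccountID", accountIDRe.String(), i.AccountID))
	}
	if invalid.Len() > 0 {
		return invalid
	}
	return nil
}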


@ -19,8 +19,26 @@ import (
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
const (
// ErrCodeSharedConfig represents an error that occurs in the shared
// configuration logic
ErrCodeSharedConfig = "SharedConfigErr"
)
// ErrSharedConfigSourceCollision will be returned if a section contains both
// source_profile and credential_source
var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
// variables are empty and Environment was set as the credential source
var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
// A Session provides a central location to create service clients from and
// store configurations and request handlers for those services.
//
@ -436,6 +454,57 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
// Configure credentials if not already set
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
// inspect the profile to see if a credential source has been specified.
if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 {
// if both credential_source and source_profile have been set, return an error
// as this is undefined behavior.
if len(sharedCfg.AssumeRole.SourceProfile) > 0 {
return ErrSharedConfigSourceCollision
}
// valid credential source values
const (
credSourceEc2Metadata = "Ec2InstanceMetadata"
credSourceEnvironment = "Environment"
credSourceECSContainer = "EcsContainer"
)
switch sharedCfg.AssumeRole.CredentialSource {
case credSourceEc2Metadata:
cfgCp := *cfg
p := defaults.RemoteCredProvider(cfgCp, handlers)
cfgCp.Credentials = credentials.NewCredentials(p)
if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
// AssumeRole Token provider is required if doing Assume Role
// with MFA.
return AssumeRoleTokenProviderNotSetError{}
}
cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
case credSourceEnvironment:
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
envCfg.Creds,
)
case credSourceECSContainer:
if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
return ErrSharedConfigECSContainerEnvVarEmpty
}
cfgCp := *cfg
p := defaults.RemoteCredProvider(cfgCp, handlers)
creds := credentials.NewCredentials(p)
cfg.Credentials = creds
default:
return ErrSharedConfigInvalidCredSource
}
return nil
}
if len(envCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
envCfg.Creds,
@ -445,32 +514,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.AssumeRoleSource.Creds,
)
if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
// AssumeRole Token provider is required if doing Assume Role
// with MFA.
return AssumeRoleTokenProviderNotSetError{}
}
cfg.Credentials = stscreds.NewCredentials(
&Session{
Config: &cfgCp,
Handlers: handlers.Copy(),
},
sharedCfg.AssumeRole.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
// Assume role with external ID
if len(sharedCfg.AssumeRole.ExternalID) > 0 {
opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
}
// Assume role with MFA
if len(sharedCfg.AssumeRole.MFASerial) > 0 {
opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
}
},
)
cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.Creds,
@ -493,6 +544,30 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
return nil
}
func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials {
return stscreds.NewCredentials(
&Session{
Config: &cfg,
Handlers: handlers.Copy(),
},
sharedCfg.AssumeRole.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
// Assume role with external ID
if len(sharedCfg.AssumeRole.ExternalID) > 0 {
opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
}
// Assume role with MFA
if len(sharedCfg.AssumeRole.MFASerial) > 0 {
opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
}
},
)
}
// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the
// MFAToken option is not set and shared config is configured to assume a
// role with an MFA token.


@ -16,11 +16,12 @@ const (
sessionTokenKey = `aws_session_token` // optional
// Assume Role Credentials group
roleArnKey = `role_arn` // group required
sourceProfileKey = `source_profile` // group required
externalIDKey = `external_id` // optional
mfaSerialKey = `mfa_serial` // optional
roleSessionNameKey = `role_session_name` // optional
roleArnKey = `role_arn` // group required
sourceProfileKey = `source_profile` // group required (or credential_source)
credentialSourceKey = `credential_source` // group required (or source_profile)
externalIDKey = `external_id` // optional
mfaSerialKey = `mfa_serial` // optional
roleSessionNameKey = `role_session_name` // optional
// Additional Config fields
regionKey = `region`
@ -32,11 +33,12 @@ const (
)
type assumeRoleConfig struct {
RoleARN string
SourceProfile string
ExternalID string
MFASerial string
RoleSessionName string
RoleARN string
SourceProfile string
CredentialSource string
ExternalID string
MFASerial string
RoleSessionName string
}
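A hedged sketch of a profile that uses the new credential_source key and of loading it through the session package; the profile name and role ARN are placeholders, and at runtime the EcsContainer source also requires AWS_CONTAINER_CREDENTIALS_RELATIVE_URI to be set (imports: log, github.com/aws/aws-sdk-go/aws/session):

// Illustrative ~/.aws/config:
//
//   [profile ecs-task]
//   role_arn          = arn:aws:iam::123456789012:role/example-role
//   credential_source = EcsContainer
//
sess, err := session.NewSessionWithOptions(session.Options{
	Profile:           "ecs-task",
	SharedConfigState: session.SharedConfigEnable, // honor the shared config file
})
if err != nil {
	log.Fatal(err)
}
_ = sess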
// sharedConfig represents the configuration fields of the SDK config files.
@ -127,6 +129,13 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
var assumeRoleSrc sharedConfig
if len(cfg.AssumeRole.CredentialSource) > 0 {
// setAssumeRoleSource is only called when source_profile is found.
// If both source_profile and credential_source are set, then
// ErrSharedConfigSourceCollision will be returned
return ErrSharedConfigSourceCollision
}
// Multiple-level assume role chains are not supported
if cfg.AssumeRole.SourceProfile == origProfile {
assumeRoleSrc = *cfg
@ -195,13 +204,16 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e
// Assume Role
roleArn := section.Key(roleArnKey).String()
srcProfile := section.Key(sourceProfileKey).String()
if len(roleArn) > 0 && len(srcProfile) > 0 {
credentialSource := section.Key(credentialSourceKey).String()
hasSource := len(srcProfile) > 0 || len(credentialSource) > 0
if len(roleArn) > 0 && hasSource {
cfg.AssumeRole = assumeRoleConfig{
RoleARN: roleArn,
SourceProfile: srcProfile,
ExternalID: section.Key(externalIDKey).String(),
MFASerial: section.Key(mfaSerialKey).String(),
RoleSessionName: section.Key(roleSessionNameKey).String(),
RoleARN: roleArn,
SourceProfile: srcProfile,
CredentialSource: credentialSource,
ExternalID: section.Key(externalIDKey).String(),
MFASerial: section.Key(mfaSerialKey).String(),
RoleSessionName: section.Key(roleSessionNameKey).String(),
}
}


@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.15.47"
const SDKVersion = "1.15.54"


@ -18,7 +18,7 @@ const opBatchGetItem = "BatchGetItem"
// BatchGetItemRequest generates a "aws/request.Request" representing the
// client's request for the BatchGetItem operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -213,7 +213,7 @@ const opBatchWriteItem = "BatchWriteItem"
// BatchWriteItemRequest generates a "aws/request.Request" representing the
// client's request for the BatchWriteItem operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -379,7 +379,7 @@ const opCreateBackup = "CreateBackup"
// CreateBackupRequest generates a "aws/request.Request" representing the
// client's request for the CreateBackup operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -514,7 +514,7 @@ const opCreateGlobalTable = "CreateGlobalTable"
// CreateGlobalTableRequest generates a "aws/request.Request" representing the
// client's request for the CreateGlobalTable operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -645,7 +645,7 @@ const opCreateTable = "CreateTable"
// CreateTableRequest generates a "aws/request.Request" representing the
// client's request for the CreateTable operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -756,7 +756,7 @@ const opDeleteBackup = "DeleteBackup"
// DeleteBackupRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBackup operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -857,7 +857,7 @@ const opDeleteItem = "DeleteItem"
// DeleteItemRequest generates a "aws/request.Request" representing the
// client's request for the DeleteItem operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -968,7 +968,7 @@ const opDeleteTable = "DeleteTable"
// DeleteTableRequest generates a "aws/request.Request" representing the
// client's request for the DeleteTable operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1086,7 +1086,7 @@ const opDescribeBackup = "DescribeBackup"
// DescribeBackupRequest generates a "aws/request.Request" representing the
// client's request for the DescribeBackup operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1170,7 +1170,7 @@ const opDescribeContinuousBackups = "DescribeContinuousBackups"
// DescribeContinuousBackupsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeContinuousBackups operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1265,7 +1265,7 @@ const opDescribeEndpoints = "DescribeEndpoints"
// DescribeEndpointsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeEndpoints operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1337,7 +1337,7 @@ const opDescribeGlobalTable = "DescribeGlobalTable"
// DescribeGlobalTableRequest generates a "aws/request.Request" representing the
// client's request for the DescribeGlobalTable operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1419,7 +1419,7 @@ const opDescribeGlobalTableSettings = "DescribeGlobalTableSettings"
// DescribeGlobalTableSettingsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeGlobalTableSettings operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1501,7 +1501,7 @@ const opDescribeLimits = "DescribeLimits"
// DescribeLimitsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeLimits operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1636,7 +1636,7 @@ const opDescribeTable = "DescribeTable"
// DescribeTableRequest generates a "aws/request.Request" representing the
// client's request for the DescribeTable operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1727,7 +1727,7 @@ const opDescribeTimeToLive = "DescribeTimeToLive"
// DescribeTimeToLiveRequest generates a "aws/request.Request" representing the
// client's request for the DescribeTimeToLive operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1810,7 +1810,7 @@ const opGetItem = "GetItem"
// GetItemRequest generates a "aws/request.Request" representing the
// client's request for the GetItem operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1908,7 +1908,7 @@ const opListBackups = "ListBackups"
// ListBackupsRequest generates a "aws/request.Request" representing the
// client's request for the ListBackups operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1995,7 +1995,7 @@ const opListGlobalTables = "ListGlobalTables"
// ListGlobalTablesRequest generates a "aws/request.Request" representing the
// client's request for the ListGlobalTables operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2074,7 +2074,7 @@ const opListTables = "ListTables"
// ListTablesRequest generates a "aws/request.Request" representing the
// client's request for the ListTables operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2211,7 +2211,7 @@ const opListTagsOfResource = "ListTagsOfResource"
// ListTagsOfResourceRequest generates a "aws/request.Request" representing the
// client's request for the ListTagsOfResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2298,7 +2298,7 @@ const opPutItem = "PutItem"
// PutItemRequest generates a "aws/request.Request" representing the
// client's request for the PutItem operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2439,7 +2439,7 @@ const opQuery = "Query"
// QueryRequest generates a "aws/request.Request" representing the
// client's request for the Query operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2633,7 +2633,7 @@ const opRestoreTableFromBackup = "RestoreTableFromBackup"
// RestoreTableFromBackupRequest generates a "aws/request.Request" representing the
// client's request for the RestoreTableFromBackup operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2755,7 +2755,7 @@ const opRestoreTableToPointInTime = "RestoreTableToPointInTime"
// RestoreTableToPointInTimeRequest generates a "aws/request.Request" representing the
// client's request for the RestoreTableToPointInTime operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2901,7 +2901,7 @@ const opScan = "Scan"
// ScanRequest generates a "aws/request.Request" representing the
// client's request for the Scan operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3075,7 +3075,7 @@ const opTagResource = "TagResource"
// TagResourceRequest generates a "aws/request.Request" representing the
// client's request for the TagResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3184,7 +3184,7 @@ const opUntagResource = "UntagResource"
// UntagResourceRequest generates a "aws/request.Request" representing the
// client's request for the UntagResource operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3291,7 +3291,7 @@ const opUpdateContinuousBackups = "UpdateContinuousBackups"
// UpdateContinuousBackupsRequest generates a "aws/request.Request" representing the
// client's request for the UpdateContinuousBackups operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3387,7 +3387,7 @@ const opUpdateGlobalTable = "UpdateGlobalTable"
// UpdateGlobalTableRequest generates a "aws/request.Request" representing the
// client's request for the UpdateGlobalTable operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3498,7 +3498,7 @@ const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings"
// UpdateGlobalTableSettingsRequest generates a "aws/request.Request" representing the
// client's request for the UpdateGlobalTableSettings operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3604,7 +3604,7 @@ const opUpdateItem = "UpdateItem"
// UpdateItemRequest generates a "aws/request.Request" representing the
// client's request for the UpdateItem operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3709,7 +3709,7 @@ const opUpdateTable = "UpdateTable"
// UpdateTableRequest generates a "aws/request.Request" representing the
// client's request for the UpdateTable operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3827,7 +3827,7 @@ const opUpdateTimeToLive = "UpdateTimeToLive"
// UpdateTimeToLiveRequest generates a "aws/request.Request" representing the
// client's request for the UpdateTimeToLive operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -27,7 +27,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload"
// AbortMultipartUploadRequest generates a "aws/request.Request" representing the
// client's request for the AbortMultipartUpload operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -110,7 +110,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload"
// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the
// client's request for the CompleteMultipartUpload operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -184,7 +184,7 @@ const opCopyObject = "CopyObject"
// CopyObjectRequest generates a "aws/request.Request" representing the
// client's request for the CopyObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -264,7 +264,7 @@ const opCreateBucket = "CreateBucket"
// CreateBucketRequest generates a "aws/request.Request" representing the
// client's request for the CreateBucket operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -346,7 +346,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload"
// CreateMultipartUploadRequest generates a "aws/request.Request" representing the
// client's request for the CreateMultipartUpload operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -426,7 +426,7 @@ const opDeleteBucket = "DeleteBucket"
// DeleteBucketRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucket operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -503,7 +503,7 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration
// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -580,7 +580,7 @@ const opDeleteBucketCors = "DeleteBucketCors"
// DeleteBucketCorsRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketCors operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -656,7 +656,7 @@ const opDeleteBucketEncryption = "DeleteBucketEncryption"
// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketEncryption operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -732,7 +732,7 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration
// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -809,7 +809,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketLifecycle operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -885,7 +885,7 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -962,7 +962,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy"
// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketPolicy operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1038,7 +1038,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication"
// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketReplication operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1114,7 +1114,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging"
// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1190,7 +1190,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite"
// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketWebsite operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1266,7 +1266,7 @@ const opDeleteObject = "DeleteObject"
// DeleteObjectRequest generates a "aws/request.Request" representing the
// client's request for the DeleteObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1342,7 +1342,7 @@ const opDeleteObjectTagging = "DeleteObjectTagging"
// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
// client's request for the DeleteObjectTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1416,7 +1416,7 @@ const opDeleteObjects = "DeleteObjects"
// DeleteObjectsRequest generates a "aws/request.Request" representing the
// client's request for the DeleteObjects operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1491,7 +1491,7 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAccelerateConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1565,7 +1565,7 @@ const opGetBucketAcl = "GetBucketAcl"
// GetBucketAclRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAcl operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1639,7 +1639,7 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"
// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1714,7 +1714,7 @@ const opGetBucketCors = "GetBucketCors"
// GetBucketCorsRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketCors operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1788,7 +1788,7 @@ const opGetBucketEncryption = "GetBucketEncryption"
// GetBucketEncryptionRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketEncryption operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1862,7 +1862,7 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration"
// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketInventoryConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -1937,7 +1937,7 @@ const opGetBucketLifecycle = "GetBucketLifecycle"
// GetBucketLifecycleRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLifecycle operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2020,7 +2020,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLifecycleConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2094,7 +2094,7 @@ const opGetBucketLocation = "GetBucketLocation"
// GetBucketLocationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLocation operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2168,7 +2168,7 @@ const opGetBucketLogging = "GetBucketLogging"
// GetBucketLoggingRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLogging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2243,7 +2243,7 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration"
// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketMetricsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2318,7 +2318,7 @@ const opGetBucketNotification = "GetBucketNotification"
// GetBucketNotificationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketNotification operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2401,7 +2401,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration
// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketNotificationConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2475,7 +2475,7 @@ const opGetBucketPolicy = "GetBucketPolicy"
// GetBucketPolicyRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketPolicy operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2549,7 +2549,7 @@ const opGetBucketReplication = "GetBucketReplication"
// GetBucketReplicationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketReplication operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2623,7 +2623,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment"
// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketRequestPayment operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2697,7 +2697,7 @@ const opGetBucketTagging = "GetBucketTagging"
// GetBucketTaggingRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2771,7 +2771,7 @@ const opGetBucketVersioning = "GetBucketVersioning"
// GetBucketVersioningRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketVersioning operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2845,7 +2845,7 @@ const opGetBucketWebsite = "GetBucketWebsite"
// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketWebsite operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2919,7 +2919,7 @@ const opGetObject = "GetObject"
// GetObjectRequest generates a "aws/request.Request" representing the
// client's request for the GetObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -2998,7 +2998,7 @@ const opGetObjectAcl = "GetObjectAcl"
// GetObjectAclRequest generates a "aws/request.Request" representing the
// client's request for the GetObjectAcl operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3077,7 +3077,7 @@ const opGetObjectTagging = "GetObjectTagging"
// GetObjectTaggingRequest generates a "aws/request.Request" representing the
// client's request for the GetObjectTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3151,7 +3151,7 @@ const opGetObjectTorrent = "GetObjectTorrent"
// GetObjectTorrentRequest generates a "aws/request.Request" representing the
// client's request for the GetObjectTorrent operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3225,7 +3225,7 @@ const opHeadBucket = "HeadBucket"
// HeadBucketRequest generates a "aws/request.Request" representing the
// client's request for the HeadBucket operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3307,7 +3307,7 @@ const opHeadObject = "HeadObject"
// HeadObjectRequest generates a "aws/request.Request" representing the
// client's request for the HeadObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3386,7 +3386,7 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations"
// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the
// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3460,7 +3460,7 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
// client's request for the ListBucketInventoryConfigurations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3534,7 +3534,7 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the
// client's request for the ListBucketMetricsConfigurations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3608,7 +3608,7 @@ const opListBuckets = "ListBuckets"
// ListBucketsRequest generates a "aws/request.Request" representing the
// client's request for the ListBuckets operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3682,7 +3682,7 @@ const opListMultipartUploads = "ListMultipartUploads"
// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
// client's request for the ListMultipartUploads operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3812,7 +3812,7 @@ const opListObjectVersions = "ListObjectVersions"
// ListObjectVersionsRequest generates a "aws/request.Request" representing the
// client's request for the ListObjectVersions operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -3942,7 +3942,7 @@ const opListObjects = "ListObjects"
// ListObjectsRequest generates a "aws/request.Request" representing the
// client's request for the ListObjects operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4079,7 +4079,7 @@ const opListObjectsV2 = "ListObjectsV2"
// ListObjectsV2Request generates a "aws/request.Request" representing the
// client's request for the ListObjectsV2 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4217,7 +4217,7 @@ const opListParts = "ListParts"
// ListPartsRequest generates a "aws/request.Request" representing the
// client's request for the ListParts operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4347,7 +4347,7 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketAccelerateConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4423,7 +4423,7 @@ const opPutBucketAcl = "PutBucketAcl"
// PutBucketAclRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketAcl operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4499,7 +4499,7 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"
// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4576,7 +4576,7 @@ const opPutBucketCors = "PutBucketCors"
// PutBucketCorsRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketCors operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4652,7 +4652,7 @@ const opPutBucketEncryption = "PutBucketEncryption"
// PutBucketEncryptionRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketEncryption operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4729,7 +4729,7 @@ const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketInventoryConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4806,7 +4806,7 @@ const opPutBucketLifecycle = "PutBucketLifecycle"
// PutBucketLifecycleRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketLifecycle operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4891,7 +4891,7 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketLifecycleConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -4968,7 +4968,7 @@ const opPutBucketLogging = "PutBucketLogging"
// PutBucketLoggingRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketLogging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5046,7 +5046,7 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"
// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketMetricsConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5123,7 +5123,7 @@ const opPutBucketNotification = "PutBucketNotification"
// PutBucketNotificationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketNotification operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5208,7 +5208,7 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration
// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketNotificationConfiguration operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5284,7 +5284,7 @@ const opPutBucketPolicy = "PutBucketPolicy"
// PutBucketPolicyRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketPolicy operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5361,7 +5361,7 @@ const opPutBucketReplication = "PutBucketReplication"
// PutBucketReplicationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketReplication operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5439,7 +5439,7 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment"
// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketRequestPayment operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5519,7 +5519,7 @@ const opPutBucketTagging = "PutBucketTagging"
// PutBucketTaggingRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5595,7 +5595,7 @@ const opPutBucketVersioning = "PutBucketVersioning"
// PutBucketVersioningRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketVersioning operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5672,7 +5672,7 @@ const opPutBucketWebsite = "PutBucketWebsite"
// PutBucketWebsiteRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketWebsite operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5748,7 +5748,7 @@ const opPutObject = "PutObject"
// PutObjectRequest generates a "aws/request.Request" representing the
// client's request for the PutObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5822,7 +5822,7 @@ const opPutObjectAcl = "PutObjectAcl"
// PutObjectAclRequest generates a "aws/request.Request" representing the
// client's request for the PutObjectAcl operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5902,7 +5902,7 @@ const opPutObjectTagging = "PutObjectTagging"
// PutObjectTaggingRequest generates a "aws/request.Request" representing the
// client's request for the PutObjectTagging operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -5976,7 +5976,7 @@ const opRestoreObject = "RestoreObject"
// RestoreObjectRequest generates a "aws/request.Request" representing the
// client's request for the RestoreObject operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -6055,7 +6055,7 @@ const opSelectObjectContent = "SelectObjectContent"
// SelectObjectContentRequest generates a "aws/request.Request" representing the
// client's request for the SelectObjectContent operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -6137,7 +6137,7 @@ const opUploadPart = "UploadPart"
// UploadPartRequest generates a "aws/request.Request" representing the
// client's request for the UploadPart operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -6217,7 +6217,7 @@ const opUploadPartCopy = "UploadPartCopy"
// UploadPartCopyRequest generates a "aws/request.Request" representing the
// client's request for the UploadPartCopy operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.

View File

@ -3,6 +3,7 @@ package s3
import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3err"
)
func init() {
@ -21,6 +22,7 @@ func defaultInitClientFn(c *client.Client) {
// S3 uses custom error unmarshaling logic
c.Handlers.UnmarshalError.Clear()
c.Handlers.UnmarshalError.PushBack(unmarshalError)
c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
}
func defaultInitRequestFn(r *request.Request) {
@ -42,6 +44,7 @@ func defaultInitRequestFn(r *request.Request) {
r.Handlers.Validate.PushFront(populateLocationConstraint)
case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
case opPutObject, opUploadPart:
r.Handlers.Build.PushBack(computeBodyHashes)
// Disabled until #1837 root issue is resolved.
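The diff pushes `s3err.RequestFailureWrapperHandler()` onto the unmarshal handler lists, but the handler's body is not shown here. A hedged sketch of what such a named wrapper handler could look like; the package, handler name, and logic below are assumptions, not the vendored implementation:

```go
package s3errsketch

import (
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
)

// RequestFailureWrapperHandler sketches a named handler that wraps r.Error
// with the HTTP status code and request ID so callers can type-assert to
// awserr.RequestFailure. The real vendored handler may differ.
func RequestFailureWrapperHandler() request.NamedHandler {
	return request.NamedHandler{
		Name: "example.RequestFailureWrapper", // hypothetical handler name
		Fn: func(r *request.Request) {
			if err, ok := r.Error.(awserr.Error); ok && r.HTTPResponse != nil {
				r.Error = awserr.NewRequestFailure(
					err, r.HTTPResponse.StatusCode, r.RequestID)
			}
		},
	}
}
```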

View File

@ -13,7 +13,11 @@ import (
func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "unable to read response body", err)
r.Error = awserr.NewRequestFailure(
awserr.New("SerializationError", "unable to read response body", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
body := bytes.NewReader(b)
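With the read failure now wrapped via awserr.NewRequestFailure, callers can recover the HTTP status code and request ID through the awserr.RequestFailure interface. A hedged usage sketch; the HeadBucket call and bucket name are only for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	_, err := svc.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String("example-missing-bucket"), // illustrative
	})
	// Errors wrapped as request failures carry HTTP-level context.
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		fmt.Printf("code=%s status=%d request-id=%s\n",
			reqErr.Code(), reqErr.StatusCode(), reqErr.RequestID())
	}
}
```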

View File

@ -23,22 +23,17 @@ func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2")
// Bucket exists in a different region, and the request
// needs to be made to the correct region.
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
r.Error = requestFailure{
RequestFailure: awserr.NewRequestFailure(
awserr.New("BucketRegionError",
fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
aws.StringValue(r.Config.Region)),
nil),
r.HTTPResponse.StatusCode,
r.RequestID,
),
hostID: hostID,
}
r.Error = awserr.NewRequestFailure(
awserr.New("BucketRegionError",
fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
aws.StringValue(r.Config.Region)),
nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
@ -63,14 +58,11 @@ func unmarshalError(r *request.Request) {
errMsg = statusText
}
r.Error = requestFailure{
RequestFailure: awserr.NewRequestFailure(
awserr.New(errCode, errMsg, err),
r.HTTPResponse.StatusCode,
r.RequestID,
),
hostID: hostID,
}
r.Error = awserr.NewRequestFailure(
awserr.New(errCode, errMsg, err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
}
// A RequestFailure provides access to the S3 Request ID and Host ID values
@ -83,21 +75,3 @@ type RequestFailure interface {
// Host ID is the S3 Host ID needed for debug, and contacting support
HostID() string
}
type requestFailure struct {
awserr.RequestFailure
hostID string
}
func (r requestFailure) Error() string {
extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
r.StatusCode(), r.RequestID(), r.hostID)
return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}
func (r requestFailure) String() string {
return r.Error()
}
func (r requestFailure) HostID() string {
return r.hostID
}
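Callers that need the S3 Host ID keep working through the exported RequestFailure interface above; only the concrete implementation moved (into the internal s3err package, per this diff). A small hedged sketch of how a caller might use it:

```go
package s3errsketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

// LogS3Failure prints the S3 request and host IDs when err carries them;
// err can come from any S3 operation.
func LogS3Failure(err error) {
	if s3Err, ok := err.(s3.RequestFailure); ok {
		fmt.Printf("request-id=%s host-id=%s\n",
			s3Err.RequestID(), s3Err.HostID())
	}
}
```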

View File

@ -15,7 +15,7 @@ const opAssumeRole = "AssumeRole"
// AssumeRoleRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRole operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -209,7 +209,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithSAML operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -391,7 +391,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -602,7 +602,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
// client's request for the DecodeAuthorizationMessage operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -714,7 +714,7 @@ const opGetCallerIdentity = "GetCallerIdentity"
// GetCallerIdentityRequest generates a "aws/request.Request" representing the
// client's request for the GetCallerIdentity operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -789,7 +789,7 @@ const opGetFederationToken = "GetFederationToken"
// GetFederationTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetFederationToken operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -958,7 +958,7 @@ const opGetSessionToken = "GetSessionToken"
// GetSessionTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetSessionToken operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.

View File

@ -97,7 +97,7 @@ func main() {
## Notes:
* All options are *strings* with the following exceptions:
* `cfg.Log` - an instance of [`log.Logger`](https://golang.org/pkg/log/#Logger) or something else (e.g. [logrus](https://github.com/sirupsen/logrus)) which can be used to satisfy the interface requirements.
* `cfg.Log` - an instance of [`log.Logger`](https://golang.org/pkg/log/#Logger) or something else (e.g. [logrus](https://github.com/Sirupsen/logrus)) which can be used to satisfy the interface requirements.
* `cfg.Debug` - a boolean true|false.
* At a minimum, one of either `API.TokenKey` or `Check.SubmissionURL` is **required** for cgm to function.
* Check management can be disabled by providing a `Check.SubmissionURL` without an `API.TokenKey`. Note: the supplied URL needs to use HTTP, or the broker needs to be running with a certificate which can be verified; otherwise, the `API.TokenKey` will be required to retrieve the correct CA certificate to validate the broker's certificate for the SSL connection.
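To make the notes concrete, a hedged configuration sketch; the field paths follow the README's `cfg.*` naming (`API.TokenKey` lives under `CheckManager` in the package) and the token value is illustrative:

```go
package main

import (
	"log"
	"os"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

func main() {
	cfg := &cgm.Config{}
	cfg.Log = log.New(os.Stderr, "cgm ", log.LstdFlags) // satisfies the logger interface
	cfg.Debug = true                                    // boolean, not a string
	cfg.CheckManager.API.TokenKey = "abc-123"           // illustrative token

	metrics, err := cgm.NewCirconusMetrics(cfg)
	if err != nil {
		log.Fatal(err)
	}
	metrics.Increment("example`counter")
}
```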

View File

@ -1,87 +0,0 @@
# etcd/clientv3
[![Docs](https://readthedocs.org/projects/etcd/badge/?version=latest&style=flat-square)](https://etcd.readthedocs.io/en/latest/?badge=latest)
[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3)
`etcd/clientv3` is the official Go etcd client for v3.
See https://etcd.readthedocs.io/en/latest for the latest client architecture.
## Install
```bash
go get go.etcd.io/etcd/clientv3
```
## Get started
Create a client using `clientv3.New`:
```go
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
DialTimeout: 5 * time.Second,
})
if err != nil {
// handle error!
}
defer cli.Close()
```
etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls, and `clientv3` uses
[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client
after using it; if the client is not closed, the connection leaks goroutines. To specify a client
request timeout, pass `context.WithTimeout` to APIs:
```go
ctx, cancel := context.WithTimeout(context.Background(), timeout)
resp, err := cli.Put(ctx, "sample_key", "sample_value")
cancel()
if err != nil {
// handle error!
}
// use the response
```
For full compatibility, it is recommended to build against etcd's vendored packages, using tools like `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
## Error Handling
The etcd client returns two types of errors:
1. context error: canceled or deadline exceeded.
2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes).
Here is example code for handling client errors:
```go
resp, err := cli.Put(ctx, "", "")
if err != nil {
switch err {
case context.Canceled:
log.Fatalf("ctx is canceled by another routine: %v", err)
case context.DeadlineExceeded:
log.Fatalf("ctx is attached with a deadline is exceeded: %v", err)
case rpctypes.ErrEmptyKey:
log.Fatalf("client-side error: %v", err)
default:
log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
}
}
```
## Metrics
The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/master/clientv3/example_metrics_test.go).
## Namespacing
The [namespace](https://godoc.org/go.etcd.io/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
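A short sketch of those wrappers, scoping every request under `my-prefix/`:

```go
import "go.etcd.io/etcd/clientv3/namespace"

// Wrap the client's KV, Watcher, and Lease interfaces with a shared prefix.
cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/")
cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")
```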
## Request size limit
The client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize`, in bytes. If none is given, the client request send limit defaults to 2 MiB (including gRPC overhead bytes) and the receive limit defaults to `math.MaxInt32`.
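For example, a hedged configuration raising both limits (values are illustrative):

```go
cli, err := clientv3.New(clientv3.Config{
	Endpoints:          []string{"localhost:2379"},
	DialTimeout:        5 * time.Second,
	MaxCallSendMsgSize: 8 * 1024 * 1024,  // 8 MiB send limit
	MaxCallRecvMsgSize: 16 * 1024 * 1024, // 16 MiB receive limit
})
if err != nil {
	// handle error!
}
defer cli.Close()
```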
## Examples
More code examples can be found at [GoDoc](https://godoc.org/go.etcd.io/etcd/clientv3).

View File

@ -1,232 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"fmt"
"strings"
"go.etcd.io/etcd/auth/authpb"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"google.golang.org/grpc"
)
type (
AuthEnableResponse pb.AuthEnableResponse
AuthDisableResponse pb.AuthDisableResponse
AuthenticateResponse pb.AuthenticateResponse
AuthUserAddResponse pb.AuthUserAddResponse
AuthUserDeleteResponse pb.AuthUserDeleteResponse
AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
AuthUserGetResponse pb.AuthUserGetResponse
AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
AuthRoleAddResponse pb.AuthRoleAddResponse
AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
AuthRoleGetResponse pb.AuthRoleGetResponse
AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
AuthUserListResponse pb.AuthUserListResponse
AuthRoleListResponse pb.AuthRoleListResponse
PermissionType authpb.Permission_Type
Permission authpb.Permission
)
const (
PermRead = authpb.READ
PermWrite = authpb.WRITE
PermReadWrite = authpb.READWRITE
)
type Auth interface {
// AuthEnable enables auth of an etcd cluster.
AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
// AuthDisable disables auth of an etcd cluster.
AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
// UserAdd adds a new user to an etcd cluster.
UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
// UserDelete deletes a user from an etcd cluster.
UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
// UserChangePassword changes a password of a user.
UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
// UserGrantRole grants a role to a user.
UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
// UserGet gets detailed information about a user.
UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
// UserList gets a list of all users.
UserList(ctx context.Context) (*AuthUserListResponse, error)
// UserRevokeRole revokes a role of a user.
UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
// RoleAdd adds a new role to an etcd cluster.
RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
// RoleGrantPermission grants a permission to a role.
RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
// RoleGet gets detailed information about a role.
RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
// RoleList gets a list of all roles.
RoleList(ctx context.Context) (*AuthRoleListResponse, error)
// RoleRevokePermission revokes a permission from a role.
RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
// RoleDelete deletes a role.
RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
}
type authClient struct {
remote pb.AuthClient
callOpts []grpc.CallOption
}
func NewAuth(c *Client) Auth {
api := &authClient{remote: RetryAuthClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
return (*AuthEnableResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
return (*AuthDisableResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
return (*AuthUserListResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
perm := &authpb.Permission{
Key: []byte(key),
RangeEnd: []byte(rangeEnd),
PermType: authpb.Permission_Type(permType),
}
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...)
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
}
func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
}
func StrToPermissionType(s string) (PermissionType, error) {
val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
if ok {
return PermissionType(val), nil
}
return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
}
type authenticator struct {
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
callOpts []grpc.CallOption
}
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthenticateResponse)(resp), toErr(ctx, err)
}
func (auth *authenticator) close() {
auth.conn.Close()
}
func newAuthenticator(ctx context.Context, target string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
conn, err := grpc.DialContext(ctx, target, opts...)
if err != nil {
return nil, err
}
api := &authenticator{
conn: conn,
remote: pb.NewAuthClient(conn),
}
if c != nil {
api.callOpts = c.callOpts
}
return api, nil
}
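A hedged usage sketch of the Auth API defined in this file; the endpoint, user name, role name, and password are illustrative:

```go
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
if err != nil {
	// handle error!
}
defer cli.Close()

auth := clientv3.NewAuth(cli)
ctx := context.Background()
if _, err := auth.UserAdd(ctx, "root", "secret"); err != nil {
	// handle error!
}
if _, err := auth.UserGrantRole(ctx, "root", "root"); err != nil {
	// handle error!
}
if _, err := auth.AuthEnable(ctx); err != nil {
	// handle error!
}
```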

View File

@ -1,673 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils"
"go.etcd.io/etcd/clientv3/balancer"
"go.etcd.io/etcd/clientv3/balancer/picker"
"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
var (
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
ErrOldCluster = errors.New("etcdclient: old cluster version")
roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String())
)
func init() {
lg := zap.NewNop()
if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
var err error
lg, err = zap.NewProductionConfig().Build() // info level logging
if err != nil {
panic(err)
}
}
balancer.RegisterBuilder(balancer.Config{
Policy: picker.RoundrobinBalanced,
Name: roundRobinBalancerName,
Logger: lg,
})
}
// Client provides and manages an etcd v3 client session.
type Client struct {
Cluster
KV
Lease
Watcher
Auth
Maintenance
conn *grpc.ClientConn
cfg Config
creds *credentials.TransportCredentials
balancer balancer.Balancer
resolverGroup *endpoint.ResolverGroup
mu *sync.Mutex
ctx context.Context
cancel context.CancelFunc
// Username is a user name for authentication.
Username string
// Password is a password for authentication.
Password string
// tokenCred is an instance of WithPerRPCCredentials()'s argument
tokenCred *authTokenCredential
callOpts []grpc.CallOption
lg *zap.Logger
}
// New creates a new etcdv3 client from a given configuration.
func New(cfg Config) (*Client, error) {
if len(cfg.Endpoints) == 0 {
return nil, ErrNoAvailableEndpoints
}
return newClient(&cfg)
}
// NewCtxClient creates a client with a context but no underlying grpc
// connection. This is useful for embedded cases that override the
// service interface implementations and do not need connection management.
func NewCtxClient(ctx context.Context) *Client {
cctx, cancel := context.WithCancel(ctx)
return &Client{ctx: cctx, cancel: cancel}
}
// NewFromURL creates a new etcdv3 client from a URL.
func NewFromURL(url string) (*Client, error) {
return New(Config{Endpoints: []string{url}})
}
// NewFromURLs creates a new etcdv3 client from URLs.
func NewFromURLs(urls []string) (*Client, error) {
return New(Config{Endpoints: urls})
}
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.cancel()
c.Watcher.Close()
c.Lease.Close()
if c.resolverGroup != nil {
c.resolverGroup.Close()
}
if c.conn != nil {
return toErr(c.ctx, c.conn.Close())
}
return c.ctx.Err()
}
// Ctx is a context for "out of band" messages (e.g., for sending
// "clean up" message when another context is canceled). It is
// canceled on client Close().
func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() (eps []string) {
// copy the slice; protect original endpoints from being changed
eps = make([]string, len(c.cfg.Endpoints))
copy(eps, c.cfg.Endpoints)
return
}
// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
c.mu.Lock()
defer c.mu.Unlock()
c.cfg.Endpoints = eps
c.resolverGroup.SetEndpoints(eps)
}
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
func (c *Client) Sync(ctx context.Context) error {
mresp, err := c.MemberList(ctx)
if err != nil {
return err
}
var eps []string
for _, m := range mresp.Members {
eps = append(eps, m.ClientURLs...)
}
c.SetEndpoints(eps...)
return nil
}
func (c *Client) autoSync() {
if c.cfg.AutoSyncInterval == time.Duration(0) {
return
}
for {
select {
case <-c.ctx.Done():
return
case <-time.After(c.cfg.AutoSyncInterval):
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
err := c.Sync(ctx)
cancel()
if err != nil && err != c.ctx.Err() {
lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err)
}
}
}
}
type authTokenCredential struct {
token string
tokenMu *sync.RWMutex
}
func (cred authTokenCredential) RequireTransportSecurity() bool {
return false
}
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
cred.tokenMu.RLock()
defer cred.tokenMu.RUnlock()
return map[string]string{
rpctypes.TokenFieldNameGRPC: cred.token,
}, nil
}
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
creds = c.creds
switch scheme {
case "unix":
case "http":
creds = nil
case "https", "unixs":
if creds != nil {
break
}
tlsconfig := &tls.Config{}
emptyCreds := credentials.NewTLS(tlsconfig)
creds = &emptyCreds
default:
creds = nil
}
return creds
}
// dialSetupOpts gives the dial opts prior to any authentication.
func (c *Client) dialSetupOpts(creds *credentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
if c.cfg.DialKeepAliveTime > 0 {
params := keepalive.ClientParameters{
Time: c.cfg.DialKeepAliveTime,
Timeout: c.cfg.DialKeepAliveTimeout,
PermitWithoutStream: c.cfg.PermitWithoutStream,
}
opts = append(opts, grpc.WithKeepaliveParams(params))
}
opts = append(opts, dopts...)
// Provide a net dialer that supports cancelation and timeout.
f := func(dialEp string, t time.Duration) (net.Conn, error) {
proto, host, _ := endpoint.ParseEndpoint(dialEp)
select {
case <-c.ctx.Done():
return nil, c.ctx.Err()
default:
}
dialer := &net.Dialer{Timeout: t}
return dialer.DialContext(c.ctx, proto, host)
}
opts = append(opts, grpc.WithDialer(f))
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(*creds))
} else {
opts = append(opts, grpc.WithInsecure())
}
// Interceptor retry and backoff.
// TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
// once it is available.
rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
opts = append(opts,
// Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
// Streams that are safe to retry are enabled individually.
grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
)
return opts, nil
}
// Dial connects to a single endpoint using the client's config.
func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
creds := c.directDialCreds(ep)
// Use the grpc passthrough resolver to directly dial a single endpoint.
// This resolver passes through the 'unix' and 'unixs' endpoint schemes used
// by etcd without modification, allowing us to dial endpoints directly with
// the same dial functions used for load balancer dialing.
return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
}
func (c *Client) getToken(ctx context.Context) error {
var err error // return the last error in case of failure
var auth *authenticator
for i := 0; i < len(c.cfg.Endpoints); i++ {
ep := c.cfg.Endpoints[i]
// use dial options without dopts to avoid reusing the client balancer
var dOpts []grpc.DialOption
_, host, _ := endpoint.ParseEndpoint(ep)
target := c.resolverGroup.Target(host)
creds := c.dialWithBalancerCreds(ep)
dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...)
if err != nil {
err = fmt.Errorf("failed to configure auth dialer: %v", err)
continue
}
dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName))
auth, err = newAuthenticator(ctx, target, dOpts, c)
if err != nil {
continue
}
defer auth.close()
var resp *AuthenticateResponse
resp, err = auth.authenticate(ctx, c.Username, c.Password)
if err != nil {
continue
}
c.tokenCred.tokenMu.Lock()
c.tokenCred.token = resp.Token
c.tokenCred.tokenMu.Unlock()
return nil
}
return err
}
// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host
// of the provided endpoint determines the scheme used for all endpoints of the client connection.
func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
_, host, _ := endpoint.ParseEndpoint(ep)
target := c.resolverGroup.Target(host)
creds := c.dialWithBalancerCreds(ep)
return c.dial(target, creds, dopts...)
}
// dial configures and dials any grpc balancer target.
func (c *Client) dial(target string, creds *credentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
opts, err := c.dialSetupOpts(creds, dopts...)
if err != nil {
return nil, fmt.Errorf("failed to configure dialer: %v", err)
}
if c.Username != "" && c.Password != "" {
c.tokenCred = &authTokenCredential{
tokenMu: &sync.RWMutex{},
}
ctx, cancel := c.ctx, func() {}
if c.cfg.DialTimeout > 0 {
ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
}
err = c.getToken(ctx)
if err != nil {
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
err = context.DeadlineExceeded
}
cancel()
return nil, err
}
} else {
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
}
cancel()
}
opts = append(opts, c.cfg.DialOptions...)
dctx := c.ctx
if c.cfg.DialTimeout > 0 {
var cancel context.CancelFunc
dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
}
conn, err := grpc.DialContext(dctx, target, opts...)
if err != nil {
return nil, err
}
return conn, nil
}
func (c *Client) directDialCreds(ep string) *credentials.TransportCredentials {
_, hostPort, scheme := endpoint.ParseEndpoint(ep)
creds := c.creds
if len(scheme) != 0 {
creds = c.processCreds(scheme)
if creds != nil {
c := *creds
clone := c.Clone()
// The server name must be set to the endpoint hostname without the port, since
// grpc otherwise attempts to check whether the x509 cert is valid for the
// full endpoint including the scheme and port, which fails.
host, _ := endpoint.ParseHostPort(hostPort)
clone.OverrideServerName(host)
creds = &clone
}
}
return creds
}
func (c *Client) dialWithBalancerCreds(ep string) *credentials.TransportCredentials {
_, _, scheme := endpoint.ParseEndpoint(ep)
creds := c.creds
if len(scheme) != 0 {
creds = c.processCreds(scheme)
}
return creds
}
// WithRequireLeader requires client requests to only succeed
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewOutgoingContext(ctx, md)
}
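A hedged usage sketch of WithRequireLeader, assuming an existing *clientv3.Client named cli and the rpctypes package: wrapping the request context makes the request fail fast while the cluster is leaderless instead of blocking.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	_, err := cli.Get(clientv3.WithRequireLeader(ctx), "foo")
	if err == rpctypes.ErrNoLeader {
		// the contacted member has no leader; back off or try another endpoint
	}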
func newClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = &Config{}
}
var creds *credentials.TransportCredentials
if cfg.TLS != nil {
c := credentials.NewTLS(cfg.TLS)
creds = &c
}
// use a temporary skeleton client to bootstrap first connection
baseCtx := context.TODO()
if cfg.Context != nil {
baseCtx = cfg.Context
}
ctx, cancel := context.WithCancel(baseCtx)
client := &Client{
conn: nil,
cfg: *cfg,
creds: creds,
ctx: ctx,
cancel: cancel,
mu: new(sync.Mutex),
callOpts: defaultCallOpts,
}
lcfg := DefaultLogConfig
if cfg.LogConfig != nil {
lcfg = *cfg.LogConfig
}
var err error
client.lg, err = lcfg.Build()
if err != nil {
return nil, err
}
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
}
if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
}
callOpts := []grpc.CallOption{
defaultFailFast,
defaultMaxCallSendMsgSize,
defaultMaxCallRecvMsgSize,
}
if cfg.MaxCallSendMsgSize > 0 {
callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
}
if cfg.MaxCallRecvMsgSize > 0 {
callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
}
client.callOpts = callOpts
}
// Prepare an 'endpoint://<unique-client-id>/' resolver for the client and create an endpoint target to pass
// to dial so the client knows to use this resolver.
client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", strconv.FormatInt(time.Now().UnixNano(), 36)))
if err != nil {
client.cancel()
return nil, err
}
if len(cfg.Endpoints) < 1 {
client.cancel()
client.resolverGroup.Close()
return nil, fmt.Errorf("at least one Endpoint is required in client config")
}
client.resolverGroup.SetEndpoints(cfg.Endpoints)
dialEndpoint := cfg.Endpoints[0]
// Use a provided endpoint target so that, for https:// endpoints without any tls config given,
// grpc will assume the certificate server name is the endpoint host.
conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName))
if err != nil {
client.cancel()
client.resolverGroup.Close()
return nil, err
}
// TODO: With the old grpc balancer interface, we waited until the dial timeout
// for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface?
client.conn = conn
client.Cluster = NewCluster(client)
client.KV = NewKV(client)
client.Lease = NewLease(client)
client.Watcher = NewWatcher(client)
client.Auth = NewAuth(client)
client.Maintenance = NewMaintenance(client)
if cfg.RejectOldCluster {
if err := client.checkVersion(); err != nil {
client.Close()
return nil, err
}
}
go client.autoSync()
return client, nil
}
// roundRobinQuorumBackoff backs off after every quorum's worth of round robin attempts.
// This is intended for use with a round robin load balancer.
func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
return func(attempt uint) time.Duration {
// after each round robin across quorum, backoff for our wait between duration
n := uint(len(c.Endpoints()))
quorum := (n/2 + 1)
if attempt%quorum == 0 {
c.lg.Info("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
return backoffutils.JitterUp(waitBetween, jitterFraction)
}
c.lg.Info("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
return 0
}
}
func (c *Client) checkVersion() (err error) {
var wg sync.WaitGroup
errc := make(chan error, len(c.cfg.Endpoints))
ctx, cancel := context.WithCancel(c.ctx)
if c.cfg.DialTimeout > 0 {
ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
}
wg.Add(len(c.cfg.Endpoints))
for _, ep := range c.cfg.Endpoints {
// if cluster is current, any endpoint gives a recent version
go func(e string) {
defer wg.Done()
resp, rerr := c.Status(ctx, e)
if rerr != nil {
errc <- rerr
return
}
vs := strings.Split(resp.Version, ".")
maj, min := 0, 0
if len(vs) >= 2 {
maj, _ = strconv.Atoi(vs[0])
min, rerr = strconv.Atoi(vs[1])
}
if maj < 3 || (maj == 3 && min < 2) {
rerr = ErrOldCluster
}
errc <- rerr
}(ep)
}
// wait for success
for i := 0; i < len(c.cfg.Endpoints); i++ {
if err = <-errc; err == nil {
break
}
}
cancel()
wg.Wait()
return err
}
// ActiveConnection returns the current in-use connection
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
// isHaltErr returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting.
func isHaltErr(ctx context.Context, err error) bool {
if ctx != nil && ctx.Err() != nil {
return true
}
if err == nil {
return false
}
ev, _ := status.FromError(err)
// Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader)
// Treat Internal codes as if something failed, leaving the
// system in an inconsistent state, but retrying could make progress.
// (e.g., failed in middle of send, corrupted frame)
// TODO: are permanent Internal errors possible from grpc?
return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
}
// isUnavailableErr returns true if the given error is an unavailable error
func isUnavailableErr(ctx context.Context, err error) bool {
if ctx != nil && ctx.Err() != nil {
return false
}
if err == nil {
return false
}
ev, _ := status.FromError(err)
// Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader)
return ev.Code() == codes.Unavailable
}
func toErr(ctx context.Context, err error) error {
if err == nil {
return nil
}
err = rpctypes.Error(err)
if _, ok := err.(rpctypes.EtcdError); ok {
return err
}
if ev, ok := status.FromError(err); ok {
code := ev.Code()
switch code {
case codes.DeadlineExceeded:
fallthrough
case codes.Canceled:
if ctx.Err() != nil {
err = ctx.Err()
}
case codes.Unavailable:
case codes.FailedPrecondition:
err = grpc.ErrClientConnClosing
}
}
return err
}
func canceledByCaller(stopCtx context.Context, err error) bool {
if stopCtx.Err() == nil || err == nil {
return false
}
return err == context.Canceled || err == context.DeadlineExceeded
}
// IsConnCanceled returns true if the error is from a closed gRPC connection.
// ref. https://github.com/grpc/grpc-go/pull/1854
func IsConnCanceled(err error) bool {
if err == nil {
return false
}
// >= gRPC v1.10.x
s, ok := status.FromError(err)
if ok {
// connection is canceled or server has already closed the connection
return s.Code() == codes.Canceled || s.Message() == "transport is closing"
}
// >= gRPC v1.10.x
if err == context.Canceled {
return true
}
// <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
return strings.Contains(err.Error(), "grpc: the client connection is closing")
}
func getHost(ep string) string {
url, uerr := url.Parse(ep)
if uerr != nil || !strings.Contains(ep, "://") {
return ep
}
return url.Host
}

View File

@ -1,114 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/pkg/types"
"google.golang.org/grpc"
)
type (
Member pb.Member
MemberListResponse pb.MemberListResponse
MemberAddResponse pb.MemberAddResponse
MemberRemoveResponse pb.MemberRemoveResponse
MemberUpdateResponse pb.MemberUpdateResponse
)
type Cluster interface {
// MemberList lists the current cluster membership.
MemberList(ctx context.Context) (*MemberListResponse, error)
// MemberAdd adds a new member into the cluster.
MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
// MemberRemove removes an existing member from the cluster.
MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
// MemberUpdate updates the peer addresses of the member.
MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
}
type cluster struct {
remote pb.ClusterClient
callOpts []grpc.CallOption
}
func NewCluster(c *Client) Cluster {
api := &cluster{remote: RetryClusterClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
api := &cluster{remote: remote}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
// fail-fast before panic in rafthttp
if _, err := types.NewURLs(peerAddrs); err != nil {
return nil, err
}
r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
return (*MemberAddResponse)(resp), nil
}
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
r := &pb.MemberRemoveRequest{ID: id}
resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
return (*MemberRemoveResponse)(resp), nil
}
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
// fail-fast before panic in rafthttp
if _, err := types.NewURLs(peerAddrs); err != nil {
return nil, err
}
// it is safe to retry on update.
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
// it is safe to retry on list.
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
if err == nil {
return (*MemberListResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
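A brief, hedged usage sketch of the Cluster API, assuming an existing *clientv3.Client named cli: add a member by its peer URL, then list the membership.
	ctx := context.Background()
	addResp, err := cli.MemberAdd(ctx, []string{"http://10.0.0.4:2380"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("added member %x", addResp.Member.ID)
	listResp, err := cli.MemberList(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range listResp.Members {
		log.Printf("member %x: %v", m.ID, m.ClientURLs)
	}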

View File

@ -1,51 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)
// CompactOp represents a compact operation.
type CompactOp struct {
revision int64
physical bool
}
// CompactOption configures compact operation.
type CompactOption func(*CompactOp)
func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
for _, opt := range opts {
opt(op)
}
}
// OpCompact wraps a slice of CompactOptions to create a CompactOp.
func OpCompact(rev int64, opts ...CompactOption) CompactOp {
ret := CompactOp{revision: rev}
ret.applyCompactOpts(opts)
return ret
}
func (op CompactOp) toRequest() *pb.CompactionRequest {
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
}
// WithCompactPhysical makes Compact wait until all compacted entries are
// removed from the etcd server's storage.
func WithCompactPhysical() CompactOption {
return func(op *CompactOp) { op.physical = true }
}
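A small, hedged example of compaction, assuming an existing *clientv3.Client named cli: read any key to learn the current store revision, then compact history before it, waiting for physical removal.
	resp, err := cli.Get(context.Background(), "sample_key")
	if err != nil {
		log.Fatal(err)
	}
	// compact all history before the current revision
	_, err = cli.Compact(context.Background(), resp.Header.Revision, clientv3.WithCompactPhysical())
	if err != nil {
		log.Fatal(err)
	}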

View File

@ -1,140 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)
type CompareTarget int
type CompareResult int
const (
CompareVersion CompareTarget = iota
CompareCreated
CompareModified
CompareValue
)
type Cmp pb.Compare
func Compare(cmp Cmp, result string, v interface{}) Cmp {
var r pb.Compare_CompareResult
switch result {
case "=":
r = pb.Compare_EQUAL
case "!=":
r = pb.Compare_NOT_EQUAL
case ">":
r = pb.Compare_GREATER
case "<":
r = pb.Compare_LESS
default:
panic("Unknown result op")
}
cmp.Result = r
switch cmp.Target {
case pb.Compare_VALUE:
val, ok := v.(string)
if !ok {
panic("bad compare value")
}
cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
case pb.Compare_VERSION:
cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
case pb.Compare_CREATE:
cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
case pb.Compare_MOD:
cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
case pb.Compare_LEASE:
cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
default:
panic("Unknown compare type")
}
return cmp
}
func Value(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
}
func Version(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
}
func CreateRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
}
func ModRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
}
// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
// LeaseID is 0, otherwise known as `NoLease`.
func LeaseValue(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
}
// KeyBytes returns the byte slice holding the comparison key.
func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
// WithKeyBytes sets the byte slice for the comparison key.
func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
// ValueBytes returns the byte slice holding the comparison value, if any.
func (cmp *Cmp) ValueBytes() []byte {
if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
return tu.Value
}
return nil
}
// WithValueBytes sets the byte slice for the comparison's value.
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
// WithRange sets the comparison to scan the range [key, end).
func (cmp Cmp) WithRange(end string) Cmp {
cmp.RangeEnd = []byte(end)
return cmp
}
// WithPrefix sets the comparison to scan all keys prefixed by the key.
func (cmp Cmp) WithPrefix() Cmp {
cmp.RangeEnd = getPrefix(cmp.Key)
return cmp
}
// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
func mustInt64(val interface{}) int64 {
if v, ok := val.(int64); ok {
return v
}
if v, ok := val.(int); ok {
return int64(v)
}
panic("bad value")
}
// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
// int64 otherwise.
func mustInt64orLeaseID(val interface{}) int64 {
if v, ok := val.(LeaseID); ok {
return int64(v)
}
return mustInt64(val)
}
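For illustration, a hedged sketch of these comparisons inside a transaction, assuming an existing *clientv3.Client named cli: a compare-and-swap that replaces the value of "k" only if it is still "old".
	txnResp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.Value("k"), "=", "old")).
		Then(clientv3.OpPut("k", "new")).
		Else(clientv3.OpGet("k")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	if !txnResp.Succeeded {
		// the comparison failed; the Else branch ran and fetched the current value
	}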

View File

@ -1,17 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package concurrency implements concurrency operations on top of
// etcd such as distributed locks, barriers, and elections.
package concurrency

View File

@ -1,253 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"context"
"errors"
"fmt"
v3 "go.etcd.io/etcd/clientv3"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/mvcc/mvccpb"
)
var (
ErrElectionNotLeader = errors.New("election: not leader")
ErrElectionNoLeader = errors.New("election: no leader")
)
type Election struct {
session *Session
keyPrefix string
leaderKey string
leaderRev int64
leaderSession *Session
hdr *pb.ResponseHeader
}
// NewElection returns a new election on a given key prefix.
func NewElection(s *Session, pfx string) *Election {
return &Election{session: s, keyPrefix: pfx + "/"}
}
// ResumeElection initializes an election with a known leader.
func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
return &Election{
session: s,
keyPrefix: pfx,
leaderKey: leaderKey,
leaderRev: leaderRev,
leaderSession: s,
}
}
// Campaign puts a value as eligible for the election on the prefix
// key.
// Multiple sessions can participate in the election for the
// same prefix, but only one can be the leader at a time.
//
// If the context is 'context.TODO()/context.Background()', Campaign
// will block waiting for other keys to be deleted, unless the server
// returns a non-recoverable error (e.g. ErrCompacted).
// Otherwise, Campaign blocks until it becomes the leader or until the
// context is cancelled or times out.
func (e *Election) Campaign(ctx context.Context, val string) error {
s := e.session
client := e.session.Client()
k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
txn = txn.Else(v3.OpGet(k))
resp, err := txn.Commit()
if err != nil {
return err
}
e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
if !resp.Succeeded {
kv := resp.Responses[0].GetResponseRange().Kvs[0]
e.leaderRev = kv.CreateRevision
if string(kv.Value) != val {
if err = e.Proclaim(ctx, val); err != nil {
e.Resign(ctx)
return err
}
}
}
_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
if err != nil {
// clean up in case of context cancel
select {
case <-ctx.Done():
e.Resign(client.Ctx())
default:
e.leaderSession = nil
}
return err
}
e.hdr = resp.Header
return nil
}
// Proclaim lets the leader announce a new value without another election.
func (e *Election) Proclaim(ctx context.Context, val string) error {
if e.leaderSession == nil {
return ErrElectionNotLeader
}
client := e.session.Client()
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
txn := client.Txn(ctx).If(cmp)
txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
tresp, terr := txn.Commit()
if terr != nil {
return terr
}
if !tresp.Succeeded {
e.leaderKey = ""
return ErrElectionNotLeader
}
e.hdr = tresp.Header
return nil
}
// Resign lets a leader start a new election.
func (e *Election) Resign(ctx context.Context) (err error) {
if e.leaderSession == nil {
return nil
}
client := e.session.Client()
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
if err == nil {
e.hdr = resp.Header
}
e.leaderKey = ""
e.leaderSession = nil
return err
}
// Leader returns the leader value for the current election.
func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
client := e.session.Client()
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
if err != nil {
return nil, err
} else if len(resp.Kvs) == 0 {
// no leader currently elected
return nil, ErrElectionNoLeader
}
return resp, nil
}
// Observe returns a channel that reliably observes ordered leader proposals
// as GetResponse values on the currently elected leader key. It will not
// necessarily fetch all historical leader updates, but will always post the
// most recent leader value.
//
// The channel closes when the context is canceled or the underlying watcher
// is otherwise disrupted.
func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
retc := make(chan v3.GetResponse)
go e.observe(ctx, retc)
return retc
}
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
client := e.session.Client()
defer close(ch)
for {
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
if err != nil {
return
}
var kv *mvccpb.KeyValue
var hdr *pb.ResponseHeader
if len(resp.Kvs) == 0 {
cctx, cancel := context.WithCancel(ctx)
// wait for first key put on prefix
opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
wch := client.Watch(cctx, e.keyPrefix, opts...)
for kv == nil {
wr, ok := <-wch
if !ok || wr.Err() != nil {
cancel()
return
}
// only accept puts; a delete will make observe() spin
for _, ev := range wr.Events {
if ev.Type == mvccpb.PUT {
hdr, kv = &wr.Header, ev.Kv
// may have multiple revs; hdr.rev = the last rev
// set to kv's rev in case batch has multiple Puts
hdr.Revision = kv.ModRevision
break
}
}
}
cancel()
} else {
hdr, kv = resp.Header, resp.Kvs[0]
}
select {
case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
case <-ctx.Done():
return
}
cctx, cancel := context.WithCancel(ctx)
wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
keyDeleted := false
for !keyDeleted {
wr, ok := <-wch
if !ok {
cancel()
return
}
for _, ev := range wr.Events {
if ev.Type == mvccpb.DELETE {
keyDeleted = true
break
}
resp.Header = &wr.Header
resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
select {
case ch <- *resp:
case <-cctx.Done():
cancel()
return
}
}
}
cancel()
}
}
// Key returns the leader key if elected, empty string otherwise.
func (e *Election) Key() string { return e.leaderKey }
// Rev returns the leader key's creation revision, if elected.
func (e *Election) Rev() int64 { return e.leaderRev }
// Header is the response header from the last successful election proposal.
func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
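A hedged usage sketch for elections, assuming an existing *clientv3.Client named cli: campaign for leadership, do work as leader, then resign.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()
	e := concurrency.NewElection(s, "/my-election")
	// blocks until this candidate becomes the leader
	if err := e.Campaign(context.Background(), "candidate-1"); err != nil {
		log.Fatal(err)
	}
	// ... act as leader ...
	if err := e.Resign(context.Background()); err != nil {
		log.Fatal(err)
	}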

View File

@ -1,65 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"context"
"fmt"
v3 "go.etcd.io/etcd/clientv3"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/mvcc/mvccpb"
)
func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
cctx, cancel := context.WithCancel(ctx)
defer cancel()
var wr v3.WatchResponse
wch := client.Watch(cctx, key, v3.WithRev(rev))
for wr = range wch {
for _, ev := range wr.Events {
if ev.Type == mvccpb.DELETE {
return nil
}
}
}
if err := wr.Err(); err != nil {
return err
}
if err := ctx.Err(); err != nil {
return err
}
return fmt.Errorf("lost watcher waiting for delete")
}
// waitDeletes efficiently waits until all keys matching the prefix and no greater
// than the create revision are deleted.
func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
for {
resp, err := client.Get(ctx, pfx, getOpts...)
if err != nil {
return nil, err
}
if len(resp.Kvs) == 0 {
return resp.Header, nil
}
lastKey := string(resp.Kvs[0].Key)
if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
return nil, err
}
}
}

View File

@ -1,118 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"context"
"fmt"
"sync"
v3 "go.etcd.io/etcd/clientv3"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)
// Mutex implements a distributed mutex on top of etcd; use NewLocker for a sync.Locker adapter
type Mutex struct {
s *Session
pfx string
myKey string
myRev int64
hdr *pb.ResponseHeader
}
func NewMutex(s *Session, pfx string) *Mutex {
return &Mutex{s, pfx + "/", "", -1, nil}
}
// Lock locks the mutex with a cancelable context. If the context is canceled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
s := m.s
client := m.s.Client()
m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
// put self in lock waiters via myKey; oldest waiter holds lock
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
// reuse key in case this session already holds the lock
get := v3.OpGet(m.myKey)
// fetch current holder to complete uncontended path with only one RPC
getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
if err != nil {
return err
}
m.myRev = resp.Header.Revision
if !resp.Succeeded {
m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
}
// if there is no key on the prefix, or the minimum create revision is our key's, we already hold the lock
ownerKey := resp.Responses[1].GetResponseRange().Kvs
if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
m.hdr = resp.Header
return nil
}
// wait for deletion of keys with create revisions prior to myKey's
hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
// release lock key if cancelled
select {
case <-ctx.Done():
m.Unlock(client.Ctx())
default:
m.hdr = hdr
}
return werr
}
func (m *Mutex) Unlock(ctx context.Context) error {
client := m.s.Client()
if _, err := client.Delete(ctx, m.myKey); err != nil {
return err
}
m.myKey = "\x00"
m.myRev = -1
return nil
}
func (m *Mutex) IsOwner() v3.Cmp {
return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
}
func (m *Mutex) Key() string { return m.myKey }
// Header is the response header received from etcd on acquiring the lock.
func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
type lockerMutex struct{ *Mutex }
func (lm *lockerMutex) Lock() {
client := lm.s.Client()
if err := lm.Mutex.Lock(client.Ctx()); err != nil {
panic(err)
}
}
func (lm *lockerMutex) Unlock() {
client := lm.s.Client()
if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
panic(err)
}
}
// NewLocker creates a sync.Locker backed by an etcd mutex.
func NewLocker(s *Session, pfx string) sync.Locker {
return &lockerMutex{NewMutex(s, pfx)}
}
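A hedged usage sketch for the mutex, assuming an existing *clientv3.Client named cli: Lock blocks until the lock is acquired or the context ends.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()
	m := concurrency.NewMutex(s, "/my-lock")
	if err := m.Lock(context.Background()); err != nil {
		log.Fatal(err)
	}
	// ... critical section ...
	if err := m.Unlock(context.Background()); err != nil {
		log.Fatal(err)
	}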

View File

@ -1,141 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"context"
"time"
v3 "go.etcd.io/etcd/clientv3"
)
const defaultSessionTTL = 60
// Session represents a lease kept alive for the lifetime of a client.
// Fault-tolerant applications may use sessions to reason about liveness.
type Session struct {
client *v3.Client
opts *sessionOptions
id v3.LeaseID
cancel context.CancelFunc
donec <-chan struct{}
}
// NewSession gets the leased session for a client.
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
for _, opt := range opts {
opt(ops)
}
id := ops.leaseID
if id == v3.NoLease {
resp, err := client.Grant(ops.ctx, int64(ops.ttl))
if err != nil {
return nil, err
}
id = v3.LeaseID(resp.ID)
}
ctx, cancel := context.WithCancel(ops.ctx)
keepAlive, err := client.KeepAlive(ctx, id)
if err != nil || keepAlive == nil {
cancel()
return nil, err
}
donec := make(chan struct{})
s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
// keep the lease alive until client error or cancelled context
go func() {
defer close(donec)
for range keepAlive {
// eat messages until keep alive channel closes
}
}()
return s, nil
}
// Client is the etcd client that is attached to the session.
func (s *Session) Client() *v3.Client {
return s.client
}
// Lease is the lease ID for keys bound to the session.
func (s *Session) Lease() v3.LeaseID { return s.id }
// Done returns a channel that closes when the lease is orphaned, expires, or
// is otherwise no longer being refreshed.
func (s *Session) Done() <-chan struct{} { return s.donec }
// Orphan ends the refresh for the session lease. This is useful
// in case the state of the client connection is indeterminate (revoke
// would fail) or when transferring lease ownership.
func (s *Session) Orphan() {
s.cancel()
<-s.donec
}
// Close orphans the session and revokes the session lease.
func (s *Session) Close() error {
s.Orphan()
// if revoke takes longer than the ttl, lease is expired anyway
ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
_, err := s.client.Revoke(ctx, s.id)
cancel()
return err
}
type sessionOptions struct {
ttl int
leaseID v3.LeaseID
ctx context.Context
}
// SessionOption configures Session.
type SessionOption func(*sessionOptions)
// WithTTL configures the session's TTL in seconds.
// If TTL is <= 0, the default 60 seconds TTL will be used.
func WithTTL(ttl int) SessionOption {
return func(so *sessionOptions) {
if ttl > 0 {
so.ttl = ttl
}
}
}
// WithLease specifies the existing leaseID to be used for the session.
// This is useful in process-restart scenarios, for example, to reclaim
// leadership from an election prior to restart.
func WithLease(leaseID v3.LeaseID) SessionOption {
return func(so *sessionOptions) {
so.leaseID = leaseID
}
}
// WithContext assigns a context to the session instead of defaulting to
// using the client context. This is useful for canceling NewSession and
// Close operations immediately without having to close the client. If the
// context is canceled before Close() completes, the session's lease will be
// abandoned and left to expire instead of being revoked.
func WithContext(ctx context.Context) SessionOption {
return func(so *sessionOptions) {
so.ctx = ctx
}
}
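A hedged sketch of session lifecycle handling, assuming an existing *clientv3.Client named cli: a 30-second TTL and a watch on Done to react to lease loss.
	s, err := concurrency.NewSession(cli, concurrency.WithTTL(30))
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		<-s.Done()
		log.Println("session lease expired or was orphaned")
	}()
	// ... use s with elections, mutexes, etc., then:
	_ = s.Close()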

View File

@ -1,387 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"context"
"math"
v3 "go.etcd.io/etcd/clientv3"
)
// STM is an interface for software transactional memory.
type STM interface {
// Get returns the value for a key and inserts the key in the txn's read set.
// If Get fails, it aborts the transaction with an error, never returning.
Get(key ...string) string
// Put adds a value for a key to the write set.
Put(key, val string, opts ...v3.OpOption)
// Rev returns the revision of a key in the read set.
Rev(key string) int64
// Del deletes a key.
Del(key string)
// commit attempts to apply the txn's changes to the server.
commit() *v3.TxnResponse
reset()
}
// Isolation is an enumeration of transactional isolation levels which
// describes how transactions should interfere and conflict.
type Isolation int
const (
// SerializableSnapshot provides serializable isolation and also checks
// for write conflicts.
SerializableSnapshot Isolation = iota
// Serializable reads within the same transaction attempt return data
// as of the revision of the first read.
Serializable
// RepeatableReads reads within the same transaction attempt always
// return the same data.
RepeatableReads
// ReadCommitted reads keys from any committed revision.
ReadCommitted
)
// stmError safely passes STM errors through panic to the STM error channel.
type stmError struct{ err error }
type stmOptions struct {
iso Isolation
ctx context.Context
prefetch []string
}
type stmOption func(*stmOptions)
// WithIsolation specifies the transaction isolation level.
func WithIsolation(lvl Isolation) stmOption {
return func(so *stmOptions) { so.iso = lvl }
}
// WithAbortContext specifies the context for permanently aborting the transaction.
func WithAbortContext(ctx context.Context) stmOption {
return func(so *stmOptions) { so.ctx = ctx }
}
// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
// If an STM transaction will unconditionally fetch a set of keys, prefetching
// those keys will save the round-trip cost from requesting each key one by one
// with Get().
func WithPrefetch(keys ...string) stmOption {
return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
}
// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
opts := &stmOptions{ctx: c.Ctx()}
for _, f := range so {
f(opts)
}
if len(opts.prefetch) != 0 {
f := apply
apply = func(s STM) error {
s.Get(opts.prefetch...)
return f(s)
}
}
return runSTM(mkSTM(c, opts), apply)
}
func mkSTM(c *v3.Client, opts *stmOptions) STM {
switch opts.iso {
case SerializableSnapshot:
s := &stmSerializable{
stm: stm{client: c, ctx: opts.ctx},
prefetch: make(map[string]*v3.GetResponse),
}
s.conflicts = func() []v3.Cmp {
return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
}
return s
case Serializable:
s := &stmSerializable{
stm: stm{client: c, ctx: opts.ctx},
prefetch: make(map[string]*v3.GetResponse),
}
s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
return s
case RepeatableReads:
s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
return s
case ReadCommitted:
s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
s.conflicts = func() []v3.Cmp { return nil }
return s
default:
panic("unsupported stm")
}
}
type stmResponse struct {
resp *v3.TxnResponse
err error
}
func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
outc := make(chan stmResponse, 1)
go func() {
defer func() {
if r := recover(); r != nil {
e, ok := r.(stmError)
if !ok {
// client apply panicked
panic(r)
}
outc <- stmResponse{nil, e.err}
}
}()
var out stmResponse
for {
s.reset()
if out.err = apply(s); out.err != nil {
break
}
if out.resp = s.commit(); out.resp != nil {
break
}
}
outc <- out
}()
r := <-outc
return r.resp, r.err
}
// stm implements repeatable-read software transactional memory over etcd
type stm struct {
client *v3.Client
ctx context.Context
// rset holds read key values and revisions
rset readSet
// wset holds overwritten keys and their values
wset writeSet
// getOpts are the opts used for gets
getOpts []v3.OpOption
// conflicts computes the current conflicts on the txn
conflicts func() []v3.Cmp
}
type stmPut struct {
val string
op v3.Op
}
type readSet map[string]*v3.GetResponse
func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
for i, resp := range txnresp.Responses {
rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
}
}
// first returns the store revision from the first fetch
func (rs readSet) first() int64 {
ret := int64(math.MaxInt64 - 1)
for _, resp := range rs {
if rev := resp.Header.Revision; rev < ret {
ret = rev
}
}
return ret
}
// cmps guards the txn from updates to read set
func (rs readSet) cmps() []v3.Cmp {
cmps := make([]v3.Cmp, 0, len(rs))
for k, rk := range rs {
cmps = append(cmps, isKeyCurrent(k, rk))
}
return cmps
}
type writeSet map[string]stmPut
func (ws writeSet) get(keys ...string) *stmPut {
for _, key := range keys {
if wv, ok := ws[key]; ok {
return &wv
}
}
return nil
}
// cmps returns a cmp list testing no writes have happened past rev
func (ws writeSet) cmps(rev int64) []v3.Cmp {
cmps := make([]v3.Cmp, 0, len(ws))
for key := range ws {
cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
}
return cmps
}
// puts is the list of ops for all pending writes
func (ws writeSet) puts() []v3.Op {
puts := make([]v3.Op, 0, len(ws))
for _, v := range ws {
puts = append(puts, v.op)
}
return puts
}
func (s *stm) Get(keys ...string) string {
if wv := s.wset.get(keys...); wv != nil {
return wv.val
}
return respToValue(s.fetch(keys...))
}
func (s *stm) Put(key, val string, opts ...v3.OpOption) {
s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
}
func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
func (s *stm) Rev(key string) int64 {
if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
return resp.Kvs[0].ModRevision
}
return 0
}
func (s *stm) commit() *v3.TxnResponse {
txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
if err != nil {
panic(stmError{err})
}
if txnresp.Succeeded {
return txnresp
}
return nil
}
func (s *stm) fetch(keys ...string) *v3.GetResponse {
if len(keys) == 0 {
return nil
}
ops := make([]v3.Op, len(keys))
for i, key := range keys {
if resp, ok := s.rset[key]; ok {
return resp
}
ops[i] = v3.OpGet(key, s.getOpts...)
}
txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
if err != nil {
panic(stmError{err})
}
s.rset.add(keys, txnresp)
return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
}
func (s *stm) reset() {
s.rset = make(map[string]*v3.GetResponse)
s.wset = make(map[string]stmPut)
}
type stmSerializable struct {
stm
prefetch map[string]*v3.GetResponse
}
func (s *stmSerializable) Get(keys ...string) string {
if wv := s.wset.get(keys...); wv != nil {
return wv.val
}
firstRead := len(s.rset) == 0
for _, key := range keys {
if resp, ok := s.prefetch[key]; ok {
delete(s.prefetch, key)
s.rset[key] = resp
}
}
resp := s.stm.fetch(keys...)
if firstRead {
// txn's base revision is defined by the first read
s.getOpts = []v3.OpOption{
v3.WithRev(resp.Header.Revision),
v3.WithSerializable(),
}
}
return respToValue(resp)
}
func (s *stmSerializable) Rev(key string) int64 {
s.Get(key)
return s.stm.Rev(key)
}
func (s *stmSerializable) gets() ([]string, []v3.Op) {
keys := make([]string, 0, len(s.rset))
ops := make([]v3.Op, 0, len(s.rset))
for k := range s.rset {
keys = append(keys, k)
ops = append(ops, v3.OpGet(k))
}
return keys, ops
}
func (s *stmSerializable) commit() *v3.TxnResponse {
keys, getops := s.gets()
txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
// use Else to prefetch keys in case of conflict to save a round trip
txnresp, err := txn.Else(getops...).Commit()
if err != nil {
panic(stmError{err})
}
if txnresp.Succeeded {
return txnresp
}
// load prefetch with Else data
s.rset.add(keys, txnresp)
s.prefetch = s.rset
s.getOpts = nil
return nil
}
func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
if len(r.Kvs) != 0 {
return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
}
return v3.Compare(v3.ModRevision(k), "=", 0)
}
func respToValue(resp *v3.GetResponse) string {
if resp == nil || len(resp.Kvs) == 0 {
return ""
}
return string(resp.Kvs[0].Value)
}
// NewSTMRepeatable is deprecated.
func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
}
// NewSTMSerializable is deprecated.
func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
}
// NewSTMReadCommitted is deprecated.
func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
}
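A hedged usage sketch of STM, assuming an existing *clientv3.Client named cli: an atomic counter increment that is automatically retried on conflict. An absent key reads as "", which strconv.Atoi maps to 0 here.
	_, err := concurrency.NewSTM(cli, func(s concurrency.STM) error {
		n, _ := strconv.Atoi(s.Get("counter")) // "" parses to 0 for a missing key
		s.Put("counter", strconv.Itoa(n+1))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}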

View File

@ -1,102 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"crypto/tls"
"time"
"go.uber.org/zap"
"google.golang.org/grpc"
)
type Config struct {
// Endpoints is a list of URLs.
Endpoints []string `json:"endpoints"`
// AutoSyncInterval is the interval for updating endpoints with the cluster's latest members.
// 0 disables auto-sync. By default auto-sync is disabled.
AutoSyncInterval time.Duration `json:"auto-sync-interval"`
// DialTimeout is the timeout for failing to establish a connection.
DialTimeout time.Duration `json:"dial-timeout"`
// DialKeepAliveTime is the time after which client pings the server to see if
// transport is alive.
DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
// DialKeepAliveTimeout is the time that the client waits for a response for the
// keep-alive probe. If the response is not received in this time, the connection is closed.
DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
// MaxCallSendMsgSize is the client-side request send limit in bytes.
// If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
// Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
MaxCallSendMsgSize int
// MaxCallRecvMsgSize is the client-side response receive limit.
// If 0, it defaults to "math.MaxInt32", because range response can
// easily exceed request send limits.
// Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
MaxCallRecvMsgSize int
// TLS holds the client secure credentials, if any.
TLS *tls.Config
// Username is a user name for authentication.
Username string `json:"username"`
// Password is a password for authentication.
Password string `json:"password"`
// RejectOldCluster when set will refuse to create a client against an outdated cluster.
RejectOldCluster bool `json:"reject-old-cluster"`
// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
DialOptions []grpc.DialOption
// Context is the default client context; it can be used to cancel grpc dial out and
// other operations that do not have an explicit context.
Context context.Context
// LogConfig configures client-side logger.
// If nil, use the default logger.
// TODO: configure gRPC logger
LogConfig *zap.Config
// PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs).
PermitWithoutStream bool `json:"permit-without-stream"`
}
// DefaultLogConfig is the default client logging configuration.
// Default log level is "Warn". Use "zap.InfoLevel" for debugging.
// Use "/dev/null" for output paths, to discard all logs.
var DefaultLogConfig = zap.Config{
Level: zap.NewAtomicLevelAt(zap.WarnLevel),
Development: false,
Sampling: &zap.SamplingConfig{
Initial: 100,
Thereafter: 100,
},
Encoding: "json",
EncoderConfig: zap.NewProductionEncoderConfig(),
// Use "/dev/null" to discard all
OutputPaths: []string{"stderr"},
ErrorOutputPaths: []string{"stderr"},
}
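For illustration, a hedged Config sketch that exercises the keepalive knobs above; the values are placeholders, not recommendations.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:            []string{"localhost:2379"},
		DialTimeout:          5 * time.Second,
		DialKeepAliveTime:    10 * time.Second, // ping the server every 10s
		DialKeepAliveTimeout: 3 * time.Second,  // close the connection if no reply in 3s
		PermitWithoutStream:  true,             // keepalive even with no active RPCs
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()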

View File

@ -1,106 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package clientv3 implements the official Go etcd client for v3.
//
// Create client using `clientv3.New`:
//
// // expect dial time-out on ipv4 blackhole
// _, err := clientv3.New(clientv3.Config{
// Endpoints: []string{"http://254.0.0.1:12345"},
// DialTimeout: 2 * time.Second,
// })
//
// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
// if err == context.DeadlineExceeded {
// // handle errors
// }
//
// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
// if err == grpc.ErrClientConnTimeout {
// // handle errors
// }
//
// cli, err := clientv3.New(clientv3.Config{
// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
// DialTimeout: 5 * time.Second,
// })
// if err != nil {
// // handle error!
// }
// defer cli.Close()
//
// Make sure to close the client after using it. If the client is not closed, the
// connection will have leaky goroutines.
//
// To specify a client request timeout, wrap the context with context.WithTimeout:
//
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
// cancel()
// if err != nil {
// // handle error!
// }
// // use the response
//
// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
// Clients are safe for concurrent use by multiple goroutines.
//
// etcd client returns 3 types of errors:
//
// 1. context error: canceled or deadline exceeded.
// 2. gRPC status error: e.g. when the clock drifts on the server side before the client's context deadline is exceeded.
// 3. gRPC error: see https://go.etcd.io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
//
// Here is the example code to handle client errors:
//
// resp, err := kvc.Put(ctx, "", "")
// if err != nil {
// if err == context.Canceled {
// // ctx is canceled by another routine
// } else if err == context.DeadlineExceeded {
// // ctx is attached with a deadline and it exceeded
// } else if err == rpctypes.ErrEmptyKey {
// // client-side error: key is not provided
// } else if ev, ok := status.FromError(err); ok {
// code := ev.Code()
// if code == codes.DeadlineExceeded {
// // server-side context might have timed-out first (due to clock skew)
// // while original client-side context is not timed-out yet
// }
// } else {
// // bad cluster endpoints, which are not etcd servers
// }
// }
//
// go func() { cli.Close() }()
// _, err := kvc.Get(ctx, "a")
// if err != nil {
// // with etcd clientv3 <= v3.3
// if err == context.Canceled {
// // grpc balancer calls 'Get' with an inflight client.Close
// } else if err == grpc.ErrClientConnClosing {
// // grpc balancer calls 'Get' after client.Close.
// }
// // with etcd clientv3 >= v3.4
// if clientv3.IsConnCanceled(err) {
// // gRPC client connection is closed
// }
// }
//
// The grpc load balancer is registered statically and is shared across etcd clients.
// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
// variable. E.g. "ETCD_CLIENT_DEBUG=1".
//
package clientv3

View File

@ -1,177 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"google.golang.org/grpc"
)
type (
CompactResponse pb.CompactionResponse
PutResponse pb.PutResponse
GetResponse pb.RangeResponse
DeleteResponse pb.DeleteRangeResponse
TxnResponse pb.TxnResponse
)
type KV interface {
// Put puts a key-value pair into etcd.
// Note that key and value can be plain byte arrays; a string is
// an immutable representation of such a byte array.
// To get a string of bytes, do string([]byte{0x10, 0x20}).
Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
// Get retrieves keys.
// By default, Get will return the value for "key", if any.
// When passed WithRange(end), Get will return the keys in the range [key, end).
// When passed WithFromKey(), Get returns keys greater than or equal to key.
// When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
// if the required revision is compacted, the request will fail with ErrCompacted.
// When passed WithLimit(limit), the number of returned keys is bounded by limit.
// When passed WithSort(), the keys will be sorted.
Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
// Delete deletes a key, or optionally the range [key, end) when passed WithRange(end).
Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
// Compact compacts etcd KV history before the given rev.
Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
// Do applies a single Op on KV without a transaction.
// Do is useful when creating arbitrary operations to be issued at a
// later time; the user can range over the operations, calling Do to
// execute them. Get/Put/Delete, on the other hand, are best suited
// for when the operation should be issued at the time of declaration.
Do(ctx context.Context, op Op) (OpResponse, error)
// Txn creates a transaction.
Txn(ctx context.Context) Txn
}
type OpResponse struct {
put *PutResponse
get *GetResponse
del *DeleteResponse
txn *TxnResponse
}
func (op OpResponse) Put() *PutResponse { return op.put }
func (op OpResponse) Get() *GetResponse { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del }
func (op OpResponse) Txn() *TxnResponse { return op.txn }
func (resp *PutResponse) OpResponse() OpResponse {
return OpResponse{put: resp}
}
func (resp *GetResponse) OpResponse() OpResponse {
return OpResponse{get: resp}
}
func (resp *DeleteResponse) OpResponse() OpResponse {
return OpResponse{del: resp}
}
func (resp *TxnResponse) OpResponse() OpResponse {
return OpResponse{txn: resp}
}
type kv struct {
remote pb.KVClient
callOpts []grpc.CallOption
}
func NewKV(c *Client) KV {
api := &kv{remote: RetryKVClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
api := &kv{remote: remote}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
r, err := kv.Do(ctx, OpPut(key, val, opts...))
return r.put, toErr(ctx, err)
}
func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
r, err := kv.Do(ctx, OpGet(key, opts...))
return r.get, toErr(ctx, err)
}
func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
r, err := kv.Do(ctx, OpDelete(key, opts...))
return r.del, toErr(ctx, err)
}
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
return (*CompactResponse)(resp), err
}
func (kv *kv) Txn(ctx context.Context) Txn {
return &txn{
kv: kv,
ctx: ctx,
callOpts: kv.callOpts,
}
}
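// exampleTxn is an illustrative sketch (not part of the original API) of
// the fluent transaction builder returned by Txn above: put "k" only if it
// has never been created, otherwise read it back; key and value are arbitrary.
func exampleTxn(ctx context.Context, kv KV) (bool, error) {
resp, err := kv.Txn(ctx).
If(Compare(CreateRevision("k"), "=", 0)).
Then(OpPut("k", "v")).
Else(OpGet("k")).
Commit()
if err != nil {
return false, err
}
// Succeeded reports whether the If comparison held.
return resp.Succeeded, nil
}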
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
var err error
switch op.t {
case tRange:
var resp *pb.RangeResponse
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
if err == nil {
return OpResponse{get: (*GetResponse)(resp)}, nil
}
case tPut:
var resp *pb.PutResponse
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
if err == nil {
return OpResponse{put: (*PutResponse)(resp)}, nil
}
case tDeleteRange:
var resp *pb.DeleteRangeResponse
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil
}
case tTxn:
var resp *pb.TxnResponse
resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
if err == nil {
return OpResponse{txn: (*TxnResponse)(resp)}, nil
}
default:
panic("Unknown op")
}
return OpResponse{}, toErr(ctx, err)
}
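// exampleDo is an illustrative sketch (not part of the original API) of
// queuing prebuilt Ops and executing them later through Do; it assumes
// "fmt" among the file's imports.
func exampleDo(ctx context.Context, kv KV) error {
ops := []Op{OpPut("k1", "v1"), OpGet("k1")}
for _, op := range ops {
r, err := kv.Do(ctx, op)
if err != nil {
return err
}
if gr := r.Get(); gr != nil && len(gr.Kvs) > 0 {
fmt.Printf("read back %s\n", gr.Kvs[0].Value)
}
}
return nil
}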

View File

@ -1,597 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"sync"
"time"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
type (
LeaseRevokeResponse pb.LeaseRevokeResponse
LeaseID int64
)
// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
type LeaseGrantResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
Error string
}
// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
type LeaseKeepAliveResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
}
// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
type LeaseTimeToLiveResponse struct {
*pb.ResponseHeader
ID LeaseID `json:"id"`
// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. An expired lease will return -1.
TTL int64 `json:"ttl"`
// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
GrantedTTL int64 `json:"granted-ttl"`
// Keys is the list of keys attached to this lease.
Keys [][]byte `json:"keys"`
}
// LeaseStatus represents a lease status.
type LeaseStatus struct {
ID LeaseID `json:"id"`
// TODO: TTL int64
}
// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
type LeaseLeasesResponse struct {
*pb.ResponseHeader
Leases []LeaseStatus `json:"leases"`
}
const (
// defaultTTL is the assumed lease TTL used for the first keepalive
// deadline before the actual TTL is known to the client.
defaultTTL = 5 * time.Second
// NoLease is a lease ID for the absence of a lease.
NoLease LeaseID = 0
// retryConnWait is how long to wait before retrying a request after an error
retryConnWait = 500 * time.Millisecond
)
// LeaseResponseChSize is the size of buffer to store unsent lease responses.
// WARNING: DO NOT UPDATE.
// Only for testing purposes.
var LeaseResponseChSize = 16
// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
//
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
type ErrKeepAliveHalted struct {
Reason error
}
func (e ErrKeepAliveHalted) Error() string {
s := "etcdclient: leases keep alive halted"
if e.Reason != nil {
s += ": " + e.Reason.Error()
}
return s
}
type Lease interface {
// Grant creates a new lease.
Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
// Revoke revokes the given lease.
Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
// TimeToLive retrieves the lease information of the given lease ID.
TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
// Leases retrieves all leases.
Leases(ctx context.Context) (*LeaseLeasesResponse, error)
// KeepAlive keeps the given lease alive forever. If the keepalive response
// posted to the channel is not consumed immediately, the lease client will
// continue sending keep alive requests to the etcd server at least every
// second until the latest response is consumed.
//
// The returned "LeaseKeepAliveResponse" channel closes if the underlying
// keep alive stream is interrupted in some way the client cannot handle
// itself, or if the given context "ctx" is canceled or timed out. A receive
// from this closed channel yields a nil "LeaseKeepAliveResponse".
//
// If the client keep alive loop halts with an unexpected error (e.g.
// "etcdserver: no leader") or is canceled by the caller (e.g.
// context.Canceled), the error is returned. Otherwise, it retries.
//
// TODO(v4.0): post errors to last keep alive message before closing
// (see https://go.etcd.io/etcd/pull/7866)
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
// KeepAliveOnce renews the lease once. The response corresponds to the
// first message from calling KeepAlive. If the response has a recoverable
// error, KeepAliveOnce will retry the RPC with a new keep alive message.
//
// In most cases, KeepAlive should be used instead of KeepAliveOnce.
KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
// Close releases all resources Lease keeps for efficient communication
// with the etcd server.
Close() error
}
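// exampleLease is an illustrative sketch (not part of the original API):
// grant a lease, attach a key through the KV API, and keep the lease alive
// by draining the response channel. The 10-second TTL is arbitrary.
func exampleLease(ctx context.Context, l Lease, kv KV) error {
grant, err := l.Grant(ctx, 10)
if err != nil {
return err
}
if _, err := kv.Put(ctx, "svc/instance", "addr", WithLease(grant.ID)); err != nil {
return err
}
ch, err := l.KeepAlive(ctx, grant.ID)
if err != nil {
return err
}
go func() {
// Consume responses so renewals keep flowing; the channel closes
// when the lease expires or the stream is interrupted.
for range ch {
}
}()
return nil
}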
type lessor struct {
mu sync.Mutex // guards all fields
// donec is closed and loopErr is set when recvKeepAliveLoop stops
donec chan struct{}
loopErr error
remote pb.LeaseClient
stream pb.Lease_LeaseKeepAliveClient
streamCancel context.CancelFunc
stopCtx context.Context
stopCancel context.CancelFunc
keepAlives map[LeaseID]*keepAlive
// firstKeepAliveTimeout is the timeout for the first keepalive request
// before the actual TTL is known to the lease client
firstKeepAliveTimeout time.Duration
// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
firstKeepAliveOnce sync.Once
callOpts []grpc.CallOption
lg *zap.Logger
}
// keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct {
chs []chan<- *LeaseKeepAliveResponse
ctxs []context.Context
// deadline is the time the keep alive channels close if no response
deadline time.Time
// nextKeepAlive is when to send the next keep alive message
nextKeepAlive time.Time
// donec is closed on lease revoke, expiration, or cancel.
donec chan struct{}
}
func NewLease(c *Client) Lease {
return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
}
func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
l := &lessor{
donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive),
remote: remote,
firstKeepAliveTimeout: keepAliveTimeout,
lg: c.lg,
}
if l.firstKeepAliveTimeout == time.Second {
l.firstKeepAliveTimeout = defaultTTL
}
if c != nil {
l.callOpts = c.callOpts
}
reqLeaderCtx := WithRequireLeader(context.Background())
l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
return l
}
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
if err == nil {
gresp := &LeaseGrantResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
Error: resp.Error,
}
return gresp, nil
}
return nil, toErr(ctx, err)
}
func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
r := toLeaseTimeToLiveRequest(id, opts...)
resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
if err == nil {
gresp := &LeaseTimeToLiveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
GrantedTTL: resp.GrantedTTL,
Keys: resp.Keys,
}
return gresp, nil
}
return nil, toErr(ctx, err)
}
func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
if err == nil {
leases := make([]LeaseStatus, len(resp.Leases))
for i := range resp.Leases {
leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
}
return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
}
return nil, toErr(ctx, err)
}
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
l.mu.Lock()
// ensure that recvKeepAliveLoop is still running
select {
case <-l.donec:
err := l.loopErr
l.mu.Unlock()
close(ch)
return ch, ErrKeepAliveHalted{Reason: err}
default:
}
ka, ok := l.keepAlives[id]
if !ok {
// create fresh keep alive
ka = &keepAlive{
chs: []chan<- *LeaseKeepAliveResponse{ch},
ctxs: []context.Context{ctx},
deadline: time.Now().Add(l.firstKeepAliveTimeout),
nextKeepAlive: time.Now(),
donec: make(chan struct{}),
}
l.keepAlives[id] = ka
} else {
// add channel and context to existing keep alive
ka.ctxs = append(ka.ctxs, ctx)
ka.chs = append(ka.chs, ch)
}
l.mu.Unlock()
go l.keepAliveCtxCloser(ctx, id, ka.donec)
l.firstKeepAliveOnce.Do(func() {
go l.recvKeepAliveLoop()
go l.deadlineLoop()
})
return ch, nil
}
func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
for {
resp, err := l.keepAliveOnce(ctx, id)
if err == nil {
if resp.TTL <= 0 {
err = rpctypes.ErrLeaseNotFound
}
return resp, err
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
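// exampleManualRenew is an illustrative sketch (not part of the original
// API) of manual renewal with KeepAliveOnce, pacing renewals at TTL/3 the
// way the automatic keep alive loop does.
func exampleManualRenew(ctx context.Context, l Lease, id LeaseID) error {
for {
ka, err := l.KeepAliveOnce(ctx, id)
if err != nil {
return err // e.g. rpctypes.ErrLeaseNotFound once the lease expires
}
select {
case <-time.After(time.Duration(ka.TTL) * time.Second / 3):
case <-ctx.Done():
return ctx.Err()
}
}
}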
func (l *lessor) Close() error {
l.stopCancel()
// close for synchronous teardown if stream goroutines never launched
l.firstKeepAliveOnce.Do(func() { close(l.donec) })
<-l.donec
return nil
}
func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) {
select {
case <-donec:
return
case <-l.donec:
return
case <-ctx.Done():
}
l.mu.Lock()
defer l.mu.Unlock()
ka, ok := l.keepAlives[id]
if !ok {
return
}
// close channel and remove context if still associated with keep alive
for i, c := range ka.ctxs {
if c == ctx {
close(ka.chs[i])
ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
break
}
}
// remove the keep alive if there are no more listeners
if len(ka.chs) == 0 {
delete(l.keepAlives, id)
}
}
// closeRequireLeader scans keepAlives for ctxs that have require leader
// and closes the associated channels.
func (l *lessor) closeRequireLeader() {
l.mu.Lock()
defer l.mu.Unlock()
for _, ka := range l.keepAlives {
reqIdxs := 0
// find all required leader channels, close, mark as nil
for i, ctx := range ka.ctxs {
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
continue
}
ks := md[rpctypes.MetadataRequireLeaderKey]
if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
continue
}
close(ka.chs[i])
ka.chs[i] = nil
reqIdxs++
}
if reqIdxs == 0 {
continue
}
// remove all channels that required a leader from keepalive
newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
newCtxs := make([]context.Context, len(newChs))
newIdx := 0
for i := range ka.chs {
if ka.chs[i] == nil {
continue
}
newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
newIdx++
}
ka.chs, ka.ctxs = newChs, newCtxs
}
}
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
cctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
if err != nil {
return nil, toErr(ctx, err)
}
resp, rerr := stream.Recv()
if rerr != nil {
return nil, toErr(ctx, rerr)
}
karesp := &LeaseKeepAliveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
}
return karesp, nil
}
func (l *lessor) recvKeepAliveLoop() (gerr error) {
defer func() {
l.mu.Lock()
close(l.donec)
l.loopErr = gerr
for _, ka := range l.keepAlives {
ka.close()
}
l.keepAlives = make(map[LeaseID]*keepAlive)
l.mu.Unlock()
}()
for {
stream, err := l.resetRecv()
if err != nil {
if canceledByCaller(l.stopCtx, err) {
return err
}
} else {
for {
resp, err := stream.Recv()
if err != nil {
if canceledByCaller(l.stopCtx, err) {
return err
}
if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
l.closeRequireLeader()
}
break
}
l.recvKeepAlive(resp)
}
}
select {
case <-time.After(retryConnWait):
case <-l.stopCtx.Done():
return l.stopCtx.Err()
}
}
}
// resetRecv opens a new lease stream and starts sending keep alive requests.
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...)
if err != nil {
cancel()
return nil, err
}
l.mu.Lock()
defer l.mu.Unlock()
if l.stream != nil && l.streamCancel != nil {
l.streamCancel()
}
l.streamCancel = cancel
l.stream = stream
go l.sendKeepAliveLoop(stream)
return stream, nil
}
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
karesp := &LeaseKeepAliveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
}
l.mu.Lock()
defer l.mu.Unlock()
ka, ok := l.keepAlives[karesp.ID]
if !ok {
return
}
if karesp.TTL <= 0 {
// lease expired; close all keep alive channels
delete(l.keepAlives, karesp.ID)
ka.close()
return
}
// send update to all channels
nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
for _, ch := range ka.chs {
select {
case ch <- karesp:
default:
if l.lg != nil {
l.lg.Warn("lease keepalive response queue is full; dropping response send",
zap.Int("queue-size", len(ch)),
zap.Int("queue-capacity", cap(ch)),
)
}
}
// still advance in order to rate-limit keep-alive sends
ka.nextKeepAlive = nextKeepAlive
}
}
// deadlineLoop reaps any keep alive channels that have not received a response
// within the lease TTL
func (l *lessor) deadlineLoop() {
for {
select {
case <-time.After(time.Second):
case <-l.donec:
return
}
now := time.Now()
l.mu.Lock()
for id, ka := range l.keepAlives {
if ka.deadline.Before(now) {
// waited too long for response; lease may be expired
ka.close()
delete(l.keepAlives, id)
}
}
l.mu.Unlock()
}
}
// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
for {
var tosend []LeaseID
now := time.Now()
l.mu.Lock()
for id, ka := range l.keepAlives {
if ka.nextKeepAlive.Before(now) {
tosend = append(tosend, id)
}
}
l.mu.Unlock()
for _, id := range tosend {
r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
if err := stream.Send(r); err != nil {
// TODO do something with this error?
return
}
}
select {
case <-time.After(retryConnWait):
case <-stream.Context().Done():
return
case <-l.donec:
return
case <-l.stopCtx.Done():
return
}
}
}
func (ka *keepAlive) close() {
close(ka.donec)
for _, ch := range ka.chs {
close(ch)
}
}

View File

@ -1,101 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"io/ioutil"
"sync"
"go.etcd.io/etcd/pkg/logutil"
"google.golang.org/grpc/grpclog"
)
var (
lgMu sync.RWMutex
lg logutil.Logger
)
type settableLogger struct {
l grpclog.LoggerV2
mu sync.RWMutex
}
func init() {
// disable client side logs by default
lg = &settableLogger{}
SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
}
// SetLogger sets client-side Logger.
func SetLogger(l grpclog.LoggerV2) {
lgMu.Lock()
lg = logutil.NewLogger(l)
// override grpclog so that any changes happen with locking
grpclog.SetLoggerV2(lg)
lgMu.Unlock()
}
// GetLogger returns the current logutil.Logger.
func GetLogger() logutil.Logger {
lgMu.RLock()
l := lg
lgMu.RUnlock()
return l
}
// NewLogger wraps the given grpclog.LoggerV2 in a logutil.Logger.
func NewLogger(gl grpclog.LoggerV2) logutil.Logger {
return &settableLogger{l: gl}
}
func (s *settableLogger) get() grpclog.LoggerV2 {
s.mu.RLock()
l := s.l
s.mu.RUnlock()
return l
}
// implement the grpclog.LoggerV2 interface
func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) }
func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) }
func (s *settableLogger) Warningf(format string, args ...interface{}) {
s.get().Warningf(format, args...)
}
func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) }
func (s *settableLogger) Errorf(format string, args ...interface{}) {
s.get().Errorf(format, args...)
}
func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) }
func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) V(l int) bool { return s.get().V(l) }
func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 {
s.mu.RLock()
l := s.l
s.mu.RUnlock()
if l.V(lvl) {
return s
}
return logutil.NewDiscardLogger()
}

View File

@ -1,230 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"fmt"
"io"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"google.golang.org/grpc"
)
type (
DefragmentResponse pb.DefragmentResponse
AlarmResponse pb.AlarmResponse
AlarmMember pb.AlarmMember
StatusResponse pb.StatusResponse
HashKVResponse pb.HashKVResponse
MoveLeaderResponse pb.MoveLeaderResponse
)
type Maintenance interface {
// AlarmList gets all active alarms.
AlarmList(ctx context.Context) (*AlarmResponse, error)
// AlarmDisarm disarms a given alarm.
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
// Defragment releases wasted space from internal fragmentation on a given etcd member.
// Defragment is only needed after deleting a large number of keys, to
// reclaim the freed resources.
// Defragment is an expensive operation. Users should avoid defragmenting
// multiple members at the same time.
// To defragment multiple members in the cluster, call Defragment multiple
// times with different endpoints.
Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
// Status gets the status of the endpoint.
Status(ctx context.Context, endpoint string) (*StatusResponse, error)
// HashKV returns a hash of the KV state at the time of the RPC.
// If revision is zero, the hash is computed on all keys. If the revision
// is non-zero, the hash is computed on all keys at or below the given revision.
HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
// Snapshot provides a reader for a point-in-time snapshot of etcd.
// If the context "ctx" is canceled or timed out, reading from returned
// "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
Snapshot(ctx context.Context) (io.ReadCloser, error)
// MoveLeader requests current leader to transfer its leadership to the transferee.
// Request must be made to the leader.
MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
}
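// exampleStatus is an illustrative sketch (not part of the original API)
// that polls Status on each endpoint supplied by the caller.
func exampleStatus(ctx context.Context, m Maintenance, endpoints []string) {
for _, ep := range endpoints {
st, err := m.Status(ctx, ep)
if err != nil {
fmt.Printf("%s: %v\n", ep, err)
continue
}
fmt.Printf("%s: version=%s dbSize=%d\n", ep, st.Version, st.DbSize)
}
}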
type maintenance struct {
dial func(endpoint string) (pb.MaintenanceClient, func(), error)
remote pb.MaintenanceClient
callOpts []grpc.CallOption
}
func NewMaintenance(c *Client) Maintenance {
api := &maintenance{
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
conn, err := c.Dial(endpoint)
if err != nil {
return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
}
cancel := func() { conn.Close() }
return RetryMaintenanceClient(c, conn), cancel, nil
},
remote: RetryMaintenanceClient(c, c.conn),
}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
api := &maintenance{
dial: func(string) (pb.MaintenanceClient, func(), error) {
return remote, func() {}, nil
},
remote: remote,
}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
req := &pb.AlarmRequest{
Action: pb.AlarmRequest_GET,
MemberID: 0, // all
Alarm: pb.AlarmType_NONE, // all
}
resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
if err == nil {
return (*AlarmResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
req := &pb.AlarmRequest{
Action: pb.AlarmRequest_DEACTIVATE,
MemberID: am.MemberID,
Alarm: am.Alarm,
}
if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
ar, err := m.AlarmList(ctx)
if err != nil {
return nil, toErr(ctx, err)
}
ret := AlarmResponse{}
for _, am := range ar.Alarms {
dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
if derr != nil {
return nil, toErr(ctx, derr)
}
ret.Alarms = append(ret.Alarms, dresp.Alarms...)
}
return &ret, nil
}
resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
if err == nil {
return (*AlarmResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
remote, cancel, err := m.dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer cancel()
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
return (*DefragmentResponse)(resp), nil
}
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
remote, cancel, err := m.dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer cancel()
resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
return (*StatusResponse)(resp), nil
}
func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
remote, cancel, err := m.dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer cancel()
resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
return (*HashKVResponse)(resp), nil
}
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
if err != nil {
return nil, toErr(ctx, err)
}
pr, pw := io.Pipe()
go func() {
for {
resp, err := ss.Recv()
if err != nil {
pw.CloseWithError(err)
return
}
if resp == nil && err == nil {
break
}
if _, werr := pw.Write(resp.Blob); werr != nil {
pw.CloseWithError(werr)
return
}
}
pw.Close()
}()
return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
}
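// exampleSnapshotToFile is an illustrative sketch (not part of the
// original API) that streams the snapshot above to disk; it assumes "os"
// among the file's imports, and the destination path is caller-chosen.
func exampleSnapshotToFile(ctx context.Context, m Maintenance, path string) error {
rc, err := m.Snapshot(ctx)
if err != nil {
return err
}
defer rc.Close()
f, err := os.Create(path)
if err != nil {
return err
}
defer f.Close()
// io.Copy drains the pipe fed by the receive goroutine above.
_, err = io.Copy(f, rc)
return err
}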
type snapshotReadCloser struct {
ctx context.Context
io.ReadCloser
}
func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
n, err = rc.ReadCloser.Read(p)
return n, toErr(rc.ctx, err)
}
func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
return (*MoveLeaderResponse)(resp), toErr(ctx, err)
}

View File

@ -1,539 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
type opType int
const (
// A default Op has opType 0, which is invalid.
tRange opType = iota + 1
tPut
tDeleteRange
tTxn
)
var noPrefixEnd = []byte{0}
// Op represents an Operation that kv can execute.
type Op struct {
t opType
key []byte
end []byte
// for range
limit int64
sort *SortOption
serializable bool
keysOnly bool
countOnly bool
minModRev int64
maxModRev int64
minCreateRev int64
maxCreateRev int64
// for range, watch
rev int64
// for watch, put, delete
prevKV bool
// for watch
// fragmentation should be disabled by default
// if true, split watch events when total exceeds
// "--max-request-bytes" flag value + 512-byte
fragment bool
// for put
ignoreValue bool
ignoreLease bool
// progressNotify is for progress updates.
progressNotify bool
// createdNotify is for created event
createdNotify bool
// filters for watchers
filterPut bool
filterDelete bool
// for put
val []byte
leaseID LeaseID
// txn
cmps []Cmp
thenOps []Op
elseOps []Op
}
// accessors / mutators
// IsTxn returns true if the "Op" type is transaction.
func (op Op) IsTxn() bool {
return op.t == tTxn
}
// Txn returns the comparison(if) operations, "then" operations, and "else" operations.
func (op Op) Txn() ([]Cmp, []Op, []Op) {
return op.cmps, op.thenOps, op.elseOps
}
// KeyBytes returns the byte slice holding the Op's key.
func (op Op) KeyBytes() []byte { return op.key }
// WithKeyBytes sets the byte slice for the Op's key.
func (op *Op) WithKeyBytes(key []byte) { op.key = key }
// RangeBytes returns the byte slice holding with the Op's range end, if any.
func (op Op) RangeBytes() []byte { return op.end }
// Rev returns the requested revision, if any.
func (op Op) Rev() int64 { return op.rev }
// IsPut returns true iff the operation is a Put.
func (op Op) IsPut() bool { return op.t == tPut }
// IsGet returns true iff the operation is a Get.
func (op Op) IsGet() bool { return op.t == tRange }
// IsDelete returns true iff the operation is a Delete.
func (op Op) IsDelete() bool { return op.t == tDeleteRange }
// IsSerializable returns true if the serializable field is true.
func (op Op) IsSerializable() bool { return op.serializable }
// IsKeysOnly returns whether keysOnly is set.
func (op Op) IsKeysOnly() bool { return op.keysOnly }
// IsCountOnly returns whether countOnly is set.
func (op Op) IsCountOnly() bool { return op.countOnly }
// MinModRev returns the operation's minimum modify revision.
func (op Op) MinModRev() int64 { return op.minModRev }
// MaxModRev returns the operation's maximum modify revision.
func (op Op) MaxModRev() int64 { return op.maxModRev }
// MinCreateRev returns the operation's minimum create revision.
func (op Op) MinCreateRev() int64 { return op.minCreateRev }
// MaxCreateRev returns the operation's maximum create revision.
func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
// WithRangeBytes sets the byte slice for the Op's range end.
func (op *Op) WithRangeBytes(end []byte) { op.end = end }
// ValueBytes returns the byte slice holding the Op's value, if any.
func (op Op) ValueBytes() []byte { return op.val }
// WithValueBytes sets the byte slice for the Op's value.
func (op *Op) WithValueBytes(v []byte) { op.val = v }
func (op Op) toRangeRequest() *pb.RangeRequest {
if op.t != tRange {
panic("op.t != tRange")
}
r := &pb.RangeRequest{
Key: op.key,
RangeEnd: op.end,
Limit: op.limit,
Revision: op.rev,
Serializable: op.serializable,
KeysOnly: op.keysOnly,
CountOnly: op.countOnly,
MinModRevision: op.minModRev,
MaxModRevision: op.maxModRev,
MinCreateRevision: op.minCreateRev,
MaxCreateRevision: op.maxCreateRev,
}
if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
}
return r
}
func (op Op) toTxnRequest() *pb.TxnRequest {
thenOps := make([]*pb.RequestOp, len(op.thenOps))
for i, tOp := range op.thenOps {
thenOps[i] = tOp.toRequestOp()
}
elseOps := make([]*pb.RequestOp, len(op.elseOps))
for i, eOp := range op.elseOps {
elseOps[i] = eOp.toRequestOp()
}
cmps := make([]*pb.Compare, len(op.cmps))
for i := range op.cmps {
cmps[i] = (*pb.Compare)(&op.cmps[i])
}
return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
}
func (op Op) toRequestOp() *pb.RequestOp {
switch op.t {
case tRange:
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
case tPut:
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
case tDeleteRange:
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
case tTxn:
return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
default:
panic("Unknown Op")
}
}
func (op Op) isWrite() bool {
if op.t == tTxn {
for _, tOp := range op.thenOps {
if tOp.isWrite() {
return true
}
}
for _, tOp := range op.elseOps {
if tOp.isWrite() {
return true
}
}
return false
}
return op.t != tRange
}
// OpGet returns "get" operation based on given key and operation options.
func OpGet(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
return ret
}
// OpDelete returns "delete" operation based on given key and operation options.
func OpDelete(key string, opts ...OpOption) Op {
ret := Op{t: tDeleteRange, key: []byte(key)}
ret.applyOpts(opts)
switch {
case ret.leaseID != 0:
panic("unexpected lease in delete")
case ret.limit != 0:
panic("unexpected limit in delete")
case ret.rev != 0:
panic("unexpected revision in delete")
case ret.sort != nil:
panic("unexpected sort in delete")
case ret.serializable:
panic("unexpected serializable in delete")
case ret.countOnly:
panic("unexpected countOnly in delete")
case ret.minModRev != 0, ret.maxModRev != 0:
panic("unexpected mod revision filter in delete")
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
panic("unexpected create revision filter in delete")
case ret.filterDelete, ret.filterPut:
panic("unexpected filter in delete")
case ret.createdNotify:
panic("unexpected createdNotify in delete")
}
return ret
}
// OpPut returns "put" operation based on given key-value and operation options.
func OpPut(key, val string, opts ...OpOption) Op {
ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
ret.applyOpts(opts)
switch {
case ret.end != nil:
panic("unexpected range in put")
case ret.limit != 0:
panic("unexpected limit in put")
case ret.rev != 0:
panic("unexpected revision in put")
case ret.sort != nil:
panic("unexpected sort in put")
case ret.serializable:
panic("unexpected serializable in put")
case ret.countOnly:
panic("unexpected countOnly in put")
case ret.minModRev != 0, ret.maxModRev != 0:
panic("unexpected mod revision filter in put")
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
panic("unexpected create revision filter in put")
case ret.filterDelete, ret.filterPut:
panic("unexpected filter in put")
case ret.createdNotify:
panic("unexpected createdNotify in put")
}
return ret
}
// OpTxn returns "txn" operation based on given transaction conditions.
func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
}
func opWatch(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
switch {
case ret.leaseID != 0:
panic("unexpected lease in watch")
case ret.limit != 0:
panic("unexpected limit in watch")
case ret.sort != nil:
panic("unexpected sort in watch")
case ret.serializable:
panic("unexpected serializable in watch")
case ret.countOnly:
panic("unexpected countOnly in watch")
case ret.minModRev != 0, ret.maxModRev != 0:
panic("unexpected mod revision filter in watch")
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
panic("unexpected create revision filter in watch")
}
return ret
}
func (op *Op) applyOpts(opts []OpOption) {
for _, opt := range opts {
opt(op)
}
}
// OpOption configures Operations like Get, Put, Delete.
type OpOption func(*Op)
// WithLease attaches a lease ID to a key in 'Put' request.
func WithLease(leaseID LeaseID) OpOption {
return func(op *Op) { op.leaseID = leaseID }
}
// WithLimit limits the number of results to return from 'Get' request.
// If WithLimit is given a 0 limit, it is treated as no limit.
func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
// WithRev specifies the store revision for 'Get' request.
// Or the start revision of 'Watch' request.
func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
// WithSort specifies the ordering in 'Get' request. It requires
// 'WithRange' and/or 'WithPrefix' to be specified too.
// 'target' specifies the target to sort by: key, version, revisions, value.
// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
func WithSort(target SortTarget, order SortOrder) OpOption {
return func(op *Op) {
if target == SortByKey && order == SortAscend {
// If order != SortNone, server fetches the entire key-space,
// and then applies the sort and limit, if provided.
// Since by default the server returns results sorted by keys
// in lexicographically ascending order, the client should ignore
// SortOrder if the target is SortByKey.
order = SortNone
}
op.sort = &SortOption{target, order}
}
}
// GetPrefixRangeEnd gets the range end of the prefix.
// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo)))'.
func GetPrefixRangeEnd(prefix string) string {
return string(getPrefix([]byte(prefix)))
}
func getPrefix(key []byte) []byte {
end := make([]byte, len(key))
copy(end, key)
for i := len(end) - 1; i >= 0; i-- {
if end[i] < 0xff {
end[i] = end[i] + 1
end = end[:i+1]
return end
}
}
// next prefix does not exist (e.g., 0xffff);
// default to WithFromKey policy
return noPrefixEnd
}
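// For example, getPrefix([]byte("foo")) yields []byte("fop"), so a prefix
// request covers the half-open interval ["foo", "fop"). An illustrative
// sketch (not part of the original API), assuming "fmt" among the imports:
func examplePrefixRange() {
fmt.Println(GetPrefixRangeEnd("foo")) // "fop"
fmt.Println(GetPrefixRangeEnd("\xff\xff")) // "\x00", the WithFromKey policy
}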
// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
// can return 'foo1', 'foo2', and so on.
func WithPrefix() OpOption {
return func(op *Op) {
if len(op.key) == 0 {
op.key, op.end = []byte{0}, []byte{0}
return
}
op.end = getPrefix(op.key)
}
}
// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
// For example, a 'Get' request with 'WithRange(end)' returns
// the keys in the range [key, end).
// endKey must be lexicographically greater than the start key.
func WithRange(endKey string) OpOption {
return func(op *Op) { op.end = []byte(endKey) }
}
// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
// to be equal or greater than the key in the argument.
func WithFromKey() OpOption { return WithRange("\x00") }
// WithSerializable makes the 'Get' request serializable. By default,
// it's linearizable. Serializable requests are better for lower latency
// requirements.
func WithSerializable() OpOption {
return func(op *Op) { op.serializable = true }
}
// WithKeysOnly makes the 'Get' request return only the keys; the
// corresponding values are omitted.
func WithKeysOnly() OpOption {
return func(op *Op) { op.keysOnly = true }
}
// WithCountOnly makes the 'Get' request return only the count of keys.
func WithCountOnly() OpOption {
return func(op *Op) { op.countOnly = true }
}
// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
// WithFirstCreate gets the key with the oldest creation revision in the request range.
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
// WithLastCreate gets the key with the latest creation revision in the request range.
func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
// WithFirstKey gets the lexically first key in the request range.
func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
// WithLastKey gets the lexically last key in the request range.
func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
// WithFirstRev gets the key with the oldest modification revision in the request range.
func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
// WithLastRev gets the key with the latest modification revision in the request range.
func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
// withTop gets the first key over the get's prefix given a sort order
func withTop(target SortTarget, order SortOrder) []OpOption {
return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
}
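// exampleLastKey is an illustrative sketch (not part of the original API)
// of the helpers above: WithLastKey expands to WithPrefix, a descending
// key sort, and a limit of one, so at most one KV comes back. It assumes
// "context" and "fmt" among the imports; the "job/" prefix is arbitrary.
func exampleLastKey(ctx context.Context, kv KV) error {
resp, err := kv.Get(ctx, "job/", WithLastKey()...)
if err != nil {
return err
}
if len(resp.Kvs) > 0 {
fmt.Printf("last key under job/: %s\n", resp.Kvs[0].Key)
}
return nil
}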
// WithProgressNotify makes the watch server send periodic progress updates
// every 10 minutes when there are no incoming events.
// Progress updates have zero events in WatchResponse.
func WithProgressNotify() OpOption {
return func(op *Op) {
op.progressNotify = true
}
}
// WithCreatedNotify makes the watch server send the created event.
func WithCreatedNotify() OpOption {
return func(op *Op) {
op.createdNotify = true
}
}
// WithFilterPut discards PUT events from the watcher.
func WithFilterPut() OpOption {
return func(op *Op) { op.filterPut = true }
}
// WithFilterDelete discards DELETE events from the watcher.
func WithFilterDelete() OpOption {
return func(op *Op) { op.filterDelete = true }
}
// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
// nothing will be returned.
func WithPrevKV() OpOption {
return func(op *Op) {
op.prevKV = true
}
}
// WithFragment enables receiving raw watch responses with fragmentation.
// Fragmentation is disabled by default. If fragmentation is enabled,
// the etcd watch server will split watch responses before sending them to
// clients when the total size of watch events exceeds the server-side
// request limit. The default server-side request limit is 1.5 MiB, which
// can be configured as the "--max-request-bytes" flag value plus the
// 512-byte gRPC overhead.
// See "etcdserver/api/v3rpc/watch.go" for more details.
func WithFragment() OpOption {
return func(op *Op) { op.fragment = true }
}
// WithIgnoreValue updates the key using its current value.
// This option cannot be combined with non-empty values.
// Returns an error if the key does not exist.
func WithIgnoreValue() OpOption {
return func(op *Op) {
op.ignoreValue = true
}
}
// WithIgnoreLease updates the key using its current lease.
// This option cannot be combined with WithLease.
// Returns an error if the key does not exist.
func WithIgnoreLease() OpOption {
return func(op *Op) {
op.ignoreLease = true
}
}
// LeaseOp represents an Operation that lease can execute.
type LeaseOp struct {
id LeaseID
// for TimeToLive
attachedKeys bool
}
// LeaseOption configures lease operations.
type LeaseOption func(*LeaseOp)
func (op *LeaseOp) applyOpts(opts []LeaseOption) {
for _, opt := range opts {
opt(op)
}
}
// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
func WithAttachedKeys() LeaseOption {
return func(op *LeaseOp) { op.attachedKeys = true }
}
func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
ret := &LeaseOp{id: id}
ret.applyOpts(opts)
return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
}
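// exampleTimeToLive is an illustrative sketch (not part of the original
// API) of inspecting a lease together with its attached keys; it assumes
// "context" and "fmt" among the imports and a LeaseID from a prior Grant.
func exampleTimeToLive(ctx context.Context, l Lease, id LeaseID) error {
resp, err := l.TimeToLive(ctx, id, WithAttachedKeys())
if err != nil {
return err
}
fmt.Printf("ttl=%ds granted=%ds keys=%d\n", resp.TTL, resp.GrantedTTL, len(resp.Keys))
return nil
}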

View File

@ -1,65 +0,0 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"math"
"time"
"google.golang.org/grpc"
)
var (
// defaultFailFast configures client-side retrying of request failures where data was
// not written to the wire or where the server indicates it did not process the data.
// The gRPC default is "FailFast(true)", but for etcd we default to "FailFast(false)"
// to minimize client request error responses due to transient failures.
defaultFailFast = grpc.FailFast(false)
// client-side request send limit, gRPC default is math.MaxInt32
// Make sure that "client-side send limit < server-side default send/recv limit"
// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
// client-side response receive limit, gRPC default is 4MB
// Make sure that "client-side receive limit >= server-side default send/recv limit"
// because range response can easily exceed request send limits
// Default to math.MaxInt32; writes exceeding the server-side send limit fail anyway
defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
// client-side non-streaming retry limit, only applied to requests where the server responds with
// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
// If set to 0, retry is disabled.
defaultUnaryMaxRetries uint = 100
// client-side streaming retry limit, only applied to requests where the server responds with
// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
// If set to 0, retry is disabled.
defaultStreamMaxRetries = uint(^uint(0)) // max uint
// client-side retry backoff wait between requests.
defaultBackoffWaitBetween = 25 * time.Millisecond
// client-side retry backoff default jitter fraction.
defaultBackoffJitterFraction = 0.10
)
// defaultCallOpts defines a list of default "gRPC.CallOption".
// Some options are exposed to "clientv3.Config".
// Defaults will be overridden by the settings in "clientv3.Config".
var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
// MaxLeaseTTL is the maximum lease TTL value
const MaxLeaseTTL = 9000000000
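// newClientWithLimits is an illustrative sketch (not part of the original
// file) of overriding the message-size defaults above from application
// code via the corresponding clientv3.Config fields; the endpoint and
// sizes are arbitrary.
func newClientWithLimits() (*Client, error) {
return New(Config{
Endpoints: []string{"127.0.0.1:2379"},
MaxCallSendMsgSize: 4 * 1024 * 1024, // raise the 2 MiB send default
MaxCallRecvMsgSize: 8 * 1024 * 1024, // bound the math.MaxInt32 recv default
})
}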

View File

@ -1,30 +0,0 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import "context"
// TODO: remove this when "FailFast=false" is fixed.
// See https://github.com/grpc/grpc-go/issues/1532.
func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
select {
case <-ready:
return nil
case <-rpcCtx.Done():
return rpcCtx.Err()
case <-clientCtx.Done():
return clientCtx.Err()
}
}

View File

@ -1,298 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type retryPolicy uint8
const (
repeatable retryPolicy = iota
nonRepeatable
)
func (rp retryPolicy) String() string {
switch rp {
case repeatable:
return "repeatable"
case nonRepeatable:
return "nonRepeatable"
default:
return "UNKNOWN"
}
}
type rpcFunc func(ctx context.Context) error
type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error
type retryStopErrFunc func(error) bool
// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
//
// immutable requests (e.g. Get) should be retried unless it's
// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
//
// Returning "false" means retry should stop, since client cannot
// handle itself even with retries.
func isSafeRetryImmutableRPC(err error) bool {
eErr := rpctypes.Error(err)
if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
// interrupted by non-transient server-side or gRPC-side error
// client cannot handle itself (e.g. rpctypes.ErrCompacted)
return false
}
// only retry if unavailable
ev, ok := status.FromError(err)
if !ok {
// all errors from RPCs are typed "grpc/status.(*statusError)"
// (ref. https://github.com/grpc/grpc-go/pull/1782)
//
// if the error type is not "grpc/status.(*statusError)",
// it could be from "Dial"
// TODO: do not retry for now
// ref. https://github.com/grpc/grpc-go/issues/1581
return false
}
return ev.Code() == codes.Unavailable
}
// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
//
// mutable requests (e.g. Put, Delete, Txn) should only be retried
// when the status code is codes.Unavailable and the initial connection
// has not been established (no endpoint is up).
//
// Returning "false" means retry should stop, otherwise it violates
// write-at-most-once semantics.
func isSafeRetryMutableRPC(err error) bool {
if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
// not safe for mutable RPCs
// e.g. interrupted by non-transient error that client cannot handle itself,
// or transient error while the connection has already been established
return false
}
desc := rpctypes.ErrorDesc(err)
return desc == "there is no address available" || desc == "there is no connection available"
}
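// isTransientUnavailable is an illustrative sketch (not part of the
// original file) of the core test the two helpers above share: only
// status-typed codes.Unavailable errors are treated as transient.
func isTransientUnavailable(err error) bool {
ev, ok := status.FromError(err)
return ok && ev.Code() == codes.Unavailable
}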
type retryKVClient struct {
kc pb.KVClient
}
// RetryKVClient implements a KVClient.
func RetryKVClient(c *Client) pb.KVClient {
return &retryKVClient{
kc: pb.NewKVClient(c.conn),
}
}
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
return rkv.kc.Put(ctx, in, opts...)
}
func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
return rkv.kc.DeleteRange(ctx, in, opts...)
}
func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
return rkv.kc.Txn(ctx, in, opts...)
}
func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
return rkv.kc.Compact(ctx, in, opts...)
}
type retryLeaseClient struct {
lc pb.LeaseClient
}
// RetryLeaseClient implements a LeaseClient.
func RetryLeaseClient(c *Client) pb.LeaseClient {
return &retryLeaseClient{
lc: pb.NewLeaseClient(c.conn),
}
}
func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
}
type retryClusterClient struct {
cc pb.ClusterClient
}
// RetryClusterClient implements a ClusterClient.
func RetryClusterClient(c *Client) pb.ClusterClient {
return &retryClusterClient{
cc: pb.NewClusterClient(c.conn),
}
}
func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
return rcc.cc.MemberAdd(ctx, in, opts...)
}
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
return rcc.cc.MemberRemove(ctx, in, opts...)
}
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
return rcc.cc.MemberUpdate(ctx, in, opts...)
}
type retryMaintenanceClient struct {
mc pb.MaintenanceClient
}
// RetryMaintenanceClient implements a pb.MaintenanceClient.
func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
return &retryMaintenanceClient{
mc: pb.NewMaintenanceClient(conn),
}
}
func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
return rmc.mc.Defragment(ctx, in, opts...)
}
type retryAuthClient struct {
ac pb.AuthClient
}
// RetryAuthClient implements an AuthClient.
func RetryAuthClient(c *Client) pb.AuthClient {
return &retryAuthClient{
ac: pb.NewAuthClient(c.conn),
}
}
func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
return rac.ac.AuthEnable(ctx, in, opts...)
}
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
return rac.ac.AuthDisable(ctx, in, opts...)
}
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
return rac.ac.UserAdd(ctx, in, opts...)
}
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
return rac.ac.UserDelete(ctx, in, opts...)
}
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
return rac.ac.UserChangePassword(ctx, in, opts...)
}
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
return rac.ac.UserGrantRole(ctx, in, opts...)
}
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
return rac.ac.UserRevokeRole(ctx, in, opts...)
}
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
return rac.ac.RoleAdd(ctx, in, opts...)
}
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
return rac.ac.RoleDelete(ctx, in, opts...)
}
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
return rac.ac.RoleGrantPermission(ctx, in, opts...)
}
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
return rac.ac.RoleRevokePermission(ctx, in, opts...)
}
func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
return rac.ac.Authenticate(ctx, in, opts...)
}

View File

@ -1,382 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
// fine-grained error checking required by the write-at-most-once retry semantics of etcd.
package clientv3
import (
"context"
"io"
"sync"
"time"
"github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// unaryClientInterceptor returns a new retrying unary client interceptor.
//
// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
grpcOpts, retryOpts := filterCallOptions(opts)
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
// short circuit for simplicity, and avoiding allocations.
if callOpts.max == 0 {
return invoker(ctx, method, req, reply, cc, grpcOpts...)
}
var lastErr error
for attempt := uint(0); attempt < callOpts.max; attempt++ {
if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
return err
}
lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
logger.Info("retry unary intercept", zap.Uint("attempt", attempt), zap.Error(lastErr))
if lastErr == nil {
return nil
}
if isContextError(lastErr) {
if ctx.Err() != nil {
// it's the context deadline or cancellation.
return lastErr
}
// it's the callCtx deadline or cancellation, in which case try again.
continue
}
if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
gterr := c.getToken(ctx)
if gterr != nil {
logger.Info("retry failed to fetch new auth token", zap.Error(gterr))
return lastErr // return the original error for simplicity
}
continue
}
if !isSafeRetry(c.lg, lastErr, callOpts) {
return lastErr
}
}
return lastErr
}
}
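// Illustrative sketch (not part of the original file): how the interceptor
// above is typically attached to a connection, and how a single call can
// override its defaults, since retryOption values double as grpc.CallOptions.
// The dial target and option values here are hypothetical.
//
// conn, err := grpc.Dial("etcd.example:2379",
// grpc.WithInsecure(),
// grpc.WithUnaryInterceptor(c.unaryClientInterceptor(logger, withMax(5))),
// )
// ...
// // Per-call override: up to 3 retries, treating the RPC as repeatable.
// resp, err := kv.Range(ctx, req, withMax(3), withRetryPolicy(repeatable))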
// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
//
// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
//
// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
// BidiStreams), the retry interceptor will fail the call.
func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
grpcOpts, retryOpts := filterCallOptions(opts)
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
// short circuit for simplicity, and avoiding allocations.
if callOpts.max == 0 {
return streamer(ctx, desc, cc, method, grpcOpts...)
}
if desc.ClientStreams {
return nil, grpc.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
}
newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
logger.Info("retry stream intercept", zap.Error(err))
if err != nil {
// TODO(mwitkow): Maybe dial and transport errors should be retriable?
return nil, err
}
retryingStreamer := &serverStreamingRetryingStream{
client: c,
ClientStream: newStreamer,
callOpts: callOpts,
ctx: ctx,
streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
return streamer(ctx, desc, cc, method, grpcOpts...)
},
}
return retryingStreamer, nil
}
}
// serverStreamingRetryingStream is an implementation of grpc.ClientStream that acts as a
// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
// a new ClientStream according to the retry policy.
type serverStreamingRetryingStream struct {
grpc.ClientStream
client *Client
bufferedSends []interface{} // single message that the client can send
receivedGood bool // indicates whether any prior receives were successful
wasClosedSend bool // indicates that CloseSend was called
ctx context.Context
callOpts *options
streamerCall func(ctx context.Context) (grpc.ClientStream, error)
mu sync.RWMutex
}
func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
s.mu.Lock()
s.ClientStream = clientStream
s.mu.Unlock()
}
func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
s.mu.RLock()
defer s.mu.RUnlock()
return s.ClientStream
}
func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
s.mu.Lock()
s.bufferedSends = append(s.bufferedSends, m)
s.mu.Unlock()
return s.getStream().SendMsg(m)
}
func (s *serverStreamingRetryingStream) CloseSend() error {
s.mu.Lock()
s.wasClosedSend = true
s.mu.Unlock()
return s.getStream().CloseSend()
}
func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
return s.getStream().Header()
}
func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
return s.getStream().Trailer()
}
func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
if !attemptRetry {
return lastErr // success or hard failure
}
// We start off from attempt 1, because zeroth was already made on normal SendMsg().
for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
return err
}
newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
if err != nil {
// TODO(mwitkow): Maybe dial and transport errors should be retriable?
return err
}
s.setStream(newStream)
attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
//fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr)
if !attemptRetry {
return lastErr
}
}
return lastErr
}
func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
s.mu.RLock()
wasGood := s.receivedGood
s.mu.RUnlock()
err := s.getStream().RecvMsg(m)
if err == nil || err == io.EOF {
s.mu.Lock()
s.receivedGood = true
s.mu.Unlock()
return false, err
} else if wasGood {
// previous RecvMsg in the stream succeeded, no retry logic should interfere
return false, err
}
if isContextError(err) {
if s.ctx.Err() != nil {
return false, err
}
// it's the callCtx deadline or cancellation, in which case try again.
return true, err
}
if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
gterr := s.client.getToken(s.ctx)
if gterr != nil {
s.client.lg.Info("retry failed to fetch new auth token", zap.Error(gterr))
return false, err // return the original error for simplicity
}
return true, err
}
return isSafeRetry(s.client.lg, err, s.callOpts), err
}
func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
s.mu.RLock()
bufferedSends := s.bufferedSends
s.mu.RUnlock()
newStream, err := s.streamerCall(callCtx)
if err != nil {
return nil, err
}
for _, msg := range bufferedSends {
if err := newStream.SendMsg(msg); err != nil {
return nil, err
}
}
if err := newStream.CloseSend(); err != nil {
return nil, err
}
return newStream, nil
}
func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
waitTime := time.Duration(0)
if attempt > 0 {
waitTime = callOpts.backoffFunc(attempt)
}
if waitTime > 0 {
timer := time.NewTimer(waitTime)
select {
case <-ctx.Done():
timer.Stop()
return contextErrToGrpcErr(ctx.Err())
case <-timer.C:
}
}
return nil
}
// isSafeRetry returns "true", if request is safe for retry with the given error.
func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
if isContextError(err) {
return false
}
switch callOpts.retryPolicy {
case repeatable:
return isSafeRetryImmutableRPC(err)
case nonRepeatable:
return isSafeRetryMutableRPC(err)
default:
lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
return false
}
}
func isContextError(err error) bool {
return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled
}
func contextErrToGrpcErr(err error) error {
switch err {
case context.DeadlineExceeded:
return grpc.Errorf(codes.DeadlineExceeded, err.Error())
case context.Canceled:
return grpc.Errorf(codes.Canceled, err.Error())
default:
return grpc.Errorf(codes.Unknown, err.Error())
}
}
var (
defaultOptions = &options{
retryPolicy: nonRepeatable,
max: 0, // disable
backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
retryAuth: true,
}
)
// backoffFunc denotes a family of functions that control the backoff duration between call retries.
//
// They are called with an identifier of the attempt, and should return a time the system client should
// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request
// the deadline of the request takes precedence and the wait will be interrupted before proceeding
// with the next iteration.
type backoffFunc func(attempt uint) time.Duration
// withRetryPolicy sets the retry policy of this call.
func withRetryPolicy(rp retryPolicy) retryOption {
return retryOption{applyFunc: func(o *options) {
o.retryPolicy = rp
}}
}
// withAuthRetry enables or disables authentication retries.
func withAuthRetry(retryAuth bool) retryOption {
return retryOption{applyFunc: func(o *options) {
o.retryAuth = retryAuth
}}
}
// withMax sets the maximum number of retries on this call, or this interceptor.
func withMax(maxRetries uint) retryOption {
return retryOption{applyFunc: func(o *options) {
o.max = maxRetries
}}
}
// withBackoff sets the backoffFunc used to control the time between retries.
func withBackoff(bf backoffFunc) retryOption {
return retryOption{applyFunc: func(o *options) {
o.backoffFunc = bf
}}
}
type options struct {
retryPolicy retryPolicy
max uint
backoffFunc backoffFunc
retryAuth bool
}
// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
type retryOption struct {
grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic.
applyFunc func(opt *options)
}
func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
if len(retryOptions) == 0 {
return opt
}
optCopy := &options{}
*optCopy = *opt
for _, f := range retryOptions {
f.applyFunc(optCopy)
}
return optCopy
}
func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
for _, opt := range callOptions {
if co, ok := opt.(retryOption); ok {
retryOptions = append(retryOptions, co)
} else {
grpcOptions = append(grpcOptions, opt)
}
}
return grpcOptions, retryOptions
}
// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
//
// For example, waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
return func(attempt uint) time.Duration {
return backoffutils.JitterUp(waitBetween, jitterFraction)
}
}
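// Illustrative: with the package defaults above (waitBetween=50ms,
// jitterFraction=0.10), every retry waits a uniformly jittered 45ms-55ms.
// Note the attempt number is deliberately ignored, so the backoff is
// linear with jitter, not exponential:
//
// bf := backoffLinearWithJitter(50*time.Millisecond, 0.10)
// wait := bf(3) // some duration in [45ms, 55ms], independent of the attempt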

View File

@ -1,37 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
type SortTarget int
type SortOrder int
const (
SortNone SortOrder = iota
SortAscend
SortDescend
)
const (
SortByKey SortTarget = iota
SortByVersion
SortByCreateRevision
SortByModRevision
SortByValue
)
type SortOption struct {
Target SortTarget
Order SortOrder
}
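// Illustrative: callers normally select these values through the WithSort
// OpOption on a range request (sketch; cli is an established *Client):
//
// resp, err := cli.Get(ctx, "foo", WithPrefix(),
// WithSort(SortByKey, SortAscend))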

View File

@ -1,151 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"sync"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"google.golang.org/grpc"
)
// Txn is the interface that wraps mini-transactions.
//
// Txn(context.TODO()).If(
// Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2)
// ).Then(
// OpPut(k2,v2), OpPut(k3,v3)
// ).Else(
// OpPut(k4,v4), OpPut(k5,v5)
// ).Commit()
//
type Txn interface {
// If takes a list of comparisons. If all comparisons passed in succeed,
// the operations passed into Then() will be executed; otherwise the
// operations passed into Else() will be executed.
If(cs ...Cmp) Txn
// Then takes a list of operations. The Ops list will be executed if the
// comparisons passed to If() succeed.
Then(ops ...Op) Txn
// Else takes a list of operations. The Ops list will be executed if the
// comparisons passed to If() fail.
Else(ops ...Op) Txn
// Commit tries to commit the transaction.
Commit() (*TxnResponse, error)
}
type txn struct {
kv *kv
ctx context.Context
mu sync.Mutex
cif bool
cthen bool
celse bool
isWrite bool
cmps []*pb.Compare
sus []*pb.RequestOp
fas []*pb.RequestOp
callOpts []grpc.CallOption
}
func (txn *txn) If(cs ...Cmp) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.cif {
panic("cannot call If twice!")
}
if txn.cthen {
panic("cannot call If after Then!")
}
if txn.celse {
panic("cannot call If after Else!")
}
txn.cif = true
for i := range cs {
txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
}
return txn
}
func (txn *txn) Then(ops ...Op) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.cthen {
panic("cannot call Then twice!")
}
if txn.celse {
panic("cannot call Then after Else!")
}
txn.cthen = true
for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite()
txn.sus = append(txn.sus, op.toRequestOp())
}
return txn
}
func (txn *txn) Else(ops ...Op) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.celse {
panic("cannot call Else twice!")
}
txn.celse = true
for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite()
txn.fas = append(txn.fas, op.toRequestOp())
}
return txn
}
func (txn *txn) Commit() (*TxnResponse, error) {
txn.mu.Lock()
defer txn.mu.Unlock()
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
var resp *pb.TxnResponse
var err error
resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
if err != nil {
return nil, toErr(txn.ctx, err)
}
return (*TxnResponse)(resp), nil
}
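// Illustrative end-to-end use of the flow above (sketch; cli is an
// established *Client and the keys/values are hypothetical):
//
// resp, err := cli.Txn(ctx).
// If(Compare(Value("k1"), "=", "v1")).
// Then(OpPut("k2", "v2")).
// Else(OpGet("k1")).
// Commit()
// if err == nil && resp.Succeeded {
// // all If comparisons held, so the Then ops ran
// }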

View File

@ -1,982 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"errors"
"fmt"
"sync"
"time"
v3rpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
const (
EventTypeDelete = mvccpb.DELETE
EventTypePut = mvccpb.PUT
closeSendErrTimeout = 250 * time.Millisecond
)
type Event mvccpb.Event
type WatchChan <-chan WatchResponse
type Watcher interface {
// Watch watches on a key or prefix. The watched events will be returned
// through the returned channel. If revisions waiting to be sent over the
// watch are compacted, then the watch will be canceled by the server, the
// client will post a compacted error watch response, and the channel will close.
// If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
// and "WatchResponse" from this closed channel has zero events and nil "Err()".
// The context "ctx" MUST be canceled as soon as the watcher is no longer being used,
// to release the associated resources.
//
// If the context is "context.Background/TODO", returned "WatchChan" will
// not be closed and will block until an event is triggered, except when the server
// returns a non-recoverable error (e.g. ErrCompacted).
// For example, when context passed with "WithRequireLeader" and the
// connected server has no leader (e.g. due to network partition),
// error "etcdserver: no leader" (ErrNoLeader) will be returned,
// and then "WatchChan" is closed with non-nil "Err()".
// In order to prevent a watch stream being stuck in a partitioned node,
// make sure to wrap context with "WithRequireLeader".
//
// Otherwise, as long as the context has not been canceled or timed out,
// watch will retry on other recoverable errors forever until reconnected.
//
// TODO: explicitly set context error in the last "WatchResponse" message and close channel?
// Currently, client contexts are overwritten with "valCtx" that never closes.
// TODO(v3.4): configure watch retry policy, limit maximum retry number
// (see https://go.etcd.io/etcd/issues/8980)
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
// RequestProgress requests a progress notify response be sent in all watch channels.
RequestProgress(ctx context.Context) error
// Close closes the watcher and cancels all watch requests.
Close() error
}
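// Illustrative sketch of the contract documented above (cli is an
// established *Client; the key is hypothetical). WithRequireLeader guards
// against a watch getting stuck on a partitioned member, and canceling the
// context releases the watcher's resources:
//
// ctx, cancel := context.WithCancel(WithRequireLeader(context.Background()))
// defer cancel()
// for wresp := range cli.Watch(ctx, "foo", WithPrefix()) {
// if err := wresp.Err(); err != nil {
// break // e.g. compacted revision or lost leader; the channel closes
// }
// for _, ev := range wresp.Events {
// _ = ev // ev.Type, ev.Kv.Key, ev.Kv.Value
// }
// }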
type WatchResponse struct {
Header pb.ResponseHeader
Events []*Event
// CompactRevision is the minimum revision the watcher may receive.
CompactRevision int64
// Canceled is used to indicate watch failure.
// If the watch failed and the stream was about to close, before the channel is closed,
// the channel sends a final response that has Canceled set to true with a non-nil Err().
Canceled bool
// Created is used to indicate the creation of the watcher.
Created bool
closeErr error
// cancelReason is the reason for canceling the watch
cancelReason string
}
// IsCreate returns true if the event indicates that the key is newly created.
func (e *Event) IsCreate() bool {
return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
}
// IsModify returns true if the event indicates that a new value was put on an existing key.
func (e *Event) IsModify() bool {
return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
}
// Err is the error value if this WatchResponse holds an error.
func (wr *WatchResponse) Err() error {
switch {
case wr.closeErr != nil:
return v3rpc.Error(wr.closeErr)
case wr.CompactRevision != 0:
return v3rpc.ErrCompacted
case wr.Canceled:
if len(wr.cancelReason) != 0 {
return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
}
return v3rpc.ErrFutureRev
}
return nil
}
// IsProgressNotify returns true if the WatchResponse is a progress notification.
func (wr *WatchResponse) IsProgressNotify() bool {
return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
}
// watcher implements the Watcher interface
type watcher struct {
remote pb.WatchClient
callOpts []grpc.CallOption
// mu protects the grpc streams map
mu sync.RWMutex
// streams holds all the active grpc streams keyed by ctx value.
streams map[string]*watchGrpcStream
}
// watchGrpcStream tracks all watch resources attached to a single grpc stream.
type watchGrpcStream struct {
owner *watcher
remote pb.WatchClient
callOpts []grpc.CallOption
// ctx controls internal remote.Watch requests
ctx context.Context
// ctxKey is the key used when looking up this stream's context
ctxKey string
cancel context.CancelFunc
// substreams holds all active watchers on this grpc stream
substreams map[int64]*watcherStream
// resuming holds all resuming watchers on this grpc stream
resuming []*watcherStream
// reqc sends a watch request from Watch() to the main goroutine
reqc chan watchStreamRequest
// respc receives data from the watch client
respc chan *pb.WatchResponse
// donec closes to broadcast shutdown
donec chan struct{}
// errc transmits errors from grpc Recv to the watch stream reconnect logic
errc chan error
// closingc gets the watcherStream of closing watchers
closingc chan *watcherStream
// wg is Done when all substream goroutines have exited
wg sync.WaitGroup
// resumec closes to signal that all substreams should begin resuming
resumec chan struct{}
// closeErr is the error that closed the watch stream
closeErr error
}
// watchStreamRequest is a union of the supported watch request operation types
type watchStreamRequest interface {
toPB() *pb.WatchRequest
}
// watchRequest is issued by the subscriber to start a new watcher
type watchRequest struct {
ctx context.Context
key string
end string
rev int64
// send created notification event if this field is true
createdNotify bool
// progressNotify is for progress updates
progressNotify bool
// fragmentation should be disabled by default
// if true, split watch events when total exceeds
// "--max-request-bytes" flag value + 512-byte
fragment bool
// filters is the list of events to filter out
filters []pb.WatchCreateRequest_FilterType
// get the previous key-value pair before the event happens
prevKV bool
// retc receives a chan WatchResponse once the watcher is established
retc chan chan WatchResponse
}
// progressRequest is issued by the subscriber to request watch progress
type progressRequest struct {
}
// watcherStream represents a registered watcher
type watcherStream struct {
// initReq is the request that initiated this request
initReq watchRequest
// outc publishes watch responses to subscriber
outc chan WatchResponse
// recvc buffers watch responses before publishing
recvc chan *WatchResponse
// donec closes when the watcherStream goroutine stops.
donec chan struct{}
// closing is set to true when stream should be scheduled to shutdown.
closing bool
// id is the registered watch id on the grpc stream
id int64
// buf holds all events received from etcd but not yet consumed by the client
buf []*WatchResponse
}
func NewWatcher(c *Client) Watcher {
return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
}
func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
w := &watcher{
remote: wc,
streams: make(map[string]*watchGrpcStream),
}
if c != nil {
w.callOpts = c.callOpts
}
return w
}
// never closes
var valCtxCh = make(chan struct{})
var zeroTime = time.Unix(0, 0)
// ctx with only the values; never Done
type valCtx struct{ context.Context }
func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
func (vc *valCtx) Err() error { return nil }
func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
ctx, cancel := context.WithCancel(&valCtx{inctx})
wgs := &watchGrpcStream{
owner: w,
remote: w.remote,
callOpts: w.callOpts,
ctx: ctx,
ctxKey: streamKeyFromCtx(inctx),
cancel: cancel,
substreams: make(map[int64]*watcherStream),
respc: make(chan *pb.WatchResponse),
reqc: make(chan watchStreamRequest),
donec: make(chan struct{}),
errc: make(chan error, 1),
closingc: make(chan *watcherStream),
resumec: make(chan struct{}),
}
go wgs.run()
return wgs
}
// Watch posts a watch request to run() and waits for a new watcher channel
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
ow := opWatch(key, opts...)
var filters []pb.WatchCreateRequest_FilterType
if ow.filterPut {
filters = append(filters, pb.WatchCreateRequest_NOPUT)
}
if ow.filterDelete {
filters = append(filters, pb.WatchCreateRequest_NODELETE)
}
wr := &watchRequest{
ctx: ctx,
createdNotify: ow.createdNotify,
key: string(ow.key),
end: string(ow.end),
rev: ow.rev,
progressNotify: ow.progressNotify,
fragment: ow.fragment,
filters: filters,
prevKV: ow.prevKV,
retc: make(chan chan WatchResponse, 1),
}
ok := false
ctxKey := streamKeyFromCtx(ctx)
// find or allocate appropriate grpc watch stream
w.mu.Lock()
if w.streams == nil {
// closed
w.mu.Unlock()
ch := make(chan WatchResponse)
close(ch)
return ch
}
wgs := w.streams[ctxKey]
if wgs == nil {
wgs = w.newWatcherGrpcStream(ctx)
w.streams[ctxKey] = wgs
}
donec := wgs.donec
reqc := wgs.reqc
w.mu.Unlock()
// couldn't create channel; return closed channel
closeCh := make(chan WatchResponse, 1)
// submit request
select {
case reqc <- wr:
ok = true
case <-wr.ctx.Done():
case <-donec:
if wgs.closeErr != nil {
closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
return w.Watch(ctx, key, opts...)
}
// receive channel
if ok {
select {
case ret := <-wr.retc:
return ret
case <-ctx.Done():
case <-donec:
if wgs.closeErr != nil {
closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
return w.Watch(ctx, key, opts...)
}
}
close(closeCh)
return closeCh
}
func (w *watcher) Close() (err error) {
w.mu.Lock()
streams := w.streams
w.streams = nil
w.mu.Unlock()
for _, wgs := range streams {
if werr := wgs.close(); werr != nil {
err = werr
}
}
return err
}
// RequestProgress requests a progress notify response be sent in all watch channels.
func (w *watcher) RequestProgress(ctx context.Context) (err error) {
ctxKey := streamKeyFromCtx(ctx)
w.mu.Lock()
if w.streams == nil {
w.mu.Unlock()
return fmt.Errorf("no stream found for context")
}
wgs := w.streams[ctxKey]
if wgs == nil {
wgs = w.newWatcherGrpcStream(ctx)
w.streams[ctxKey] = wgs
}
donec := wgs.donec
reqc := wgs.reqc
w.mu.Unlock()
pr := &progressRequest{}
select {
case reqc <- pr:
return nil
case <-ctx.Done():
if err == nil {
return ctx.Err()
}
return err
case <-donec:
if wgs.closeErr != nil {
return wgs.closeErr
}
// retry; may have dropped stream from no ctxs
return w.RequestProgress(ctx)
}
}
func (w *watchGrpcStream) close() (err error) {
w.cancel()
<-w.donec
select {
case err = <-w.errc:
default:
}
return toErr(w.ctx, err)
}
func (w *watcher) closeStream(wgs *watchGrpcStream) {
w.mu.Lock()
close(wgs.donec)
wgs.cancel()
if w.streams != nil {
delete(w.streams, wgs.ctxKey)
}
w.mu.Unlock()
}
func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
// check watch ID for backward compatibility (<= v3.3)
if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
// failed; no channel
close(ws.recvc)
return
}
ws.id = resp.WatchId
w.substreams[ws.id] = ws
}
func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
select {
case ws.outc <- *resp:
case <-ws.initReq.ctx.Done():
case <-time.After(closeSendErrTimeout):
}
close(ws.outc)
}
func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
// send channel response in case stream was never established
select {
case ws.initReq.retc <- ws.outc:
default:
}
// close subscriber's channel
if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
} else if ws.outc != nil {
close(ws.outc)
}
if ws.id != -1 {
delete(w.substreams, ws.id)
return
}
for i := range w.resuming {
if w.resuming[i] == ws {
w.resuming[i] = nil
return
}
}
}
// run is the root of the goroutines for managing a watcher client
func (w *watchGrpcStream) run() {
var wc pb.Watch_WatchClient
var closeErr error
// substreams marked to close but goroutine still running; needed for
// avoiding double-closing recvc on grpc stream teardown
closing := make(map[*watcherStream]struct{})
defer func() {
w.closeErr = closeErr
// shutdown substreams and resuming substreams
for _, ws := range w.substreams {
if _, ok := closing[ws]; !ok {
close(ws.recvc)
closing[ws] = struct{}{}
}
}
for _, ws := range w.resuming {
if _, ok := closing[ws]; ws != nil && !ok {
close(ws.recvc)
closing[ws] = struct{}{}
}
}
w.joinSubstreams()
for range closing {
w.closeSubstream(<-w.closingc)
}
w.wg.Wait()
w.owner.closeStream(w)
}()
// start a stream with the etcd grpc server
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
cancelSet := make(map[int64]struct{})
var cur *pb.WatchResponse
for {
select {
// Watch() requested
case req := <-w.reqc:
switch wreq := req.(type) {
case *watchRequest:
outc := make(chan WatchResponse, 1)
// TODO: pass custom watch ID?
ws := &watcherStream{
initReq: *wreq,
id: -1,
outc: outc,
// unbuffered so resumes won't cause repeat events
recvc: make(chan *WatchResponse),
}
ws.donec = make(chan struct{})
w.wg.Add(1)
go w.serveSubstream(ws, w.resumec)
// queue up for watcher creation/resume
w.resuming = append(w.resuming, ws)
if len(w.resuming) == 1 {
// head of resume queue, can register a new watcher
wc.Send(ws.initReq.toPB())
}
case *progressRequest:
wc.Send(wreq.toPB())
}
// new events from the watch client
case pbresp := <-w.respc:
if cur == nil || pbresp.Created || pbresp.Canceled {
cur = pbresp
} else if cur != nil && cur.WatchId == pbresp.WatchId {
// merge new events
cur.Events = append(cur.Events, pbresp.Events...)
// update "Fragment" field; last response with "Fragment" == false
cur.Fragment = pbresp.Fragment
}
switch {
case pbresp.Created:
// response to head of queue creation
if ws := w.resuming[0]; ws != nil {
w.addSubstream(pbresp, ws)
w.dispatchEvent(pbresp)
w.resuming[0] = nil
}
if ws := w.nextResume(); ws != nil {
wc.Send(ws.initReq.toPB())
}
// reset for next iteration
cur = nil
case pbresp.Canceled && pbresp.CompactRevision == 0:
delete(cancelSet, pbresp.WatchId)
if ws, ok := w.substreams[pbresp.WatchId]; ok {
// signal to stream goroutine to update closingc
close(ws.recvc)
closing[ws] = struct{}{}
}
// reset for next iteration
cur = nil
case cur.Fragment:
// watch response events are still fragmented
// continue to fetch next fragmented event arrival
continue
default:
// dispatch to appropriate watch stream
ok := w.dispatchEvent(cur)
// reset for next iteration
cur = nil
if ok {
break
}
// watch response on unexpected watch id; cancel id
if _, ok := cancelSet[pbresp.WatchId]; ok {
break
}
cancelSet[pbresp.WatchId] = struct{}{}
cr := &pb.WatchRequest_CancelRequest{
CancelRequest: &pb.WatchCancelRequest{
WatchId: pbresp.WatchId,
},
}
req := &pb.WatchRequest{RequestUnion: cr}
wc.Send(req)
}
// watch client failed on Recv; spawn another if possible
case err := <-w.errc:
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
closeErr = err
return
}
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
if ws := w.nextResume(); ws != nil {
wc.Send(ws.initReq.toPB())
}
cancelSet = make(map[int64]struct{})
case <-w.ctx.Done():
return
case ws := <-w.closingc:
w.closeSubstream(ws)
delete(closing, ws)
// no more watchers on this stream, shutdown
if len(w.substreams)+len(w.resuming) == 0 {
return
}
}
}
}
// nextResume chooses the next resuming watcher to register with the grpc stream. Abandoned
// streams are marked as nil in the queue since the head must wait for its inflight registration.
func (w *watchGrpcStream) nextResume() *watcherStream {
for len(w.resuming) != 0 {
if w.resuming[0] != nil {
return w.resuming[0]
}
w.resuming = w.resuming[1:]
}
return nil
}
// dispatchEvent sends a WatchResponse to the appropriate watcher stream
func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
events := make([]*Event, len(pbresp.Events))
for i, ev := range pbresp.Events {
events[i] = (*Event)(ev)
}
// TODO: return watch ID?
wr := &WatchResponse{
Header: *pbresp.Header,
Events: events,
CompactRevision: pbresp.CompactRevision,
Created: pbresp.Created,
Canceled: pbresp.Canceled,
cancelReason: pbresp.CancelReason,
}
// watch IDs are zero-indexed, so request notify watch responses are assigned a watch ID of -1 to
// indicate they should be broadcast.
if wr.IsProgressNotify() && pbresp.WatchId == -1 {
return w.broadcastResponse(wr)
}
return w.unicastResponse(wr, pbresp.WatchId)
}
// broadcastResponse sends a watch response to all watch substreams.
func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
for _, ws := range w.substreams {
select {
case ws.recvc <- wr:
case <-ws.donec:
}
}
return true
}
// unicastResponse sends a watch response to a specific watch substream.
func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
ws, ok := w.substreams[watchId]
if !ok {
return false
}
select {
case ws.recvc <- wr:
case <-ws.donec:
return false
}
return true
}
// serveWatchClient forwards messages from the grpc stream to run()
func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
for {
resp, err := wc.Recv()
if err != nil {
select {
case w.errc <- err:
case <-w.donec:
}
return
}
select {
case w.respc <- resp:
case <-w.donec:
return
}
}
}
// serveSubstream forwards watch responses from run() to the subscriber
func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
if ws.closing {
panic("created substream goroutine but substream is closing")
}
// nextRev is the minimum expected next revision
nextRev := ws.initReq.rev
resuming := false
defer func() {
if !resuming {
ws.closing = true
}
close(ws.donec)
if !resuming {
w.closingc <- ws
}
w.wg.Done()
}()
emptyWr := &WatchResponse{}
for {
curWr := emptyWr
outc := ws.outc
if len(ws.buf) > 0 {
curWr = ws.buf[0]
} else {
outc = nil
}
select {
case outc <- *curWr:
if ws.buf[0].Err() != nil {
return
}
ws.buf[0] = nil
ws.buf = ws.buf[1:]
case wr, ok := <-ws.recvc:
if !ok {
// shutdown from closeSubstream
return
}
if wr.Created {
if ws.initReq.retc != nil {
ws.initReq.retc <- ws.outc
// to prevent next write from taking the slot in buffered channel
// and posting duplicate create events
ws.initReq.retc = nil
// send first creation event only if requested
if ws.initReq.createdNotify {
ws.outc <- *wr
}
// once the watch channel is returned, a current revision
// watch must resume at the store revision. This is necessary
// for the following case to work as expected:
// wch := m1.Watch("a")
// m2.Put("a", "b")
// <-wch
// If the revision is only bound on the first observed event,
// if wch is disconnected before the Put is issued, then reconnects
// after it is committed, it'll miss the Put.
if ws.initReq.rev == 0 {
nextRev = wr.Header.Revision
}
}
} else {
// current progress of watch; <= store revision
nextRev = wr.Header.Revision
}
if len(wr.Events) > 0 {
nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
}
ws.initReq.rev = nextRev
// created event is already sent above,
// watcher should not post duplicate events
if wr.Created {
continue
}
// TODO pause channel if buffer gets too large
ws.buf = append(ws.buf, wr)
case <-w.ctx.Done():
return
case <-ws.initReq.ctx.Done():
return
case <-resumec:
resuming = true
return
}
}
// lazily send cancel message if events on missing id
}
func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
// mark all substreams as resuming
close(w.resumec)
w.resumec = make(chan struct{})
w.joinSubstreams()
for _, ws := range w.substreams {
ws.id = -1
w.resuming = append(w.resuming, ws)
}
// strip out nils, if any
var resuming []*watcherStream
for _, ws := range w.resuming {
if ws != nil {
resuming = append(resuming, ws)
}
}
w.resuming = resuming
w.substreams = make(map[int64]*watcherStream)
// connect to grpc stream while accepting watcher cancelation
stopc := make(chan struct{})
donec := w.waitCancelSubstreams(stopc)
wc, err := w.openWatchClient()
close(stopc)
<-donec
// serve all non-closing streams, even if there's a client error
// so that the teardown path can shutdown the streams as expected.
for _, ws := range w.resuming {
if ws.closing {
continue
}
ws.donec = make(chan struct{})
w.wg.Add(1)
go w.serveSubstream(ws, w.resumec)
}
if err != nil {
return nil, v3rpc.Error(err)
}
// receive data from new grpc stream
go w.serveWatchClient(wc)
return wc, nil
}
func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
var wg sync.WaitGroup
wg.Add(len(w.resuming))
donec := make(chan struct{})
for i := range w.resuming {
go func(ws *watcherStream) {
defer wg.Done()
if ws.closing {
if ws.initReq.ctx.Err() != nil && ws.outc != nil {
close(ws.outc)
ws.outc = nil
}
return
}
select {
case <-ws.initReq.ctx.Done():
// closed ws will be removed from resuming
ws.closing = true
close(ws.outc)
ws.outc = nil
w.wg.Add(1)
go func() {
defer w.wg.Done()
w.closingc <- ws
}()
case <-stopc:
}
}(w.resuming[i])
}
go func() {
defer close(donec)
wg.Wait()
}()
return donec
}
// joinSubstreams waits for all substream goroutines to complete.
func (w *watchGrpcStream) joinSubstreams() {
for _, ws := range w.substreams {
<-ws.donec
}
for _, ws := range w.resuming {
if ws != nil {
<-ws.donec
}
}
}
var maxBackoff = 100 * time.Millisecond
// openWatchClient retries opening a watch client until success or halt.
// manually retry in case "ws==nil && err==nil"
// TODO: remove FailFast=false
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
backoff := time.Millisecond
for {
select {
case <-w.ctx.Done():
if err == nil {
return nil, w.ctx.Err()
}
return nil, err
default:
}
if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
break
}
if isHaltErr(w.ctx, err) {
return nil, v3rpc.Error(err)
}
if isUnavailableErr(w.ctx, err) {
// retry, but backoff
if backoff < maxBackoff {
// 25% backoff factor
backoff = backoff + backoff/4
if backoff > maxBackoff {
backoff = maxBackoff
}
}
time.Sleep(backoff)
}
}
return ws, nil
}
// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
func (wr *watchRequest) toPB() *pb.WatchRequest {
req := &pb.WatchCreateRequest{
StartRevision: wr.rev,
Key: []byte(wr.key),
RangeEnd: []byte(wr.end),
ProgressNotify: wr.progressNotify,
Filters: wr.filters,
PrevKv: wr.prevKV,
Fragment: wr.fragment,
}
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
return &pb.WatchRequest{RequestUnion: cr}
}
// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
func (pr *progressRequest) toPB() *pb.WatchRequest {
req := &pb.WatchProgressRequest{}
cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
return &pb.WatchRequest{RequestUnion: cr}
}
func streamKeyFromCtx(ctx context.Context) string {
if md, ok := metadata.FromOutgoingContext(ctx); ok {
return fmt.Sprintf("%+v", md)
}
return ""
}
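// Illustrative: streams are keyed by the outgoing metadata of the caller's
// context, so Watch calls with different metadata land on separate gRPC
// streams (sketch; the metadata key/values are hypothetical):
//
// ctx1 := metadata.AppendToOutgoingContext(context.Background(), "k", "a")
// ctx2 := metadata.AppendToOutgoingContext(context.Background(), "k", "b")
// // streamKeyFromCtx(ctx1) != streamKeyFromCtx(ctx2)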

View File

@ -66,7 +66,7 @@ func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
if ci.closed {
return nil, errors.New("errCopyInClosed")
return nil, errors.New("copyin query is closed")
}
if len(v) == 0 {

View File

@ -1,7 +1,13 @@
# Archived project. No maintenance.
This project is not maintained anymore and is archived. Feel free to fork and
make your own changes if needed. For more detail, read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/)
Thanks to everyone for their valuable feedback and contributions.
# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) [![Build Status](https://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color)
Color lets you use colorized outputs in terms of [ANSI Escape
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
has support for Windows too! The API can be used in several ways, pick one that

View File

@ -1,3 +1,10 @@
# Archived project. No maintenance.
This project is not maintained anymore and is archived. Feel free to fork and
make your own changes if needed. For more detail, read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/)
Thanks to everyone for their valuable feedback and contributions.
# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs)
Structs contains various utilities to work with Go (Golang) structs. It was

View File

@ -45,6 +45,9 @@ type File struct {
// newFile initializes File object with given data sources.
func newFile(dataSources []dataSource, opts LoadOptions) *File {
if len(opts.KeyValueDelimiters) == 0 {
opts.KeyValueDelimiters = "=:"
}
return &File{
BlockMode: true,
dataSources: dataSources,
@ -286,7 +289,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
for _, kname := range sec.keyList {
keyLength := len(kname)
// First case will surround key by ` and second by """
if strings.ContainsAny(kname, "\"=:") {
if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) {
keyLength += 2
} else if strings.Contains(kname, "`") {
keyLength += 6
@ -329,7 +332,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
switch {
case key.isAutoIncrement:
kname = "-"
case strings.ContainsAny(kname, "\"=:"):
case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters):
kname = "`" + kname + "`"
case strings.Contains(kname, "`"):
kname = `"""` + kname + `"""`

View File

@ -34,7 +34,7 @@ const (
// Maximum allowed depth when recursively substituing variable names.
_DEPTH_VALUES = 99
_VERSION = "1.38.3"
_VERSION = "1.39.0"
)
// Version returns current package version literal.
@ -168,6 +168,8 @@ type LoadOptions struct {
// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
// conform to key/value pairs. Specify the names of those blocks here.
UnparseableSections []string
// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
KeyValueDelimiters string
}
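// Illustrative: restricting the delimiter set to "=" so colons may appear
// unescaped in key names (sketch; the file path is hypothetical):
//
// f, err := LoadSources(LoadOptions{KeyValueDelimiters: "="}, "app.ini")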
func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {

View File

@ -100,7 +100,7 @@ func cleanComment(in []byte) ([]byte, bool) {
return in[i:], true
}
func readKeyName(in []byte) (string, int, error) {
func readKeyName(delimiters string, in []byte) (string, int, error) {
line := string(in)
// Check if key name surrounded by quotes.
@ -127,7 +127,7 @@ func readKeyName(in []byte) (string, int, error) {
pos += startIdx
// Find key-value delimiter
i := strings.IndexAny(line[pos+startIdx:], "=:")
i := strings.IndexAny(line[pos+startIdx:], delimiters)
if i < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
@ -135,7 +135,7 @@ func readKeyName(in []byte) (string, int, error) {
return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
}
endIdx = strings.IndexAny(line, "=:")
endIdx = strings.IndexAny(line, delimiters)
if endIdx < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
@ -428,7 +428,7 @@ func (f *File) parse(reader io.Reader) (err error) {
continue
}
kname, offset, err := readKeyName(line)
kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
if err != nil {
// Treat as boolean key when desired, and whole line is key name.
if IsErrDelimiterNotFound(err) {

View File

@ -17,7 +17,7 @@ Supported Versions
The following matrix shows the versions of Go and Cassandra that are tested with the integration test suite as part of the CI build:
Go/Cassandra | 2.1.x | 2.2.x | 3.0.x
Go/Cassandra | 2.1.x | 2.2.x | 3.x.x
-------------| -------| ------| ---------
1.10 | yes | yes | yes
1.11 | yes | yes | yes

242
vendor/github.com/gocql/gocql/conn.go generated vendored
View File

@ -227,51 +227,19 @@ func (s *Session) dial(host *HostInfo, cfg *ConnConfig, errorHandler ConnErrorHa
}
defer cancel()
frameTicker := make(chan struct{}, 1)
startupErr := make(chan error)
go func() {
for range frameTicker {
err := c.recv()
if err != nil {
select {
case startupErr <- err:
case <-ctx.Done():
}
startup := &startupCoordinator{
frameTicker: make(chan struct{}),
conn: c,
}
return
}
}
}()
go func() {
defer close(frameTicker)
err := c.startup(ctx, frameTicker)
select {
case startupErr <- err:
case <-ctx.Done():
}
}()
select {
case err := <-startupErr:
if err != nil {
c.Close()
return nil, err
}
case <-ctx.Done():
c.Close()
return nil, errors.New("gocql: no response to connection startup within timeout")
if err := startup.setupConn(ctx); err != nil {
c.close()
return nil, err
}
// don't coalesce startup frames
if s.cfg.WriteCoalesceWaitTime > 0 {
w := &writeCoalescer{
fcond: sync.NewCond(&sync.Mutex{}),
cond: sync.NewCond(&sync.Mutex{}),
w: c.w,
}
go w.writeFlusher(s.cfg.WriteCoalesceWaitTime, c.quit)
c.w = w
c.w = newWriteCoalescer(c.w, s.cfg.WriteCoalesceWaitTime, c.quit)
}
go c.serve()
@ -306,27 +274,98 @@ func (c *Conn) Read(p []byte) (n int, err error) {
return
}
func (c *Conn) startup(ctx context.Context, frameTicker chan struct{}) error {
m := map[string]string{
"CQL_VERSION": c.cfg.CQLVersion,
}
type startupCoordinator struct {
conn *Conn
frameTicker chan struct{}
}
if c.compressor != nil {
m["COMPRESSION"] = c.compressor.Name()
}
func (s *startupCoordinator) setupConn(ctx context.Context) error {
startupErr := make(chan error)
go func() {
for range s.frameTicker {
err := s.conn.recv()
if err != nil {
select {
case startupErr <- err:
case <-ctx.Done():
}
return
}
}
}()
go func() {
defer close(s.frameTicker)
err := s.options(ctx)
select {
case startupErr <- err:
case <-ctx.Done():
}
}()
select {
case frameTicker <- struct{}{}:
case err := <-startupErr:
if err != nil {
return err
}
case <-ctx.Done():
return ctx.Err()
return errors.New("gocql: no response to connection startup within timeout")
}
framer, err := c.exec(ctx, &writeStartupFrame{opts: m}, nil)
return nil
}
func (s *startupCoordinator) write(ctx context.Context, frame frameWriter) (frame, error) {
select {
case s.frameTicker <- struct{}{}:
case <-ctx.Done():
return nil, ctx.Err()
}
framer, err := s.conn.exec(ctx, frame, nil)
if err != nil {
return nil, err
}
return framer.parseFrame()
}
func (s *startupCoordinator) options(ctx context.Context) error {
frame, err := s.write(ctx, &writeOptionsFrame{})
if err != nil {
return err
}
frame, err := framer.parseFrame()
supported, ok := frame.(*supportedFrame)
if !ok {
return NewErrProtocol("Unknown type of response to startup frame: %T", frame)
}
return s.startup(ctx, supported.supported)
}
func (s *startupCoordinator) startup(ctx context.Context, supported map[string][]string) error {
m := map[string]string{
"CQL_VERSION": s.conn.cfg.CQLVersion,
}
if s.conn.compressor != nil {
comp := supported["COMPRESSION"]
name := s.conn.compressor.Name()
for _, compressor := range comp {
if compressor == name {
m["COMPRESSION"] = compressor
break
}
}
if _, ok := m["COMPRESSION"]; !ok {
s.conn.compressor = nil
}
}
frame, err := s.write(ctx, &writeStartupFrame{opts: m})
if err != nil {
return err
}
@ -337,37 +376,25 @@ func (c *Conn) startup(ctx context.Context, frameTicker chan struct{}) error {
case *readyFrame:
return nil
case *authenticateFrame:
return c.authenticateHandshake(ctx, v, frameTicker)
return s.authenticateHandshake(ctx, v)
default:
return NewErrProtocol("Unknown type of response to startup frame: %s", v)
}
}
func (c *Conn) authenticateHandshake(ctx context.Context, authFrame *authenticateFrame, frameTicker chan struct{}) error {
if c.auth == nil {
func (s *startupCoordinator) authenticateHandshake(ctx context.Context, authFrame *authenticateFrame) error {
if s.conn.auth == nil {
return fmt.Errorf("authentication required (using %q)", authFrame.class)
}
resp, challenger, err := c.auth.Challenge([]byte(authFrame.class))
resp, challenger, err := s.conn.auth.Challenge([]byte(authFrame.class))
if err != nil {
return err
}
req := &writeAuthResponseFrame{data: resp}
for {
select {
case frameTicker <- struct{}{}:
case <-ctx.Done():
return ctx.Err()
}
framer, err := c.exec(ctx, req, nil)
if err != nil {
return err
}
frame, err := framer.parseFrame()
frame, err := s.write(ctx, req)
if err != nil {
return err
}
@ -612,11 +639,22 @@ func (c *deadlineWriter) Write(p []byte) (int, error) {
return c.w.Write(p)
}
func newWriteCoalescer(w io.Writer, d time.Duration, quit <-chan struct{}) *writeCoalescer {
wc := &writeCoalescer{
writeCh: make(chan struct{}), // TODO: could this be sync?
cond: sync.NewCond(&sync.Mutex{}),
w: w,
quit: quit,
}
go wc.writeFlusher(d)
return wc
}
type writeCoalescer struct {
w io.Writer
// fcond waits for a new write to start the flush loop
fcond *sync.Cond
quit <-chan struct{}
writeCh chan struct{}
running bool
// cond waits for the buffer to be flushed
@ -627,10 +665,8 @@ type writeCoalescer struct {
err error
}
func (w *writeCoalescer) flush() {
w.cond.L.Lock()
defer w.cond.L.Unlock()
func (w *writeCoalescer) flushLocked() {
w.running = false
if len(w.buffers) == 0 {
return
}
@ -645,16 +681,36 @@ func (w *writeCoalescer) flush() {
w.cond.Broadcast()
}
func (w *writeCoalescer) Write(p []byte) (int, error) {
// TODO: use atomics for this?
w.fcond.L.Lock()
if !w.running {
w.running = true
w.fcond.Broadcast()
}
w.fcond.L.Unlock()
func (w *writeCoalescer) flush() {
w.cond.L.Lock()
w.flushLocked()
w.cond.L.Unlock()
}
func (w *writeCoalescer) stop() {
w.cond.L.Lock()
defer w.cond.L.Unlock()
w.flushLocked()
// nil the channel so sends block forever on it,
// instead of closing it, which would cause a send
// on a closed channel to panic.
w.writeCh = nil
}
func (w *writeCoalescer) Write(p []byte) (int, error) {
w.cond.L.Lock()
if !w.running {
select {
case w.writeCh <- struct{}{}:
w.running = true
case <-w.quit:
w.cond.L.Unlock()
return 0, io.EOF // TODO: better error here?
}
}
w.buffers = append(w.buffers, p)
for len(w.buffers) != 0 {
w.cond.Wait()
@ -669,10 +725,10 @@ func (w *writeCoalescer) Write(p []byte) (int, error) {
return len(p), nil
}
func (w *writeCoalescer) writeFlusher(interval time.Duration, quit chan struct{}) {
func (w *writeCoalescer) writeFlusher(interval time.Duration) {
timer := time.NewTimer(interval)
defer timer.Stop()
defer w.flush()
defer w.stop()
if !timer.Stop() {
<-timer.C
@ -680,24 +736,21 @@ func (w *writeCoalescer) writeFlusher(interval time.Duration, quit chan struct{}
for {
// wait for a write to start the flush loop
w.fcond.L.Lock()
for !w.running {
w.fcond.Wait()
select {
case <-w.writeCh:
case <-w.quit:
return
}
w.fcond.L.Unlock()
timer.Reset(interval)
select {
case <-quit:
case <-w.quit:
return
case <-timer.C:
}
w.fcond.L.Lock()
w.flush()
w.running = false
w.fcond.L.Unlock()
}
}
@ -1220,6 +1273,7 @@ func (c *Conn) executeBatch(batch *Batch) *Iter {
func (c *Conn) query(statement string, values ...interface{}) (iter *Iter) {
q := c.session.Query(statement, values...).Consistency(One)
q.trace = nil
return c.executeQuery(q)
}

View File

@ -453,8 +453,7 @@ func (c *controlConn) query(statement string, values ...interface{}) (iter *Iter
Logger.Printf("control: error executing %q: %v\n", statement, iter.err)
}
metric := q.getHostMetrics(c.getConn().host)
metric.Attempts++
q.AddAttempts(1, c.getConn().host)
if iter.err == nil || !c.retry.Attempt(q) {
break
}

View File

@ -5,6 +5,8 @@
package gocql
import (
"context"
"errors"
"fmt"
"math"
"math/rand"
@ -130,6 +132,7 @@ type RetryableQuery interface {
Attempts() int
SetConsistency(c Consistency)
GetConsistency() Consistency
GetContext() context.Context
}
type RetryType uint16
@ -141,6 +144,10 @@ const (
Rethrow RetryType = 0x03 // raise error and stop retrying
)
// ErrUnknownRetryType is returned if the retry policy returns a retry type
// unknown to the query executor.
var ErrUnknownRetryType = errors.New("unknown retry type returned by retry policy")
// RetryPolicy interface is used by gocql to determine if a query can be attempted
// again after a retryable error has been received. The interface allows gocql
// users to implement their own logic to determine if a query can be attempted
@ -852,3 +859,21 @@ func (e *ExponentialReconnectionPolicy) GetInterval(currentRetry int) time.Durat
func (e *ExponentialReconnectionPolicy) GetMaxRetries() int {
return e.MaxRetries
}
type SpeculativeExecutionPolicy interface {
Attempts() int
Delay() time.Duration
}
type NonSpeculativeExecution struct{}
func (sp NonSpeculativeExecution) Attempts() int { return 0 } // No additional attempts
func (sp NonSpeculativeExecution) Delay() time.Duration { return 1 } // The delay. Must be positive to be used in a ticker.
type SimpleSpeculativeExecution struct {
NumAttempts int
TimeoutDelay time.Duration
}
func (sp *SimpleSpeculativeExecution) Attempts() int { return sp.NumAttempts }
func (sp *SimpleSpeculativeExecution) Delay() time.Duration { return sp.TimeoutDelay }
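// Illustrative: attaching the policy above to a query (sketch; session setup
// omitted). Speculative execution only kicks in for queries marked
// idempotent; Idempotent and SetSpeculativeExecutionPolicy are assumed from
// gocql's Query API:
//
// sp := &SimpleSpeculativeExecution{NumAttempts: 2, TimeoutDelay: 100 * time.Millisecond}
// iter := session.Query(`SELECT v FROM t WHERE k = ?`, k).
// Idempotent(true).
// SetSpeculativeExecutionPolicy(sp).
// Iter()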

View File

@ -1,6 +1,7 @@
package gocql
import (
"sync"
"time"
)
@ -8,9 +9,11 @@ type ExecutableQuery interface {
execute(conn *Conn) *Iter
attempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo)
retryPolicy() RetryPolicy
speculativeExecutionPolicy() SpeculativeExecutionPolicy
GetRoutingKey() ([]byte, error)
Keyspace() string
Cancel()
IsIdempotent() bool
RetryableQuery
}
@ -19,6 +22,11 @@ type queryExecutor struct {
policy HostSelectionPolicy
}
type queryResponse struct {
iter *Iter
err error
}
func (q *queryExecutor) attemptQuery(qry ExecutableQuery, conn *Conn) *Iter {
start := time.Now()
iter := qry.execute(conn)
@ -30,12 +38,74 @@ func (q *queryExecutor) attemptQuery(qry ExecutableQuery, conn *Conn) *Iter {
}
func (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) {
rt := qry.retryPolicy()
// Speculative executions are only safe for idempotent queries; if the
// query is not marked as idempotent, force the policy to NonSpeculativeExecution
sp := qry.speculativeExecutionPolicy()
if !qry.IsIdempotent() {
sp = NonSpeculativeExecution{}
}
results := make(chan queryResponse, 1)
stop := make(chan struct{})
defer close(stop)
var specWG sync.WaitGroup
// Launch the main execution
specWG.Add(1)
go q.run(qry, &specWG, results, stop)
// The speculative executions are launched _in addition_ to the main
// execution, on a timer. So a policy whose Attempts() returns 2 results
// in 3 executions running in total.
go func() {
// Wait for every launched execution to finish, then close the results
// channel. Doing this right after the last execution is launched keeps
// the wait group bookkeeping simple.
defer func() {
specWG.Wait()
close(results)
}()
// set up a ticker
ticker := time.NewTicker(sp.Delay())
defer ticker.Stop()
for i := 0; i < sp.Attempts(); i++ {
select {
case <-ticker.C:
// Launch the additional execution
specWG.Add(1)
go q.run(qry, &specWG, results, stop)
case <-qry.GetContext().Done():
// not starting additional executions
return
case <-stop:
// not starting additional executions
return
}
}
}()
res := <-results
if res.iter == nil && res.err == nil {
// if we're here, the results channel was closed, so no more hosts
return nil, ErrNoConnections
}
return res.iter, res.err
}
func (q *queryExecutor) run(qry ExecutableQuery, specWG *sync.WaitGroup, results chan queryResponse, stop chan struct{}) {
// Handle the wait group
defer specWG.Done()
hostIter := q.policy.Pick(qry)
selectedHost := hostIter()
rt := qry.retryPolicy()
var iter *Iter
for hostResponse := hostIter(); hostResponse != nil; hostResponse = hostIter() {
host := hostResponse.Info()
for selectedHost != nil {
host := selectedHost.Info()
if host == nil || !host.IsUp() {
continue
}
@ -50,51 +120,50 @@ func (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) {
continue
}
iter = q.attemptQuery(qry, conn)
// Update host
hostResponse.Mark(iter.err)
if rt == nil {
iter.host = host
break
}
switch rt.GetRetryType(iter.err) {
case Retry:
for rt.Attempt(qry) {
iter = q.attemptQuery(qry, conn)
hostResponse.Mark(iter.err)
if iter.err == nil {
iter.host = host
return iter, nil
}
if rt.GetRetryType(iter.err) != Retry {
break
}
}
case Rethrow:
return nil, iter.err
case Ignore:
return iter, nil
case RetryNextHost:
select {
case <-stop:
// stop this execution and return
return
default:
// Run the query
iter = q.attemptQuery(qry, conn)
iter.host = selectedHost.Info()
// Update host
selectedHost.Mark(iter.err)
// Exit if the query was successful
// or no retry policy defined or retry attempts were reached
if iter.err == nil || rt == nil || !rt.Attempt(qry) {
results <- queryResponse{iter: iter}
return
}
// If the query was unsuccessful, consult the retry policy to decide how to proceed
switch rt.GetRetryType(iter.err) {
case Retry:
// retry on the same host
continue
case Rethrow:
results <- queryResponse{err: iter.err}
return
case Ignore:
results <- queryResponse{iter: iter}
return
case RetryNextHost:
// retry on the next host
selectedHost = hostIter()
if selectedHost == nil {
results <- queryResponse{iter: iter}
return
}
continue
default:
// Unknown retry type: report ErrUnknownRetryType to the caller
results <- queryResponse{iter: nil, err: ErrUnknownRetryType}
return
}
}
// Exit for loop if the query was successful
if iter.err == nil {
iter.host = host
return iter, nil
}
if !rt.Attempt(qry) {
// What do here? Should we just return an error here?
break
}
}
if iter == nil {
return nil, ErrNoConnections
}
return iter, nil
// All hosts are exhausted, return nothing
}

View File

@ -658,11 +658,16 @@ func (s *Session) connect(host *HostInfo, errorHandler ConnErrorHandler) (*Conn,
return s.dial(host, s.connCfg, errorHandler)
}
type queryMetrics struct {
type hostMetrics struct {
Attempts int
TotalLatency int64
}
type queryMetrics struct {
l sync.RWMutex
m map[string]*hostMetrics
}
// Query represents a CQL statement that can be executed.
type Query struct {
stmt string
@ -677,6 +682,7 @@ type Query struct {
observer QueryObserver
session *Session
rt RetryPolicy
spec SpeculativeExecutionPolicy
binding func(q *QueryInfo) ([]interface{}, error)
serialCons SerialConsistency
defaultTimestamp bool
@ -685,8 +691,8 @@ type Query struct {
context context.Context
cancelQuery func()
idempotent bool
metrics map[string]*queryMetrics
customPayload map[string][]byte
metrics *queryMetrics
disableAutoPage bool
}
@ -704,23 +710,26 @@ func (q *Query) defaultsFromSession() {
q.serialCons = s.cfg.SerialConsistency
q.defaultTimestamp = s.cfg.DefaultTimestamp
q.idempotent = s.cfg.DefaultIdempotence
q.metrics = make(map[string]*queryMetrics)
q.metrics = &queryMetrics{m: make(map[string]*hostMetrics)}
// Initiate an empty context with a cancel call
q.WithContext(context.Background())
q.spec = &NonSpeculativeExecution{}
s.mu.RUnlock()
}
func (q *Query) getHostMetrics(host *HostInfo) *queryMetrics {
hostMetrics, exists := q.metrics[host.ConnectAddress().String()]
func (q *Query) getHostMetrics(host *HostInfo) *hostMetrics {
q.metrics.l.Lock()
metrics, exists := q.metrics.m[host.ConnectAddress().String()]
if !exists {
// if the host is not in the map, it means it's been accessed for the first time
hostMetrics = &queryMetrics{Attempts: 0, TotalLatency: 0}
q.metrics[host.ConnectAddress().String()] = hostMetrics
metrics = &hostMetrics{}
q.metrics.m[host.ConnectAddress().String()] = metrics
}
q.metrics.l.Unlock()
return hostMetrics
return metrics
}
// Statement returns the statement that was used to generate this query.
@ -735,27 +744,45 @@ func (q Query) String() string {
//Attempts returns the number of times the query was executed.
func (q *Query) Attempts() int {
attempts := 0
for _, metric := range q.metrics {
q.metrics.l.Lock()
var attempts int
for _, metric := range q.metrics.m {
attempts += metric.Attempts
}
q.metrics.l.Unlock()
return attempts
}
func (q *Query) AddAttempts(i int, host *HostInfo) {
hostMetric := q.getHostMetrics(host)
q.metrics.l.Lock()
hostMetric.Attempts += i
q.metrics.l.Unlock()
}
//Latency returns the average amount of nanoseconds per attempt of the query.
func (q *Query) Latency() int64 {
q.metrics.l.Lock()
var attempts int
var latency int64
for _, metric := range q.metrics {
for _, metric := range q.metrics.m {
attempts += metric.Attempts
latency += metric.TotalLatency
}
q.metrics.l.Unlock()
if attempts > 0 {
return latency / int64(attempts)
}
return 0
}
func (q *Query) AddLatency(l int64, host *HostInfo) {
hostMetric := q.getHostMetrics(host)
q.metrics.l.Lock()
hostMetric.TotalLatency += l
q.metrics.l.Unlock()
}
// Consistency sets the consistency level for this query. If no consistency
// level have been set, the default consistency level of the cluster
// is used.
@ -781,6 +808,10 @@ func (q *Query) CustomPayload(customPayload map[string][]byte) *Query {
return q
}
func (q *Query) GetContext() context.Context {
return q.context
}
// Trace enables tracing of this query. Look at the documentation of the
// Tracer interface to learn more about tracing.
func (q *Query) Trace(trace Tracer) *Query {
@ -851,9 +882,8 @@ func (q *Query) execute(conn *Conn) *Iter {
}
func (q *Query) attempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo) {
hostMetrics := q.getHostMetrics(host)
hostMetrics.Attempts++
hostMetrics.TotalLatency += end.Sub(start).Nanoseconds()
q.AddAttempts(1, host)
q.AddLatency(end.Sub(start).Nanoseconds(), host)
if q.observer != nil {
q.observer.ObserveQuery(q.context, ObservedQuery{
@ -863,7 +893,7 @@ func (q *Query) attempt(keyspace string, end, start time.Time, iter *Iter, host
End: end,
Rows: iter.numRows,
Host: host,
Metrics: hostMetrics,
Metrics: q.getHostMetrics(host),
Err: iter.err,
})
}
@ -983,6 +1013,17 @@ func (q *Query) RetryPolicy(r RetryPolicy) *Query {
return q
}
// SetSpeculativeExecutionPolicy sets the execution policy
func (q *Query) SetSpeculativeExecutionPolicy(sp SpeculativeExecutionPolicy) *Query {
q.spec = sp
return q
}
// speculativeExecutionPolicy fetches the policy
func (q *Query) speculativeExecutionPolicy() SpeculativeExecutionPolicy {
return q.spec
}
func (q *Query) IsIdempotent() bool {
return q.idempotent
}
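
Because executeQuery downgrades non-idempotent queries to NonSpeculativeExecution, a caller has to opt in on both axes. A hedged sketch, assuming an existing session and gocql's Idempotent setter (the setter itself is not part of this diff):

    err := session.Query(`SELECT v FROM kv WHERE k = ?`, key).
        Idempotent(true). // otherwise the policy is silently replaced
        SetSpeculativeExecutionPolicy(&SimpleSpeculativeExecution{
            NumAttempts:  1,
            TimeoutDelay: 100 * time.Millisecond,
        }).
        Exec()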
@ -1431,6 +1472,7 @@ type Batch struct {
Cons Consistency
CustomPayload map[string][]byte
rt RetryPolicy
spec SpeculativeExecutionPolicy
observer BatchObserver
serialCons SerialConsistency
defaultTimestamp bool
@ -1438,14 +1480,14 @@ type Batch struct {
context context.Context
cancelBatch func()
keyspace string
metrics map[string]*queryMetrics
metrics *queryMetrics
}
// NewBatch creates a new batch operation without defaults from the cluster
//
// Deprecated: use session.NewBatch instead
func NewBatch(typ BatchType) *Batch {
return &Batch{Type: typ, metrics: make(map[string]*queryMetrics)}
return &Batch{Type: typ, metrics: &queryMetrics{m: make(map[string]*hostMetrics)}}
}
// NewBatch creates a new batch operation using defaults defined in the cluster
@ -1459,7 +1501,8 @@ func (s *Session) NewBatch(typ BatchType) *Batch {
Cons: s.cons,
defaultTimestamp: s.cfg.DefaultTimestamp,
keyspace: s.cfg.Keyspace,
metrics: make(map[string]*queryMetrics),
metrics: &queryMetrics{m: make(map[string]*hostMetrics)},
spec: &NonSpeculativeExecution{},
}
// Initiate an empty context with a cancel call
@ -1469,15 +1512,17 @@ func (s *Session) NewBatch(typ BatchType) *Batch {
return batch
}
func (b *Batch) getHostMetrics(host *HostInfo) *queryMetrics {
hostMetrics, exists := b.metrics[host.ConnectAddress().String()]
func (b *Batch) getHostMetrics(host *HostInfo) *hostMetrics {
b.metrics.l.Lock()
metrics, exists := b.metrics.m[host.ConnectAddress().String()]
if !exists {
// if the host is not in the map, it means it's been accessed for the first time
hostMetrics = &queryMetrics{Attempts: 0, TotalLatency: 0}
b.metrics[host.ConnectAddress().String()] = hostMetrics
metrics = &hostMetrics{}
b.metrics.m[host.ConnectAddress().String()] = metrics
}
b.metrics.l.Unlock()
return hostMetrics
return metrics
}
// Observer enables batch-level observer on this batch.
@ -1493,18 +1538,33 @@ func (b *Batch) Keyspace() string {
// Attempts returns the number of attempts made to execute the batch.
func (b *Batch) Attempts() int {
attempts := 0
for _, metric := range b.metrics {
b.metrics.l.Lock()
defer b.metrics.l.Unlock()
var attempts int
for _, metric := range b.metrics.m {
attempts += metric.Attempts
}
return attempts
}
func (b *Batch) AddAttempts(i int, host *HostInfo) {
hostMetric := b.getHostMetrics(host)
b.metrics.l.Lock()
hostMetric.Attempts += i
b.metrics.l.Unlock()
}
//Latency returns the average number of nanoseconds to execute a single attempt of the batch.
func (b *Batch) Latency() int64 {
attempts := 0
var latency int64 = 0
for _, metric := range b.metrics {
b.metrics.l.Lock()
defer b.metrics.l.Unlock()
var (
attempts int
latency int64
)
for _, metric := range b.metrics.m {
attempts += metric.Attempts
latency += metric.TotalLatency
}
@ -1514,6 +1574,13 @@ func (b *Batch) Latency() int64 {
return 0
}
func (b *Batch) AddLatency(l int64, host *HostInfo) {
hostMetric := b.getHostMetrics(host)
b.metrics.l.Lock()
hostMetric.TotalLatency += l
b.metrics.l.Unlock()
}
// GetConsistency returns the currently configured consistency level for the batch
// operation.
func (b *Batch) GetConsistency() Consistency {
@ -1526,6 +1593,28 @@ func (b *Batch) SetConsistency(c Consistency) {
b.Cons = c
}
func (b *Batch) GetContext() context.Context {
return b.context
}
func (b *Batch) IsIdempotent() bool {
for _, entry := range b.Entries {
if !entry.Idempotent {
return false
}
}
return true
}
func (b *Batch) speculativeExecutionPolicy() SpeculativeExecutionPolicy {
return b.spec
}
func (b *Batch) SpeculativeExecutionPolicy(sp SpeculativeExecutionPolicy) *Batch {
b.spec = sp
return b
}
// Query adds the query to the batch operation
func (b *Batch) Query(stmt string, args ...interface{}) {
b.Entries = append(b.Entries, BatchEntry{Stmt: stmt, Args: args})
@ -1601,9 +1690,8 @@ func (b *Batch) WithTimestamp(timestamp int64) *Batch {
}
func (b *Batch) attempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo) {
hostMetrics := b.getHostMetrics(host)
hostMetrics.Attempts++
hostMetrics.TotalLatency += end.Sub(start).Nanoseconds()
b.AddAttempts(1, host)
b.AddLatency(end.Sub(start).Nanoseconds(), host)
if b.observer == nil {
return
@ -1621,7 +1709,7 @@ func (b *Batch) attempt(keyspace string, end, start time.Time, iter *Iter, host
End: end,
// Rows not used in batch observations // TODO - might be able to support it when using BatchCAS
Host: host,
Metrics: hostMetrics,
Metrics: b.getHostMetrics(host),
Err: iter.err,
})
}
@ -1640,9 +1728,10 @@ const (
)
type BatchEntry struct {
Stmt string
Args []interface{}
binding func(q *QueryInfo) ([]interface{}, error)
Stmt string
Args []interface{}
Idempotent bool
binding func(q *QueryInfo) ([]interface{}, error)
}
type ColumnInfo struct {
@ -1775,7 +1864,7 @@ type ObservedQuery struct {
Host *HostInfo
// The metrics per this host
Metrics *queryMetrics
Metrics *hostMetrics
// Err is the error in the query.
// It only tracks network errors or errors of bad cassandra syntax, in particular selects with no match return nil error
@ -1807,7 +1896,7 @@ type ObservedBatch struct {
Err error
// The metrics per this host
Metrics *queryMetrics
Metrics *hostMetrics
}
// BatchObserver is the interface implemented by batch observers / stat collectors.

View File

@ -96,6 +96,8 @@ func (e *Event) ParsePayload() (payload interface{}, err error) {
payload = &ReleaseEvent{}
case "RepositoryEvent":
payload = &RepositoryEvent{}
case "RepositoryVulnerabilityAlertEvent":
payload = &RepositoryVulnerabilityAlertEvent{}
case "StatusEvent":
payload = &StatusEvent{}
case "TeamEvent":

View File

@ -164,7 +164,7 @@ func (s *AppsService) ListUserInstallations(ctx context.Context, opt *ListOption
//
// GitHub API docs: https://developer.github.com/v3/apps/#create-a-new-installation-token
func (s *AppsService) CreateInstallationToken(ctx context.Context, id int64) (*InstallationToken, *Response, error) {
u := fmt.Sprintf("installations/%v/access_tokens", id)
u := fmt.Sprintf("app/installations/%v/access_tokens", id)
req, err := s.client.NewRequest("POST", u, nil)
if err != nil {

View File

@ -710,6 +710,27 @@ type RepositoryEvent struct {
Installation *Installation `json:"installation,omitempty"`
}
// RepositoryVulnerabilityAlertEvent is triggered when a security alert is created, dismissed, or resolved.
//
// GitHub API docs: https://developer.github.com/v3/activity/events/types/#repositoryvulnerabilityalertevent
type RepositoryVulnerabilityAlertEvent struct {
// Action is the action that was performed. This can be: "create", "dismiss", "resolve".
Action *string `json:"action,omitempty"`
//The security alert of the vulnerable dependency.
Alert *struct {
ID *int64 `json:"id,omitempty"`
AffectedRange *string `json:"affected_range,omitempty"`
AffectedPackageName *string `json:"affected_package_name,omitempty"`
ExternalReference *string `json:"external_reference,omitempty"`
ExternalIdentifier *string `json:"external_identifier,omitempty"`
FixedIn *string `json:"fixed_in,omitempty"`
Dismisser *User `json:"dismisser,omitempty"`
DismissReason *string `json:"dismiss_reason,omitempty"`
DismissedAt *Timestamp `json:"dismissed_at,omitempty"`
} `json:"alert,omitempty"`
}
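
A hedged sketch of consuming this event in a webhook handler; the secret and logging are illustrative, and ValidatePayload, WebHookType and ParseWebHook are the usual go-github helpers rather than part of this diff (assumes net/http, log, and the go-github package are imported):

    func handleWebhook(w http.ResponseWriter, r *http.Request) {
        payload, err := github.ValidatePayload(r, []byte("webhook-secret"))
        if err != nil {
            http.Error(w, "bad signature", http.StatusForbidden)
            return
        }
        event, err := github.ParseWebHook(github.WebHookType(r), payload)
        if err != nil {
            http.Error(w, "unparseable payload", http.StatusBadRequest)
            return
        }
        if e, ok := event.(*github.RepositoryVulnerabilityAlertEvent); ok {
            // Action is one of "create", "dismiss", "resolve".
            log.Printf("vulnerability alert: action=%s", e.GetAction())
        }
        w.WriteHeader(http.StatusOK)
    }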
// StatusEvent is triggered when the status of a Git commit changes.
// The Webhook event name is "status".
//

View File

@ -58,7 +58,7 @@ func (c CommitAuthor) String() string {
return Stringify(c)
}
// GetCommit fetchs the Commit object for a given SHA.
// GetCommit fetches the Commit object for a given SHA.
//
// GitHub API docs: https://developer.github.com/v3/git/commits/#get-a-commit
func (s *GitService) GetCommit(ctx context.Context, owner string, repo string, sha string) (*Commit, *Response, error) {

View File

@ -33,7 +33,7 @@ type createTagRequest struct {
Tagger *CommitAuthor `json:"tagger,omitempty"`
}
// GetTag fetchs a tag from a repo given a SHA.
// GetTag fetches a tag from a repo given a SHA.
//
// GitHub API docs: https://developer.github.com/v3/git/tags/#get-a-tag
func (s *GitService) GetTag(ctx context.Context, owner string, repo string, sha string) (*Tag, *Response, error) {

View File

@ -3140,14 +3140,6 @@ func (h *Hook) GetID() int64 {
return *h.ID
}
// GetName returns the Name field if it's non-nil, zero value otherwise.
func (h *Hook) GetName() string {
if h == nil || h.Name == nil {
return ""
}
return *h.Name
}
// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
func (h *Hook) GetUpdatedAt() time.Time {
if h == nil || h.UpdatedAt == nil {
@ -9964,6 +9956,14 @@ func (r *RepositoryTag) GetZipballURL() string {
return *r.ZipballURL
}
// GetAction returns the Action field if it's non-nil, zero value otherwise.
func (r *RepositoryVulnerabilityAlertEvent) GetAction() string {
if r == nil || r.Action == nil {
return ""
}
return *r.Action
}
// GetForkRepos returns the ForkRepos field if it's non-nil, zero value otherwise.
func (r *RepoStats) GetForkRepos() int {
if r == nil || r.ForkRepos == nil {

View File

@ -41,42 +41,43 @@ const (
var (
// eventTypeMapping maps webhooks types to their corresponding go-github struct types.
eventTypeMapping = map[string]string{
"check_run": "CheckRunEvent",
"check_suite": "CheckSuiteEvent",
"commit_comment": "CommitCommentEvent",
"create": "CreateEvent",
"delete": "DeleteEvent",
"deployment": "DeploymentEvent",
"deployment_status": "DeploymentStatusEvent",
"fork": "ForkEvent",
"gollum": "GollumEvent",
"installation": "InstallationEvent",
"installation_repositories": "InstallationRepositoriesEvent",
"issue_comment": "IssueCommentEvent",
"issues": "IssuesEvent",
"label": "LabelEvent",
"marketplace_purchase": "MarketplacePurchaseEvent",
"member": "MemberEvent",
"membership": "MembershipEvent",
"milestone": "MilestoneEvent",
"organization": "OrganizationEvent",
"org_block": "OrgBlockEvent",
"page_build": "PageBuildEvent",
"ping": "PingEvent",
"project": "ProjectEvent",
"project_card": "ProjectCardEvent",
"project_column": "ProjectColumnEvent",
"public": "PublicEvent",
"pull_request_review": "PullRequestReviewEvent",
"pull_request_review_comment": "PullRequestReviewCommentEvent",
"pull_request": "PullRequestEvent",
"push": "PushEvent",
"repository": "RepositoryEvent",
"release": "ReleaseEvent",
"status": "StatusEvent",
"team": "TeamEvent",
"team_add": "TeamAddEvent",
"watch": "WatchEvent",
"check_run": "CheckRunEvent",
"check_suite": "CheckSuiteEvent",
"commit_comment": "CommitCommentEvent",
"create": "CreateEvent",
"delete": "DeleteEvent",
"deployment": "DeploymentEvent",
"deployment_status": "DeploymentStatusEvent",
"fork": "ForkEvent",
"gollum": "GollumEvent",
"installation": "InstallationEvent",
"installation_repositories": "InstallationRepositoriesEvent",
"issue_comment": "IssueCommentEvent",
"issues": "IssuesEvent",
"label": "LabelEvent",
"marketplace_purchase": "MarketplacePurchaseEvent",
"member": "MemberEvent",
"membership": "MembershipEvent",
"milestone": "MilestoneEvent",
"organization": "OrganizationEvent",
"org_block": "OrgBlockEvent",
"page_build": "PageBuildEvent",
"ping": "PingEvent",
"project": "ProjectEvent",
"project_card": "ProjectCardEvent",
"project_column": "ProjectColumnEvent",
"public": "PublicEvent",
"pull_request_review": "PullRequestReviewEvent",
"pull_request_review_comment": "PullRequestReviewCommentEvent",
"pull_request": "PullRequestEvent",
"push": "PushEvent",
"repository": "RepositoryEvent",
"repository_vulnerability_alert": "RepositoryVulnerabilityAlertEvent",
"release": "ReleaseEvent",
"status": "StatusEvent",
"team": "TeamEvent",
"team_add": "TeamAddEvent",
"watch": "WatchEvent",
}
)

View File

@ -49,7 +49,7 @@ func (s *OrganizationsService) GetHook(ctx context.Context, org string, id int64
}
// CreateHook creates a Hook for the specified org.
// Name and Config are required fields.
// Config is a required field.
//
// Note that only a subset of the hook fields are used and hook must
// not be nil.
@ -59,7 +59,6 @@ func (s *OrganizationsService) CreateHook(ctx context.Context, org string, hook
u := fmt.Sprintf("orgs/%v/hooks", org)
hookReq := &createHookRequest{
Name: hook.Name,
Events: hook.Events,
Active: hook.Active,
Config: hook.Config,

View File

@ -60,6 +60,10 @@ type PullRequest struct {
NodeID *string `json:"node_id,omitempty"`
RequestedReviewers []*User `json:"requested_reviewers,omitempty"`
// RequestedTeams is populated as part of the PullRequestEvent.
// See https://developer.github.com/v3/activity/events/types/#pullrequestevent for an example.
RequestedTeams []*Team `json:"requested_teams,omitempty"`
Links *PRLinks `json:"_links,omitempty"`
Head *PullRequestBranch `json:"head,omitempty"`
Base *PullRequestBranch `json:"base,omitempty"`

View File

@ -75,8 +75,7 @@ type Hook struct {
ID *int64 `json:"id,omitempty"`
// Only the following fields are used when creating a hook.
// Name and Config are required.
Name *string `json:"name,omitempty"`
// Config is required.
Config map[string]interface{} `json:"config,omitempty"`
Events []string `json:"events,omitempty"`
Active *bool `json:"active,omitempty"`
@ -92,16 +91,14 @@ func (h Hook) String() string {
// See https://github.com/google/go-github/issues/1015 for more
// information.
type createHookRequest struct {
// Name and Config are required.
// Name must be passed as "web".
Name *string `json:"name,omitempty"`
// Config is required.
Config map[string]interface{} `json:"config,omitempty"`
Events []string `json:"events,omitempty"`
Active *bool `json:"active,omitempty"`
}
// CreateHook creates a Hook for the specified repository.
// Name and Config are required fields.
// Config is a required field.
//
// Note that only a subset of the hook fields are used and hook must
// not be nil.
@ -111,7 +108,6 @@ func (s *RepositoriesService) CreateHook(ctx context.Context, owner, repo string
u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo)
hookReq := &createHookRequest{
Name: hook.Name,
Events: hook.Events,
Active: hook.Active,
Config: hook.Config,

View File

@ -38,6 +38,18 @@ const (
ProxyExecModeScript ProxyExecMode = "script"
)
// UpstreamDestType is the type of upstream discovery mechanism.
type UpstreamDestType string
const (
// UpstreamDestTypeService discovers instances via healthy service lookup.
UpstreamDestTypeService UpstreamDestType = "service"
// UpstreamDestTypePreparedQuery discovers instances via prepared query
// execution.
UpstreamDestTypePreparedQuery UpstreamDestType = "prepared_query"
)
// AgentCheck represents a check known to the agent
type AgentCheck struct {
Node string
@ -59,7 +71,7 @@ type AgentWeights struct {
// AgentService represents a service known to the agent
type AgentService struct {
Kind ServiceKind
Kind ServiceKind `json:",omitempty"`
ID string
Service string
Tags []string
@ -68,24 +80,40 @@ type AgentService struct {
Address string
Weights AgentWeights
EnableTagOverride bool
CreateIndex uint64
ModifyIndex uint64
ProxyDestination string
Connect *AgentServiceConnect
CreateIndex uint64 `json:",omitempty"`
ModifyIndex uint64 `json:",omitempty"`
ContentHash string `json:",omitempty"`
// DEPRECATED (ProxyDestination) - remove this field
ProxyDestination string `json:",omitempty"`
Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
Connect *AgentServiceConnect `json:",omitempty"`
}
// AgentServiceConnect represents the Connect configuration of a service.
type AgentServiceConnect struct {
Native bool
Proxy *AgentServiceConnectProxy
Native bool `json:",omitempty"`
Proxy *AgentServiceConnectProxy `json:",omitempty"`
SidecarService *AgentServiceRegistration `json:",omitempty"`
}
// AgentServiceConnectProxy represents the Connect Proxy configuration of a
// service.
type AgentServiceConnectProxy struct {
ExecMode ProxyExecMode
Command []string
Config map[string]interface{}
ExecMode ProxyExecMode `json:",omitempty"`
Command []string `json:",omitempty"`
Config map[string]interface{} `json:",omitempty"`
Upstreams []Upstream `json:",omitempty"`
}
// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
// ServiceDefinition or response.
type AgentServiceConnectProxyConfig struct {
DestinationServiceName string
DestinationServiceID string `json:",omitempty"`
LocalServiceAddress string `json:",omitempty"`
LocalServicePort int `json:",omitempty"`
Config map[string]interface{} `json:",omitempty"`
Upstreams []Upstream
}
// AgentMember represents a cluster member known to the agent
@ -129,8 +157,10 @@ type AgentServiceRegistration struct {
Weights *AgentWeights `json:",omitempty"`
Check *AgentServiceCheck
Checks AgentServiceChecks
ProxyDestination string `json:",omitempty"`
Connect *AgentServiceConnect `json:",omitempty"`
// DEPRECATED (ProxyDestination) - remove this field
ProxyDestination string `json:",omitempty"`
Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
Connect *AgentServiceConnect `json:",omitempty"`
}
// AgentCheckRegistration is used to register a new check
@ -161,6 +191,8 @@ type AgentServiceCheck struct {
TLSSkipVerify bool `json:",omitempty"`
GRPC string `json:",omitempty"`
GRPCUseTLS bool `json:",omitempty"`
AliasNode string `json:",omitempty"`
AliasService string `json:",omitempty"`
// In Consul 0.7 and later, checks that are associated with a service
// may also contain this optional DeregisterCriticalServiceAfter field,
@ -233,9 +265,23 @@ type ConnectProxyConfig struct {
TargetServiceID string
TargetServiceName string
ContentHash string
ExecMode ProxyExecMode
Command []string
Config map[string]interface{}
// DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs
// but they don't need ExecMode or Command
ExecMode ProxyExecMode `json:",omitempty"`
Command []string `json:",omitempty"`
Config map[string]interface{}
Upstreams []Upstream
}
// Upstream is the response structure for a proxy upstream configuration.
type Upstream struct {
DestinationType UpstreamDestType `json:",omitempty"`
DestinationNamespace string `json:",omitempty"`
DestinationName string
Datacenter string `json:",omitempty"`
LocalBindAddress string `json:",omitempty"`
LocalBindPort int `json:",omitempty"`
Config map[string]interface{} `json:",omitempty"`
}
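
A sketch of registering a service with an agent-managed sidecar proxy and one upstream, assuming a configured *api.Client (the service names and ports are illustrative):

    reg := &api.AgentServiceRegistration{
        Name: "web",
        Port: 8080,
        Connect: &api.AgentServiceConnect{
            SidecarService: &api.AgentServiceRegistration{
                Proxy: &api.AgentServiceConnectProxyConfig{
                    Upstreams: []api.Upstream{{
                        DestinationType: api.UpstreamDestTypeService,
                        DestinationName: "db",
                        LocalBindPort:   9191,
                    }},
                },
            },
        },
    }
    err := client.Agent().ServiceRegister(reg)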
// Agent can be used to query the Agent endpoints
@ -343,6 +389,33 @@ func (a *Agent) Services() (map[string]*AgentService, error) {
return out, nil
}
// Service returns a locally registered service instance and allows for
// hash-based blocking.
//
// Note that this uses an unconventional blocking mechanism since it's
// agent-local state. That means there is no persistent raft index so we block
// based on object hash instead.
func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/agent/service/"+serviceID)
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out *AgentService
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
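
A sketch of the hash-based blocking loop this enables. WaitHash on QueryOptions is assumed from the wider api package (it is not part of this hunk), and the service ID is illustrative:

    var hash string
    for {
        svc, _, err := client.Agent().Service("web-1", &api.QueryOptions{
            WaitHash: hash,
            WaitTime: 5 * time.Minute,
        })
        if err != nil {
            return err
        }
        // The call returns when the registration's hash changes (or the
        // wait time elapses); remember it for the next iteration.
        hash = svc.ContentHash
    }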
// Members returns the known gossip members. The WAN
// flag can be used to query a server for WAN members.
func (a *Agent) Members(wan bool) ([]*AgentMember, error) {

View File

@ -61,6 +61,12 @@ const (
// HTTPSSLVerifyEnvName defines an environment variable name which sets
// whether or not to disable certificate checking.
HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY"
// GRPCAddrEnvName defines an environment variable name which sets the gRPC
// address for consul connect envoy. Note this isn't actually used by the api
// client in this package but is defined here for consistency with all the
// other ENV names we use.
GRPCAddrEnvName = "CONSUL_GRPC_ADDR"
)
// QueryOptions are used to parameterize a query
@ -78,6 +84,27 @@ type QueryOptions struct {
// read.
RequireConsistent bool
// UseCache requests that the agent cache results locally. See
// https://www.consul.io/api/index.html#agent-caching for more details on the
// semantics.
UseCache bool
// MaxAge limits how old a cached value will be returned if UseCache is true.
// If there is a cached response that is older than the MaxAge, it is treated
// as a cache miss and a new fetch invoked. If the fetch fails, the error is
// returned. Clients that wish to allow for stale results on error can set
// StaleIfError to a longer duration to change this behaviour. It is ignored
// if the endpoint supports background refresh caching. See
// https://www.consul.io/api/index.html#agent-caching for more details.
MaxAge time.Duration
// StaleIfError specifies how stale the client will accept a cached response
// if the servers are unavailable to fetch a fresh one. Only makes sense when
// UseCache is true and MaxAge is set to a lower, non-zero value. It is
// ignored if the endpoint supports background refresh caching. See
// https://www.consul.io/api/index.html#agent-caching for more details.
StaleIfError time.Duration
// WaitIndex is used to enable a blocking query. Waits
// until the timeout or the next index is reached
WaitIndex uint64
@ -196,6 +223,13 @@ type QueryMeta struct {
// Is address translation enabled for HTTP responses on this agent
AddressTranslationEnabled bool
// CacheHit is true if the result was served from agent-local cache.
CacheHit bool
// CacheAge is set if request was ?cached and indicates how stale the cached
// response is.
CacheAge time.Duration
}
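
Putting the three cache knobs together; a hedged sketch against an assumed *api.Client (the durations are illustrative):

    opts := &api.QueryOptions{
        UseCache:     true,
        MaxAge:       30 * time.Second, // older cached results count as a miss
        StaleIfError: 5 * time.Minute,  // but stale data beats an error while servers are down
    }
    entries, qm, err := client.Health().Service("web", "", true, opts)
    if err == nil && qm.CacheHit {
        log.Printf("served from agent cache (age %s), %d instances", qm.CacheAge, len(entries))
    }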
// WriteMeta is used to return meta data about a write
@ -591,6 +625,20 @@ func (r *request) setQueryOptions(q *QueryOptions) {
if q.Connect {
r.params.Set("connect", "true")
}
if q.UseCache && !q.RequireConsistent {
r.params.Set("cached", "")
cc := []string{}
if q.MaxAge > 0 {
cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds()))
}
if q.StaleIfError > 0 {
cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds()))
}
if len(cc) > 0 {
r.header.Set("Cache-Control", strings.Join(cc, ", "))
}
}
r.ctx = q.ctx
}
@ -802,6 +850,18 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
q.AddressTranslationEnabled = false
}
// Parse Cache info
if cacheStr := header.Get("X-Cache"); cacheStr != "" {
q.CacheHit = strings.EqualFold(cacheStr, "HIT")
}
if ageStr := header.Get("Age"); ageStr != "" {
age, err := strconv.ParseUint(ageStr, 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse Age Header: %v", err)
}
q.CacheAge = time.Duration(age) * time.Second
}
return nil
}

View File

@ -31,8 +31,11 @@ type CatalogService struct {
ServicePort int
ServiceWeights Weights
ServiceEnableTagOverride bool
CreateIndex uint64
ModifyIndex uint64
// DEPRECATED (ProxyDestination) - remove the next comment!
// We forgot to ever add ServiceProxyDestination here so no need to deprecate!
ServiceProxy *AgentServiceConnectProxyConfig
CreateIndex uint64
ModifyIndex uint64
}
type CatalogNode struct {
@ -162,23 +165,43 @@ func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, er
// Service is used to query catalog entries for a given service
func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
return c.service(service, tag, q, false)
var tags []string
if tag != "" {
tags = []string{tag}
}
return c.service(service, tags, q, false)
}
// Supports multiple tags for filtering
func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
return c.service(service, tags, q, false)
}
// Connect is used to query catalog entries for a given Connect-enabled service
func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
return c.service(service, tag, q, true)
var tags []string
if tag != "" {
tags = []string{tag}
}
return c.service(service, tags, q, true)
}
func (c *Catalog) service(service, tag string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) {
// Supports multiple tags for filtering
func (c *Catalog) ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
return c.service(service, tags, q, true)
}
func (c *Catalog) service(service string, tags []string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) {
path := "/v1/catalog/service/" + service
if connect {
path = "/v1/catalog/connect/" + service
}
r := c.c.newRequest("GET", path)
r.setQueryOptions(q)
if tag != "" {
r.params.Set("tag", tag)
if len(tags) > 0 {
for _, tag := range tags {
r.params.Add("tag", tag)
}
}
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {

View File

@ -159,7 +159,15 @@ func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMe
// for a given service. It can optionally do server-side filtering on a tag
// or nodes with passing health checks only.
func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
return h.service(service, tag, passingOnly, q, false)
var tags []string
if tag != "" {
tags = []string{tag}
}
return h.service(service, tags, passingOnly, q, false)
}
func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
return h.service(service, tags, passingOnly, q, false)
}
// Connect is equivalent to Service except that it will only return services
@ -168,18 +176,28 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions)
// passingOnly is true only instances where both the service and any proxy are
// healthy will be returned.
func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
return h.service(service, tag, passingOnly, q, true)
var tags []string
if tag != "" {
tags = []string{tag}
}
return h.service(service, tags, passingOnly, q, true)
}
func (h *Health) service(service, tag string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) {
func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
return h.service(service, tags, passingOnly, q, true)
}
func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) {
path := "/v1/health/service/" + service
if connect {
path = "/v1/health/connect/" + service
}
r := h.c.newRequest("GET", path)
r.setQueryOptions(q)
if tag != "" {
r.params.Set("tag", tag)
if len(tags) > 0 {
for _, tag := range tags {
r.params.Add("tag", tag)
}
}
if passingOnly {
r.params.Set(HealthPassing, "1")

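With the loop above, each tag becomes its own repeated "tag" query parameter, and only instances that carry all of the given tags are returned. A short sketch, assuming a configured *api.Client:

    entries, _, err := client.Health().ServiceMultipleTags(
        "web", []string{"v2", "primary"}, true, nil)
    if err == nil {
        log.Printf("%d passing instances tagged v2+primary", len(entries))
    }

    // The catalog endpoint gains the same shape:
    services, _, err := client.Catalog().ServiceMultipleTags(
        "web", []string{"v2", "primary"}, nil)
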
View File

@ -15,7 +15,7 @@ var (
//
// Version must conform to the format expected by github.com/hashicorp/go-version
// for tests to work.
Version = "1.2.3"
Version = "1.3.0"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release

View File

@ -77,6 +77,7 @@ type Allocation struct {
TaskGroup string
Resources *Resources
TaskResources map[string]*Resources
AllocatedResources *AllocatedResources
Services map[string]string
Metrics *AllocationMetric
DesiredStatus string
@ -158,6 +159,29 @@ type AllocDeploymentStatus struct {
ModifyIndex uint64
}
type AllocatedResources struct {
Tasks map[string]*AllocatedTaskResources
Shared AllocatedSharedResources
}
type AllocatedTaskResources struct {
Cpu AllocatedCpuResources
Memory AllocatedMemoryResources
Networks []*NetworkResource
}
type AllocatedSharedResources struct {
DiskMB uint64
}
type AllocatedCpuResources struct {
CpuShares uint64
}
type AllocatedMemoryResources struct {
MemoryMB uint64
}
// AllocIndexSort reverse sorts allocs by CreateIndex.
type AllocIndexSort []*AllocationListStub

View File

@ -446,6 +446,8 @@ type Node struct {
Attributes map[string]string
Resources *Resources
Reserved *Resources
NodeResources *NodeResources
ReservedResources *NodeReservedResources
Links map[string]string
Meta map[string]string
NodeClass string
@ -461,6 +463,48 @@ type Node struct {
ModifyIndex uint64
}
type NodeResources struct {
Cpu NodeCpuResources
Memory NodeMemoryResources
Disk NodeDiskResources
Networks []*NetworkResource
}
type NodeCpuResources struct {
TotalShares uint64
}
type NodeMemoryResources struct {
MemoryMB uint64
}
type NodeDiskResources struct {
DiskMB uint64
}
type NodeReservedResources struct {
Cpu NodeReservedCpuResources
Memory NodeReservedMemoryResources
Disk NodeReservedDiskResources
Networks NodeReservedNetworkResources
}
type NodeReservedCpuResources struct {
TotalShares uint64
}
type NodeReservedMemoryResources struct {
MemoryMB uint64
}
type NodeReservedDiskResources struct {
DiskMB uint64
}
type NodeReservedNetworkResources struct {
ReservedHostPorts string
}
// DrainStrategy describes a Node's drain behavior.
type DrainStrategy struct {
// DrainSpec is the user declared drain specification

View File

@ -10,6 +10,7 @@ type Resources struct {
DiskMB *int `mapstructure:"disk"`
IOPS *int
Networks []*NetworkResource
Devices []*RequestedDevice
}
// Canonicalize will supply missing values in the cases
@ -28,6 +29,9 @@ func (r *Resources) Canonicalize() {
for _, n := range r.Networks {
n.Canonicalize()
}
for _, d := range r.Devices {
d.Canonicalize()
}
}
// DefaultResources is a small resources object that contains the
@ -75,6 +79,9 @@ func (r *Resources) Merge(other *Resources) {
if len(other.Networks) != 0 {
r.Networks = other.Networks
}
if len(other.Devices) != 0 {
r.Devices = other.Devices
}
}
type Port struct {
@ -98,3 +105,34 @@ func (n *NetworkResource) Canonicalize() {
n.MBits = helper.IntToPtr(10)
}
}
// RequestedDevice is used to request a device for a task.
type RequestedDevice struct {
// Name is the request name. The possible values are as follows:
// * <type>: A single value only specifies the type of request.
// * <vendor>/<type>: A single slash delimiter assumes the vendor and type of device is specified.
// * <vendor>/<type>/<name>: Two slash delimiters assume vendor, type and specific model are specified.
//
// Examples are as follows:
// * "gpu"
// * "nvidia/gpu"
// * "nvidia/gpu/GTX2080Ti"
Name string
// Count is the number of requested devices
Count *uint64
// Constraints are a set of constraints to apply when selecting the device
// to use.
Constraints []*Constraint
// Affinities are a set of affinities to apply when selecting the device
// to use.
Affinities []*Affinity
}
func (d *RequestedDevice) Canonicalize() {
if d.Count == nil {
d.Count = helper.Uint64ToPtr(1)
}
}
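
A sketch of requesting devices on a task's resources, using the pointer helpers from this package (the task variable and the vendor/type string are illustrative):

    task.Resources = &api.Resources{
        CPU:      helper.IntToPtr(500),
        MemoryMB: helper.IntToPtr(256),
        Devices: []*api.RequestedDevice{{
            Name:  "nvidia/gpu",
            Count: helper.Uint64ToPtr(2), // left nil, Canonicalize defaults it to 1
        }},
    }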

View File

@ -57,11 +57,16 @@ func Int64ToPtr(i int64) *int64 {
return &i
}
// UintToPtr returns the pointer to an uint
// Uint64ToPtr returns the pointer to an uint64
func Uint64ToPtr(u uint64) *uint64 {
return &u
}
// UintToPtr returns the pointer to a uint
func UintToPtr(u uint) *uint {
return &u
}
// StringToPtr returns the pointer to a string
func StringToPtr(str string) *string {
return &str
@ -72,6 +77,11 @@ func TimeToPtr(t time.Duration) *time.Duration {
return &t
}
// Float64ToPtr returns the pointer to a float64
func Float64ToPtr(f float64) *float64 {
return &f
}
func IntMin(a, b int) int {
if a < b {
return a

View File

@ -840,6 +840,11 @@ func (r *Resources) Diff(other *Resources, contextual bool) *ObjectDiff {
diff.Objects = append(diff.Objects, nDiffs...)
}
// Requested Devices diff
if nDiffs := requestedDevicesDiffs(r.Devices, other.Devices, contextual); nDiffs != nil {
diff.Objects = append(diff.Objects, nDiffs...)
}
return diff
}
@ -975,6 +980,67 @@ func portDiffs(old, new []Port, dynamic bool, contextual bool) []*ObjectDiff {
}
// Diff returns a diff of two requested devices. If contextual diff is enabled,
// non-changed fields will still be returned.
func (r *RequestedDevice) Diff(other *RequestedDevice, contextual bool) *ObjectDiff {
diff := &ObjectDiff{Type: DiffTypeNone, Name: "Device"}
var oldPrimitiveFlat, newPrimitiveFlat map[string]string
if reflect.DeepEqual(r, other) {
return nil
} else if r == nil {
diff.Type = DiffTypeAdded
newPrimitiveFlat = flatmap.Flatten(other, nil, true)
} else if other == nil {
diff.Type = DiffTypeDeleted
oldPrimitiveFlat = flatmap.Flatten(r, nil, true)
} else {
diff.Type = DiffTypeEdited
oldPrimitiveFlat = flatmap.Flatten(r, nil, true)
newPrimitiveFlat = flatmap.Flatten(other, nil, true)
}
// Diff the primitive fields.
diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual)
return diff
}
// requestedDevicesDiffs diffs a set of RequestedDevices. If contextual diff is enabled,
// non-changed fields will still be returned.
func requestedDevicesDiffs(old, new []*RequestedDevice, contextual bool) []*ObjectDiff {
makeSet := func(devices []*RequestedDevice) map[string]*RequestedDevice {
deviceMap := make(map[string]*RequestedDevice, len(devices))
for _, d := range devices {
deviceMap[d.Name] = d
}
return deviceMap
}
oldSet := makeSet(old)
newSet := makeSet(new)
var diffs []*ObjectDiff
for k, oldV := range oldSet {
newV := newSet[k]
if diff := oldV.Diff(newV, contextual); diff != nil {
diffs = append(diffs, diff)
}
}
for k, newV := range newSet {
if oldV, ok := oldSet[k]; !ok {
if diff := oldV.Diff(newV, contextual); diff != nil {
diffs = append(diffs, diff)
}
}
}
sort.Sort(ObjectDiffs(diffs))
return diffs
}
// configDiff returns the diff of two Task Config objects. If contextual diff is
// enabled, all fields will be returned, even if no diff occurred.
func configDiff(old, new map[string]interface{}, contextual bool) *ObjectDiff {

View File

@ -7,6 +7,7 @@ import (
"fmt"
"math"
"sort"
"strconv"
"strings"
"golang.org/x/crypto/blake2b"
@ -98,16 +99,12 @@ func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allo
// The netIdx can optionally be provided if its already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions.
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) {
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *ComparableResources, error) {
// Compute the utilization from zero
used := new(Resources)
used := new(ComparableResources)
// Add the reserved resources of the node
if node.Reserved != nil {
if err := used.Add(node.Reserved); err != nil {
return false, "", nil, err
}
}
used.Add(node.ComparableReservedResources())
// For each alloc, add the resources
for _, alloc := range allocs {
@ -116,32 +113,12 @@ func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, st
continue
}
if alloc.Resources != nil {
if err := used.Add(alloc.Resources); err != nil {
return false, "", nil, err
}
} else if alloc.TaskResources != nil {
// Adding the shared resource asks for the allocation to the used
// resources
if err := used.Add(alloc.SharedResources); err != nil {
return false, "", nil, err
}
// Allocations within the plan have the combined resources stripped
// to save space, so sum up the individual task resources.
for _, taskResource := range alloc.TaskResources {
if err := used.Add(taskResource); err != nil {
return false, "", nil, err
}
}
} else {
return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID)
}
used.Add(alloc.ComparableResources())
}
// Check that the node resources are a super set of those
// that are being allocated
if superset, dimension := node.Resources.Superset(used); !superset {
if superset, dimension := node.ComparableResources().Superset(used); !superset {
return false, dimension, used, nil
}
@ -166,20 +143,22 @@ func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, st
// ScoreFit is used to score the fit based on the Google work published here:
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
// This is equivalent to their BestFit v3
func ScoreFit(node *Node, util *Resources) float64 {
func ScoreFit(node *Node, util *ComparableResources) float64 {
// COMPAT(0.11): Remove in 0.11
reserved := node.ComparableReservedResources()
res := node.ComparableResources()
// Determine the node availability
nodeCpu := float64(node.Resources.CPU)
if node.Reserved != nil {
nodeCpu -= float64(node.Reserved.CPU)
}
nodeMem := float64(node.Resources.MemoryMB)
if node.Reserved != nil {
nodeMem -= float64(node.Reserved.MemoryMB)
nodeCpu := float64(res.Flattened.Cpu.CpuShares)
nodeMem := float64(res.Flattened.Memory.MemoryMB)
if reserved != nil {
nodeCpu -= float64(reserved.Flattened.Cpu.CpuShares)
nodeMem -= float64(reserved.Flattened.Memory.MemoryMB)
}
// Compute the free percentage
freePctCpu := 1 - (float64(util.CPU) / nodeCpu)
freePctRam := 1 - (float64(util.MemoryMB) / nodeMem)
freePctCpu := 1 - (float64(util.Flattened.Cpu.CpuShares) / nodeCpu)
freePctRam := 1 - (float64(util.Flattened.Memory.MemoryMB) / nodeMem)
// Total will be "maximized" the smaller the value is.
// At 100% utilization, the total is 2, while at 0% util it is 20.
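(The rest of the scoring body is truncated by this hunk. Matching the 2-to-20 range stated above, the presumed shape is:

    total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)

so a fully free node scores 20 and a fully utilized one scores 2; lower totals indicate a tighter bin-packing fit.)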
@ -378,3 +357,67 @@ func CompareMigrateToken(allocID, nodeSecretID, otherMigrateToken string) bool {
}
return subtle.ConstantTimeCompare(h.Sum(nil), otherBytes) == 1
}
// ParsePortRanges parses the passed port range string and returns a list of the
// ports. The specification is a comma separated list of either port numbers or
// port ranges. A port number is a single integer and a port range is two
// integers separated by a hyphen. As an example the following spec would
// convert to: ParsePortRanges("10,12-14,16") -> []uint64{10, 12, 13, 14, 16}
func ParsePortRanges(spec string) ([]uint64, error) {
parts := strings.Split(spec, ",")
// Hot path the empty case
if len(parts) == 1 && parts[0] == "" {
return nil, nil
}
ports := make(map[uint64]struct{})
for _, part := range parts {
part = strings.TrimSpace(part)
rangeParts := strings.Split(part, "-")
l := len(rangeParts)
switch l {
case 1:
if val := rangeParts[0]; val == "" {
return nil, fmt.Errorf("can't specify empty port")
} else {
port, err := strconv.ParseUint(val, 10, 0)
if err != nil {
return nil, err
}
ports[port] = struct{}{}
}
case 2:
// We are parsing a range
start, err := strconv.ParseUint(rangeParts[0], 10, 0)
if err != nil {
return nil, err
}
end, err := strconv.ParseUint(rangeParts[1], 10, 0)
if err != nil {
return nil, err
}
if end < start {
return nil, fmt.Errorf("invalid range: starting value (%v) less than ending (%v) value", end, start)
}
for i := start; i <= end; i++ {
ports[i] = struct{}{}
}
default:
return nil, fmt.Errorf("can only parse single port numbers or port ranges (ex. 80,100-120,150)")
}
}
var results []uint64
for port := range ports {
results = append(results, port)
}
sort.Slice(results, func(i, j int) bool {
return results[i] < results[j]
})
return results, nil
}
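
Usage is as the doc comment describes; duplicates collapse via the set and the result comes back sorted:

    ports, err := ParsePortRanges("22,10000-10002")
    // ports == []uint64{22, 10000, 10001, 10002}, err == nil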

View File

@ -70,22 +70,36 @@ func (idx *NetworkIndex) Overcommitted() bool {
// SetNode is used to setup the available network resources. Returns
// true if there is a collision
func (idx *NetworkIndex) SetNode(node *Node) (collide bool) {
// COMPAT(0.11): Remove in 0.11
// Grab the network resources, handling both new and old
var networks []*NetworkResource
if node.NodeResources != nil && len(node.NodeResources.Networks) != 0 {
networks = node.NodeResources.Networks
} else if node.Resources != nil {
networks = node.Resources.Networks
}
// Add the available CIDR blocks
for _, n := range node.Resources.Networks {
for _, n := range networks {
if n.Device != "" {
idx.AvailNetworks = append(idx.AvailNetworks, n)
idx.AvailBandwidth[n.Device] = n.MBits
}
}
// Add the reserved resources
if r := node.Reserved; r != nil {
for _, n := range r.Networks {
// COMPAT(0.11): Remove in 0.11
// Handle reserving ports, handling both new and old
if node.ReservedResources != nil && node.ReservedResources.Networks.ReservedHostPorts != "" {
collide = idx.AddReservedPortRange(node.ReservedResources.Networks.ReservedHostPorts)
} else if node.Reserved != nil {
for _, n := range node.Reserved.Networks {
if idx.AddReserved(n) {
collide = true
}
}
}
return
}
@ -93,13 +107,31 @@ func (idx *NetworkIndex) SetNode(node *Node) (collide bool) {
// true if there is a collision
func (idx *NetworkIndex) AddAllocs(allocs []*Allocation) (collide bool) {
for _, alloc := range allocs {
for _, task := range alloc.TaskResources {
if len(task.Networks) == 0 {
continue
// Do not consider the resource impact of terminal allocations
if alloc.TerminalStatus() {
continue
}
if alloc.AllocatedResources != nil {
for _, task := range alloc.AllocatedResources.Tasks {
if len(task.Networks) == 0 {
continue
}
n := task.Networks[0]
if idx.AddReserved(n) {
collide = true
}
}
n := task.Networks[0]
if idx.AddReserved(n) {
collide = true
} else {
// COMPAT(0.11): Remove in 0.11
for _, task := range alloc.TaskResources {
if len(task.Networks) == 0 {
continue
}
n := task.Networks[0]
if idx.AddReserved(n) {
collide = true
}
}
}
}
@ -142,6 +174,49 @@ func (idx *NetworkIndex) AddReserved(n *NetworkResource) (collide bool) {
return
}
// AddReservedPortRange marks the ports given as reserved on all network
// interfaces. The port format is comma delimited, with spans given as n1-n2
// (80,100-200,205)
func (idx *NetworkIndex) AddReservedPortRange(ports string) (collide bool) {
// Convert the ports into a slice of ints
resPorts, err := ParsePortRanges(ports)
if err != nil {
return
}
// Ensure we create a bitmap for each available network
for _, n := range idx.AvailNetworks {
used := idx.UsedPorts[n.IP]
if used == nil {
// Try to get a bitmap from the pool, else create
raw := bitmapPool.Get()
if raw != nil {
used = raw.(Bitmap)
used.Clear()
} else {
used, _ = NewBitmap(maxValidPort)
}
idx.UsedPorts[n.IP] = used
}
}
for _, used := range idx.UsedPorts {
for _, port := range resPorts {
// Guard against invalid ports; port is unsigned, so only the upper
// bound can be exceeded
if port >= maxValidPort {
return true
}
if used.Check(uint(port)) {
collide = true
} else {
used.Set(uint(port))
}
}
}
return
}
// yieldIP is used to iteratively invoke the callback with
// an available IP
func (idx *NetworkIndex) yieldIP(cb func(net *NetworkResource, ip net.IP) bool) {

View File

@ -1425,6 +1425,13 @@ type Node struct {
// "docker.runtime=1.8.3"
Attributes map[string]string
// NodeResources captures the available resources on the client.
NodeResources *NodeResources
// ReservedResources captures the set resources on the client that are
// reserved from scheduling.
ReservedResources *NodeReservedResources
// Resources is the available resources on the client.
// For example 'cpu=2' 'memory=2048'
Resources *Resources
@ -1522,6 +1529,8 @@ func (n *Node) Copy() *Node {
nn.Attributes = helper.CopyMapStringString(nn.Attributes)
nn.Resources = nn.Resources.Copy()
nn.Reserved = nn.Reserved.Copy()
nn.NodeResources = nn.NodeResources.Copy()
nn.ReservedResources = nn.ReservedResources.Copy()
nn.Links = helper.CopyMapStringString(nn.Links)
nn.Meta = helper.CopyMapStringString(nn.Meta)
nn.Events = copyNodeEvents(n.Events)
@ -1569,6 +1578,64 @@ func (n *Node) TerminalStatus() bool {
}
}
// COMPAT(0.11): Remove in 0.11
// ComparableReservedResources returns the reserved resources on the node,
// handling upgrade paths. Reserved networks must be handled separately. After
// 0.11, calls to this should be replaced with:
// node.ReservedResources.Comparable()
func (n *Node) ComparableReservedResources() *ComparableResources {
// See if we can no-op
if n.Reserved == nil && n.ReservedResources == nil {
return nil
}
// Node already has 0.9+ behavior
if n.ReservedResources != nil {
return n.ReservedResources.Comparable()
}
// Upgrade path
return &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: uint64(n.Reserved.CPU),
},
Memory: AllocatedMemoryResources{
MemoryMB: uint64(n.Reserved.MemoryMB),
},
},
Shared: AllocatedSharedResources{
DiskMB: uint64(n.Reserved.DiskMB),
},
}
}
// COMPAT(0.11): Remove in 0.11
// ComparableResources returns the resources on the node, handling upgrade
// paths. Networking must be handled separately. After 0.11, calls to this
// should be replaced with: node.NodeResources.Comparable()
func (n *Node) ComparableResources() *ComparableResources {
// Node already has 0.9+ behavior
if n.NodeResources != nil {
return n.NodeResources.Comparable()
}
// Upgrade path
return &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: uint64(n.Resources.CPU),
},
Memory: AllocatedMemoryResources{
MemoryMB: uint64(n.Resources.MemoryMB),
},
},
Shared: AllocatedSharedResources{
DiskMB: uint64(n.Resources.DiskMB),
},
}
}
// Stub returns a summarized version of the node
func (n *Node) Stub() *NodeListStub {
@ -1609,26 +1676,6 @@ type NodeListStub struct {
ModifyIndex uint64
}
// Networks defined for a task on the Resources struct.
type Networks []*NetworkResource
// Port assignment and IP for the given label or empty values.
func (ns Networks) Port(label string) (string, int) {
for _, n := range ns {
for _, p := range n.ReservedPorts {
if p.Label == label {
return n.IP, p.Value
}
}
for _, p := range n.DynamicPorts {
if p.Label == label {
return n.IP, p.Value
}
}
}
return "", 0
}
// Resources is used to define the resources available
// on a client
type Resources struct {
@ -1637,6 +1684,7 @@ type Resources struct {
DiskMB int
IOPS int
Networks Networks
Devices []*RequestedDevice
}
const (
@ -1690,6 +1738,9 @@ func (r *Resources) Merge(other *Resources) {
if len(other.Networks) != 0 {
r.Networks = other.Networks
}
if len(other.Devices) != 0 {
r.Devices = other.Devices
}
}
func (r *Resources) Canonicalize() {
@ -1698,6 +1749,9 @@ func (r *Resources) Canonicalize() {
if len(r.Networks) == 0 {
r.Networks = nil
}
if len(r.Devices) == 0 {
r.Devices = nil
}
for _, n := range r.Networks {
n.Canonicalize()
@ -1735,6 +1789,8 @@ func (r *Resources) Copy() *Resources {
}
newR := new(Resources)
*newR = *r
// Copy the network objects
if r.Networks != nil {
n := len(r.Networks)
newR.Networks = make([]*NetworkResource, n)
@ -1742,17 +1798,22 @@ func (r *Resources) Copy() *Resources {
newR.Networks[i] = r.Networks[i].Copy()
}
}
// Copy the devices
if r.Devices != nil {
n := len(r.Devices)
newR.Devices = make([]*RequestedDevice, n)
for i := 0; i < n; i++ {
newR.Devices[i] = r.Devices[i].Copy()
}
}
return newR
}
// NetIndex finds the matching net index using device name
func (r *Resources) NetIndex(n *NetworkResource) int {
for idx, net := range r.Networks {
if net.Device == n.Device {
return idx
}
}
return -1
return r.Networks.NetIndex(n)
}
// Superset checks if one set of resources is a superset
@ -1927,6 +1988,511 @@ func (n *NetworkResource) PortLabels() map[string]int {
return labelValues
}
// Networks defined for a task on the Resources struct.
type Networks []*NetworkResource
// Port assignment and IP for the given label or empty values.
func (ns Networks) Port(label string) (string, int) {
for _, n := range ns {
for _, p := range n.ReservedPorts {
if p.Label == label {
return n.IP, p.Value
}
}
for _, p := range n.DynamicPorts {
if p.Label == label {
return n.IP, p.Value
}
}
}
return "", 0
}
func (ns Networks) NetIndex(n *NetworkResource) int {
for idx, net := range ns {
if net.Device == n.Device {
return idx
}
}
return -1
}
// RequestedDevice is used to request a device for a task.
type RequestedDevice struct {
// Name is the request name. The possible values are as follows:
// * <type>: A single value only specifies the type of request.
// * <vendor>/<type>: A single slash delimiter assumes the vendor and type of device is specified.
// * <vendor>/<type>/<name>: Two slash delimiters assume vendor, type and specific model are specified.
//
// Examples are as follows:
// * "gpu"
// * "nvidia/gpu"
// * "nvidia/gpu/GTX2080Ti"
Name string
// Count is the number of requested devices
Count uint64
// TODO validate
// Constraints are a set of constraints to apply when selecting the device
// to use.
Constraints []*Constraint
// Affinities are a set of affinities to apply when selecting the device
// to use.
Affinities []*Affinity
}
func (r *RequestedDevice) Copy() *RequestedDevice {
if r == nil {
return nil
}
nr := *r
nr.Constraints = CopySliceConstraints(nr.Constraints)
nr.Affinities = CopySliceAffinities(nr.Affinities)
return &nr
}
// NodeResources is used to define the resources available on a client node.
type NodeResources struct {
Cpu NodeCpuResources
Memory NodeMemoryResources
Disk NodeDiskResources
Networks Networks
}
func (n *NodeResources) Copy() *NodeResources {
if n == nil {
return nil
}
newN := new(NodeResources)
*newN = *n
if n.Networks != nil {
networks := len(n.Networks)
newN.Networks = make([]*NetworkResource, networks)
for i := 0; i < networks; i++ {
newN.Networks[i] = n.Networks[i].Copy()
}
}
return newN
}
// Comparable returns a comparable version of the node's resources. This
// conversion can be lossy, so care must be taken when using it.
func (n *NodeResources) Comparable() *ComparableResources {
if n == nil {
return nil
}
c := &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: n.Cpu.CpuShares,
},
Memory: AllocatedMemoryResources{
MemoryMB: n.Memory.MemoryMB,
},
Networks: n.Networks,
},
Shared: AllocatedSharedResources{
DiskMB: n.Disk.DiskMB,
},
}
return c
}
func (n *NodeResources) Merge(o *NodeResources) {
if o == nil {
return
}
n.Cpu.Merge(&o.Cpu)
n.Memory.Merge(&o.Memory)
n.Disk.Merge(&o.Disk)
if len(o.Networks) != 0 {
n.Networks = o.Networks
}
}
func (n *NodeResources) Equals(o *NodeResources) bool {
if o == nil && n == nil {
return true
} else if o == nil {
return false
} else if n == nil {
return false
}
if !n.Cpu.Equals(&o.Cpu) {
return false
}
if !n.Memory.Equals(&o.Memory) {
return false
}
if !n.Disk.Equals(&o.Disk) {
return false
}
if len(n.Networks) != len(o.Networks) {
return false
}
for i, n := range n.Networks {
if !n.Equals(o.Networks[i]) {
return false
}
}
return true
}
// NodeCpuResources captures the CPU resources of the node.
type NodeCpuResources struct {
// CpuShares is the CPU shares available. This is calculated as the number of
// cores multiplied by the core frequency.
CpuShares uint64
}
func (n *NodeCpuResources) Merge(o *NodeCpuResources) {
if o == nil {
return
}
if o.CpuShares != 0 {
n.CpuShares = o.CpuShares
}
}
func (n *NodeCpuResources) Equals(o *NodeCpuResources) bool {
if o == nil && n == nil {
return true
} else if o == nil {
return false
} else if n == nil {
return false
}
if n.CpuShares != o.CpuShares {
return false
}
return true
}
// NodeMemoryResources captures the memory resources of the node
type NodeMemoryResources struct {
// MemoryMB is the total available memory on the node
MemoryMB uint64
}
func (n *NodeMemoryResources) Merge(o *NodeMemoryResources) {
if o == nil {
return
}
if o.MemoryMB != 0 {
n.MemoryMB = o.MemoryMB
}
}
func (n *NodeMemoryResources) Equals(o *NodeMemoryResources) bool {
if o == nil && n == nil {
return true
} else if o == nil {
return false
} else if n == nil {
return false
}
if n.MemoryMB != o.MemoryMB {
return false
}
return true
}
// NodeDiskResources captures the disk resources of the node
type NodeDiskResources struct {
// DiskMB is the total available disk space on the node
DiskMB uint64
}
func (n *NodeDiskResources) Merge(o *NodeDiskResources) {
if o == nil {
return
}
if o.DiskMB != 0 {
n.DiskMB = o.DiskMB
}
}
func (n *NodeDiskResources) Equals(o *NodeDiskResources) bool {
if o == nil && n == nil {
return true
} else if o == nil {
return false
} else if n == nil {
return false
}
if n.DiskMB != o.DiskMB {
return false
}
return true
}
// NodeReservedResources is used to capture the resources on a client node that
// should be reserved and not made available to jobs.
type NodeReservedResources struct {
Cpu NodeReservedCpuResources
Memory NodeReservedMemoryResources
Disk NodeReservedDiskResources
Networks NodeReservedNetworkResources
}
func (n *NodeReservedResources) Copy() *NodeReservedResources {
if n == nil {
return nil
}
newN := new(NodeReservedResources)
*newN = *n
return newN
}
// Comparable returns a comparable version of the node's reserved resources. The
// returned resources don't contain any network information. This conversion
// can be lossy, so care must be taken when using it.
func (n *NodeReservedResources) Comparable() *ComparableResources {
if n == nil {
return nil
}
c := &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: n.Cpu.CpuShares,
},
Memory: AllocatedMemoryResources{
MemoryMB: n.Memory.MemoryMB,
},
},
Shared: AllocatedSharedResources{
DiskMB: n.Disk.DiskMB,
},
}
return c
}
// NodeReservedCpuResources captures the reserved CPU resources of the node.
type NodeReservedCpuResources struct {
CpuShares uint64
}
// NodeReservedMemoryResources captures the reserved memory resources of the node.
type NodeReservedMemoryResources struct {
MemoryMB uint64
}
// NodeReservedDiskResources captures the reserved disk resources of the node.
type NodeReservedDiskResources struct {
DiskMB uint64
}
// NodeReservedNetworkResources captures the reserved network resources of the node.
type NodeReservedNetworkResources struct {
// ReservedHostPorts is the set of ports reserved on all host network
// interfaces. Its format is a comma-separated list of integers or integer
// ranges, e.g. 80,443,1000-2000,2005.
ReservedHostPorts string
}
// ParseReservedHostPorts returns the reserved host ports.
func (n *NodeReservedNetworkResources) ParseReservedHostPorts() ([]uint64, error) {
return ParsePortRanges(n.ReservedHostPorts)
}
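// Example (illustrative sketch): parsing a reserved-port specification in the
// comma-separated format documented above. Ranges such as 1000-2000 expand to
// the individual port numbers.
func exampleReservedHostPorts() ([]uint64, error) {
	n := &NodeReservedNetworkResources{ReservedHostPorts: "80,443,1000-2000,2005"}
	return n.ParseReservedHostPorts()
}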
// AllocatedResources is the set of resources to be used by an allocation.
type AllocatedResources struct {
// Tasks is a mapping of task name to the resources for the task.
Tasks map[string]*AllocatedTaskResources
// Shared is the set of resources that are shared by all tasks in the group.
Shared AllocatedSharedResources
}
func (a *AllocatedResources) Copy() *AllocatedResources {
if a == nil {
return nil
}
newA := new(AllocatedResources)
*newA = *a
if a.Tasks != nil {
tr := make(map[string]*AllocatedTaskResources, len(a.Tasks))
for task, resource := range a.Tasks {
tr[task] = resource.Copy()
}
newA.Tasks = tr
}
return newA
}
// Comparable returns a comparable version of the allocation's allocated
// resources. This conversion can be lossy, so care must be taken when using it.
func (a *AllocatedResources) Comparable() *ComparableResources {
if a == nil {
return nil
}
c := &ComparableResources{
Shared: a.Shared,
}
for _, r := range a.Tasks {
c.Flattened.Add(r)
}
return c
}
// OldTaskResources returns the pre-0.9.0 map of task resources
func (a *AllocatedResources) OldTaskResources() map[string]*Resources {
m := make(map[string]*Resources, len(a.Tasks))
for name, res := range a.Tasks {
m[name] = &Resources{
CPU: int(res.Cpu.CpuShares),
MemoryMB: int(res.Memory.MemoryMB),
Networks: res.Networks,
}
}
return m
}
// AllocatedTaskResources are the set of resources allocated to a task.
type AllocatedTaskResources struct {
Cpu AllocatedCpuResources
Memory AllocatedMemoryResources
Networks Networks
}
func (a *AllocatedTaskResources) Copy() *AllocatedTaskResources {
if a == nil {
return nil
}
newA := new(AllocatedTaskResources)
*newA = *a
if a.Networks != nil {
n := len(a.Networks)
newA.Networks = make([]*NetworkResource, n)
for i := 0; i < n; i++ {
newA.Networks[i] = a.Networks[i].Copy()
}
}
return newA
}
// NetIndex finds the matching net index using device name
func (a *AllocatedTaskResources) NetIndex(n *NetworkResource) int {
return a.Networks.NetIndex(n)
}
func (a *AllocatedTaskResources) Add(delta *AllocatedTaskResources) {
if delta == nil {
return
}
a.Cpu.Add(&delta.Cpu)
a.Memory.Add(&delta.Memory)
for _, n := range delta.Networks {
// Find the matching interface by device name
idx := a.NetIndex(n)
if idx == -1 {
a.Networks = append(a.Networks, n.Copy())
} else {
a.Networks[idx].Add(n)
}
}
}
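// Example (illustrative sketch, values hypothetical): accumulating per-task
// deltas. CPU and memory add component-wise; networks are matched by device
// via NetIndex and copied in when no match exists.
func exampleTaskResourcesAdd() AllocatedTaskResources {
	total := AllocatedTaskResources{
		Cpu:    AllocatedCpuResources{CpuShares: 500},
		Memory: AllocatedMemoryResources{MemoryMB: 256},
	}
	total.Add(&AllocatedTaskResources{
		Cpu:    AllocatedCpuResources{CpuShares: 250},
		Memory: AllocatedMemoryResources{MemoryMB: 128},
	})
	// total now holds 750 CPU shares and 384 MB of memory.
	return total
}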
// AllocatedSharedResources are the set of resources allocated to a task group.
type AllocatedSharedResources struct {
DiskMB uint64
}
func (a *AllocatedSharedResources) Add(delta *AllocatedSharedResources) {
if delta == nil {
return
}
a.DiskMB += delta.DiskMB
}
// AllocatedCpuResources captures the allocated CPU resources.
type AllocatedCpuResources struct {
CpuShares uint64
}
func (a *AllocatedCpuResources) Add(delta *AllocatedCpuResources) {
if delta == nil {
return
}
a.CpuShares += delta.CpuShares
}
// AllocatedMemoryResources captures the allocated memory resources.
type AllocatedMemoryResources struct {
MemoryMB uint64
}
func (a *AllocatedMemoryResources) Add(delta *AllocatedMemoryResources) {
if delta == nil {
return
}
a.MemoryMB += delta.MemoryMB
}
// ComparableResources is the set of resources allocated to a task group but
// not keyed by Task, making it easier to compare.
type ComparableResources struct {
Flattened AllocatedTaskResources
Shared AllocatedSharedResources
}
func (c *ComparableResources) Add(delta *ComparableResources) {
if delta == nil {
return
}
c.Flattened.Add(&delta.Flattened)
c.Shared.Add(&delta.Shared)
}
// Superset checks if one set of resources is a superset of another. This
// ignores network resources; use the NetworkIndex for those.
func (c *ComparableResources) Superset(other *ComparableResources) (bool, string) {
if c.Flattened.Cpu.CpuShares < other.Flattened.Cpu.CpuShares {
return false, "cpu"
}
if c.Flattened.Memory.MemoryMB < other.Flattened.Memory.MemoryMB {
return false, "memory"
}
if c.Shared.DiskMB < other.Shared.DiskMB {
return false, "disk"
}
return true, ""
}
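// Example (illustrative sketch): checking whether one set of comparable
// resources can hold another, and which dimension is exhausted first.
func exampleSuperset(node, alloc *ComparableResources) (bool, string) {
	fits, dimension := node.Superset(alloc)
	// dimension is "cpu", "memory" or "disk" when fits is false; network
	// resources are ignored here and must be checked separately.
	return fits, dimension
}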
// NetIndex finds the matching net index using device name
func (c *ComparableResources) NetIndex(n *NetworkResource) int {
return c.Flattened.Networks.NetIndex(n)
}
const (
// JobTypeNomad is reserved for internal system tasks and is
// always handled by the CoreScheduler.
@ -3427,19 +3993,6 @@ func (tg *TaskGroup) Canonicalize(job *Job) {
for _, task := range tg.Tasks {
task.Canonicalize(job, tg)
}
// Add up the disk resources to EphemeralDisk. This is done so that users
// are not required to move their disk attribute from resources to
// EphemeralDisk section of the job spec in Nomad 0.5
// COMPAT 0.4.1 -> 0.5
// Remove in 0.6
var diskMB int
for _, task := range tg.Tasks {
diskMB += task.Resources.DiskMB
}
if diskMB > 0 {
tg.EphemeralDisk.SizeMB = diskMB
}
}
// Validate is used to sanity check a task group
@ -3622,17 +4175,6 @@ func (tg *TaskGroup) GoString() string {
return fmt.Sprintf("*%#v", *tg)
}
// CombinedResources returns the combined resources for the task group
func (tg *TaskGroup) CombinedResources() *Resources {
r := &Resources{
DiskMB: tg.EphemeralDisk.SizeMB,
}
for _, task := range tg.Tasks {
r.Add(task.Resources)
}
return r
}
// CheckRestart describes if and when a task should be restarted based on
// failing health checks.
type CheckRestart struct {
@ -6032,18 +6574,24 @@ type Allocation struct {
// TaskGroup is the name of the task group that should be run
TaskGroup string
// COMPAT(0.11): Remove in 0.11
// Resources is the total set of resources allocated as part
// of this allocation of the task group.
Resources *Resources
// COMPAT(0.11): Remove in 0.11
// SharedResources are the resources that are shared by all the tasks in an
// allocation
SharedResources *Resources
// COMPAT(0.11): Remove in 0.11
// TaskResources is the set of resources allocated to each
// task. These should sum to the total Resources.
TaskResources map[string]*Resources
// AllocatedResources is the total resources allocated for the task group.
AllocatedResources *AllocatedResources
// Metrics associated with this allocation
Metrics *AllocMetric
@ -6137,6 +6685,7 @@ func (a *Allocation) copyImpl(job bool) *Allocation {
na.Job = na.Job.Copy()
}
na.AllocatedResources = na.AllocatedResources.Copy()
na.Resources = na.Resources.Copy()
na.SharedResources = na.SharedResources.Copy()
@ -6401,6 +6950,43 @@ func (a *Allocation) SetEventDisplayMessages() {
setDisplayMsg(a.TaskStates)
}
// COMPAT(0.11): Remove in 0.11
// ComparableResources returns the resources on the allocation
// handling upgrade paths. After 0.11 calls to this should be replaced with:
// alloc.AllocatedResources.Comparable()
func (a *Allocation) ComparableResources() *ComparableResources {
// Alloc already has 0.9+ behavior
if a.AllocatedResources != nil {
return a.AllocatedResources.Comparable()
}
var resources *Resources
if a.Resources != nil {
resources = a.Resources
} else if a.TaskResources != nil {
resources = new(Resources)
resources.Add(a.SharedResources)
for _, taskResource := range a.TaskResources {
resources.Add(taskResource)
}
}
// Upgrade path
return &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: uint64(resources.CPU),
},
Memory: AllocatedMemoryResources{
MemoryMB: uint64(resources.MemoryMB),
},
},
Shared: AllocatedSharedResources{
DiskMB: uint64(resources.DiskMB),
},
}
}
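// Example (illustrative sketch, values hypothetical): a pre-0.9 allocation
// that carries only the legacy Resources field still yields comparable
// resources via the upgrade path above.
func exampleLegacyComparable() *ComparableResources {
	alloc := &Allocation{
		Resources: &Resources{CPU: 500, MemoryMB: 256, DiskMB: 300},
	}
	// Flattened CPU shares: 500, memory: 256 MB; shared disk: 300 MB.
	return alloc.ComparableResources()
}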
// Stub returns a list stub for the allocation
func (a *Allocation) Stub() *AllocListStub {
return &AllocListStub{

View File

@ -15,7 +15,7 @@
version = "1.1"
[[projects]]
digest = "1:cde027e8bb29425770dd8ddc87789e0139f2dc53a80e248c8d6a0698c7e3f0bc"
digest = "1:fd5206897fecaccd4d4f247bfb399a8e183b54a06426ffb9b157ed1119a3910f"
name = "github.com/aliyun/alibaba-cloud-sdk-go"
packages = [
"sdk",
@ -31,19 +31,19 @@
"services/sts",
]
pruneopts = "UT"
revision = "ef9535c490beb6b59620d93f6c7ba88e9b3b1ad0"
version = "1.26.2"
revision = "9669db6328e053fefc47bfe8ddf2e82625444fab"
version = "1.31.4"
[[projects]]
branch = "master"
digest = "1:6bf6d532e503d9526d46e69aff04d11632c8c1e28b847dbd226babc1689aa723"
digest = "1:c47f4964978e211c6e566596ec6246c329912ea92e9bb99c00798bb4564c5b09"
name = "github.com/armon/go-radix"
packages = ["."]
pruneopts = "UT"
revision = "7fddfc383310abc091d79a27f116d30cf0424032"
revision = "1a2de0c21c94309923825da3df33a4381872c795"
version = "v1.0.0"
[[projects]]
digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260"
digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf"
name = "github.com/golang/protobuf"
packages = [
"proto",
@ -53,8 +53,8 @@
"ptypes/timestamp",
]
pruneopts = "UT"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
version = "v1.2.0"
[[projects]]
branch = "master"
@ -66,51 +66,51 @@
[[projects]]
branch = "master"
digest = "1:d1971637b21871ec2033a44ca87c99c5608a7340cb34ec75fab8d2ab503276c9"
digest = "1:0ade334594e69404d80d9d323445d2297ff8161637f9b2d347cc6973d2d6f05b"
name = "github.com/hashicorp/errwrap"
packages = ["."]
pruneopts = "UT"
revision = "d6c0cd88035724dd42e0f335ae30161c20575ecc"
revision = "8a6fb523712970c966eefc6b39ed2c5e74880354"
[[projects]]
branch = "master"
digest = "1:77cb3be9b21ba7f1a4701e870c84ea8b66e7d74c7c8951c58155fdadae9414ec"
digest = "1:f47d6109c2034cb16bd62b220e18afd5aa9d5a1630fe5d937ad96a4fb7cbb277"
name = "github.com/hashicorp/go-cleanhttp"
packages = ["."]
pruneopts = "UT"
revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
revision = "e8ab9daed8d1ddd2d3c4efba338fe2eeae2e4f18"
[[projects]]
branch = "master"
digest = "1:e8d99882caa8c74d68f340ddb9bba3f7e433117ce57c3e52501edfa7e195d2c7"
digest = "1:0876aeb6edb07e20b6b0ce1d346655cb63dbe0a26ccfb47b68a9b7697709777b"
name = "github.com/hashicorp/go-hclog"
packages = ["."]
pruneopts = "UT"
revision = "ff2cf002a8dd750586d91dddd4470c341f981fe1"
revision = "61d530d6c27f994fb6c83b80f99a69c54125ec8a"
[[projects]]
branch = "master"
digest = "1:2394f5a25132b3868eff44599cc28d44bdd0330806e34c495d754dd052df612b"
digest = "1:2be5a35f0c5b35162c41bb24971e5dcf6ce825403296ee435429cdcc4e1e847e"
name = "github.com/hashicorp/go-immutable-radix"
packages = ["."]
pruneopts = "UT"
revision = "7f3cd4390caab3250a57f30efdb2a65dd7649ecf"
revision = "27df80928bb34bb1b0d6d0e01b9e679902e7a6b5"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:46fb6a9f1b9667f32ac93e08b1da118b2c666991424ea12e848b05d4fe5155ef"
digest = "1:f668349b83f7d779567c880550534addeca7ebadfdcf44b0b9c39be61864b4b7"
name = "github.com/hashicorp/go-multierror"
packages = ["."]
pruneopts = "UT"
revision = "3d5d8f294aa03d8e98859feac328afbdf1ae0703"
revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:20f78c1cf1b6fe6c55ba1407350d6fc7dc77d1591f8106ba693c28014a1a1b37"
digest = "1:ed6b6f1d3d949ad31aba00953f0fc58aaaa1df1a37102ff5646df82233329853"
name = "github.com/hashicorp/go-plugin"
packages = ["."]
pruneopts = "UT"
revision = "a4620f9913d19f03a6bf19b2f304daaaf83ea130"
revision = "1faddcf740b61468a23dacc67369c28ec96d7fc7"
[[projects]]
branch = "master"
@ -137,35 +137,34 @@
revision = "6d291a969b86c4b633730bfc6b8b9d64c3aafed9"
[[projects]]
branch = "master"
digest = "1:354978aad16c56c27f57e5b152224806d87902e4935da3b03e18263d82ae77aa"
digest = "1:12ed7dcca9531e58c65cdadb8af0052724bef7fa1581380523fb9cb1215faf0d"
name = "github.com/hashicorp/go-uuid"
packages = ["."]
pruneopts = "UT"
revision = "27454136f0364f2d44b1276c552d69105cf8c498"
revision = "de160f5c59f693fed329e73e291bb751fe4ea4dc"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:32c0e96a63bd093eccf37db757fb314be5996f34de93969321c2cbef893a7bd6"
digest = "1:77395dd3847dac9c45118c668f5dab85aedf0163dc3b38aea6578c5cf0d502f9"
name = "github.com/hashicorp/go-version"
packages = ["."]
pruneopts = "UT"
revision = "270f2f71b1ee587f3b609f00f422b76a6b28f348"
revision = "b5a281d3160aa11950a6182bd9a9dc2cb1e02d50"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:cf296baa185baae04a9a7004efee8511d08e2f5f51d4cbe5375da89722d681db"
digest = "1:8ec8d88c248041a6df5f6574b87bc00e7e0b493881dad2e7ef47b11dc69093b5"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru",
]
pruneopts = "UT"
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
version = "v0.5.0"
[[projects]]
branch = "master"
digest = "1:12247a2e99a060cc692f6680e5272c8adf0b8f572e6bce0d7095e624c958a240"
digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8"
name = "github.com/hashicorp/hcl"
packages = [
".",
@ -179,11 +178,12 @@
"json/token",
]
pruneopts = "UT"
revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"
revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:c9caf8fc607b9b8fa503965eca966ae6f1fb96573a0a1c04017b9cd0a98adad3"
digest = "1:9b851e29f662c4522e3c9a235bb23008b8fce207b071e50eee2a014fd50f1059"
name = "github.com/hashicorp/vault"
packages = [
"api",
@ -194,6 +194,7 @@
"helper/errutil",
"helper/hclutil",
"helper/jsonutil",
"helper/license",
"helper/locksutil",
"helper/logging",
"helper/mlock",
@ -212,15 +213,15 @@
"version",
]
pruneopts = "UT"
revision = "8655d167084028d627f687ddc25d0c71307eb5be"
revision = "e7a0452736177a4ecf6955cdf72a93c325943a18"
[[projects]]
branch = "master"
digest = "1:89658943622e6bc5e76b4da027ee9583fa0b321db0c797bd554edab96c1ca2b1"
digest = "1:a4826c308e84f5f161b90b54a814f0be7d112b80164b9b884698a6903ea47ab3"
name = "github.com/hashicorp/yamux"
packages = ["."]
pruneopts = "UT"
revision = "3520598351bb3500a49ae9563f5539666ae0a27c"
revision = "7221087c3d281fda5f794e28c2ea4c6e4d5c4558"
[[projects]]
digest = "1:b87714e57a511d88f307aba7d5b63522da12bed0a050889c81272fc50f71100e"
@ -239,28 +240,28 @@
version = "1.1.5"
[[projects]]
branch = "master"
digest = "1:c7354463195544b1ab3c1f1fadb41430947f5d28dfbf2cdbd38268c5717a5a03"
digest = "1:78bbb1ba5b7c3f2ed0ea1eab57bdd3859aec7e177811563edc41198a760b06af"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = "UT"
revision = "58046073cbffe2f25d425fe1331102f55cf719de"
revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:cae1afe858922bd10e9573b87130f730a6e4183a00eba79920d6656629468bfa"
digest = "1:42eb1f52b84a06820cedc9baec2e710bfbda3ee6dac6cdb97f8b9a5066134ec6"
name = "github.com/mitchellh/go-testing-interface"
packages = ["."]
pruneopts = "UT"
revision = "a61a99592b77c9ba629d254a693acffaeb4b7e28"
revision = "6d0b8010fcc857872e42fc6c931227569016843c"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:5ab79470a1d0fb19b041a624415612f8236b3c06070161a910562f2b2d064355"
digest = "1:e32dfc6abff6a3633ef4d9a1022fd707c8ef26f1e1e8f855dc58dc415ce7c8f3"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
pruneopts = "UT"
revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac"
revision = "fe40af7a9c397fa3ddba203c38a5042c5d0475ad"
version = "v1.1.1"
[[projects]]
digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563"
@ -286,6 +287,17 @@
revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39"
version = "v1.0.0"
[[projects]]
digest = "1:4f0885b3f0dba96128a09a6f4b4231c42688fbd05f323224c6aa5adc9f4e87bf"
name = "github.com/pierrec/lz4"
packages = [
".",
"internal/xxh32",
]
pruneopts = "UT"
revision = "bb6bfd13c6a262f1943c0446eb25b7f54c1fb9a2"
version = "v2.0.6"
[[projects]]
digest = "1:0e792eea6c96ec55ff302ef33886acbaa5006e900fefe82689e88d96439dcd84"
name = "github.com/ryanuber/go-glob"
@ -304,7 +316,7 @@
[[projects]]
branch = "master"
digest = "1:b5c3834d33445efdc5a8dcb154bed9e4c211edadbf02f6f5cc20c5e9be26a499"
digest = "1:505dbee0833715a72a529bb57c354826ad42a4496fad787fa143699b4de1a6d0"
name = "golang.org/x/net"
packages = [
"context",
@ -316,15 +328,15 @@
"trace",
]
pruneopts = "UT"
revision = "aaf60122140d3fcf75376d319f0554393160eb50"
revision = "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f"
[[projects]]
branch = "master"
digest = "1:05662433b3a13c921587a6e622b5722072edff83211efd1cd79eeaeedfd83f07"
digest = "1:746ccf620ef9726c42453032e8e039860851ab5914278d24202f343a479a3073"
name = "golang.org/x/sys"
packages = ["unix"]
pruneopts = "UT"
revision = "1c9583448a9c3aa0f9a6a5241bf73c0bd8aafded"
revision = "af653ce8b74f808d092db8ca9741fbb63d2a469d"
[[projects]]
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
@ -359,14 +371,14 @@
[[projects]]
branch = "master"
digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c"
digest = "1:1e6b0176e8c5dd8ff551af65c76f8b73a99bcf4d812cedff1b91711b7df4804c"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = "UT"
revision = "d0a8f471bba2dbb160885b0000d814ee5d559bad"
revision = "c7e5094acea1ca1b899e2259d80a6b0f882f81f8"
[[projects]]
digest = "1:047efbc3c9a51f3002b0002f92543857d372654a676fb6b01931982cd80467dd"
digest = "1:1b6d2676ea895d33cbd1999c75dfc8e25b103c754ccfc66dc06ae845ce3a47bc"
name = "google.golang.org/grpc"
packages = [
".",
@ -399,8 +411,8 @@
"tap",
]
pruneopts = "UT"
revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455"
version = "v1.14.0"
revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1"
version = "v1.15.0"
[solve-meta]
analyzer-name = "dep"
@ -408,6 +420,7 @@
input-imports = [
"github.com/aliyun/alibaba-cloud-sdk-go/sdk",
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth",
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials",
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers",
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints",
"github.com/aliyun/alibaba-cloud-sdk-go/services/sts",
@ -415,6 +428,7 @@
"github.com/hashicorp/go-cleanhttp",
"github.com/hashicorp/go-hclog",
"github.com/hashicorp/go-sockaddr",
"github.com/hashicorp/go-uuid",
"github.com/hashicorp/vault/api",
"github.com/hashicorp/vault/helper/cidrutil",
"github.com/hashicorp/vault/helper/parseutil",

View File

@ -289,7 +289,7 @@ func (b *GcpAuthBackend) pathRoleRead(ctx context.Context, req *logical.Request,
resp := make(map[string]interface{})
if role.RoleType != "" {
resp["role_type"] = role.RoleType
resp["role"] = role.RoleType
}
if role.ProjectId != "" {
resp["project_id"] = role.ProjectId

View File

@ -5,10 +5,6 @@ This plugin allows for JWTs (including OIDC tokens) to authenticate with Vault.
**Please note**: We take Vault's security and our users' trust very seriously. If you believe you have found a security issue in Vault, _please responsibly disclose_ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com).
## IMPORTANT
This plugin is in pre-release state. It is not well tested (in fact, not tested at all) and there is no documentation currently available.
## Quick Links
- Vault Website: https://www.vaultproject.io
- JWT Auth Docs: https://www.vaultproject.io/docs/auth/jwt.html

Some files were not shown because too many files have changed in this diff