diff --git a/api/sys_health.go b/api/sys_health.go index bd74e8269..e4c60d446 100644 --- a/api/sys_health.go +++ b/api/sys_health.go @@ -36,4 +36,5 @@ type HealthResponse struct { Version string `json:"version"` ClusterName string `json:"cluster_name,omitempty"` ClusterID string `json:"cluster_id,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` } diff --git a/api/sys_leader.go b/api/sys_leader.go index dfef8345c..8846dcdfa 100644 --- a/api/sys_leader.go +++ b/api/sys_leader.go @@ -25,4 +25,5 @@ type LeaderResponse struct { LeaderClusterAddress string `json:"leader_cluster_address"` PerfStandby bool `json:"performance_standby"` PerfStandbyLastRemoteWAL uint64 `json:"performance_standby_last_remote_wal"` + LastWAL uint64 `json:"last_wal"` } diff --git a/builtin/logical/ssh/path_config_zeroaddress.go b/builtin/logical/ssh/path_config_zeroaddress.go index 02754baac..d91c6810d 100644 --- a/builtin/logical/ssh/path_config_zeroaddress.go +++ b/builtin/logical/ssh/path_config_zeroaddress.go @@ -3,7 +3,6 @@ package ssh import ( "context" "fmt" - "strings" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" @@ -19,7 +18,7 @@ func pathConfigZeroAddress(b *backend) *framework.Path { Pattern: "config/zeroaddress", Fields: map[string]*framework.FieldSchema{ "roles": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Description: `[Required] Comma separated list of role names which allows credentials to be requested for any IP address. CIDR blocks previously registered under these roles will be ignored.`, @@ -60,13 +59,12 @@ func (b *backend) pathConfigZeroAddressRead(ctx context.Context, req *logical.Re } func (b *backend) pathConfigZeroAddressWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - roleNames := d.Get("roles").(string) - if roleNames == "" { + roles := d.Get("roles").([]string) + if len(roles) == 0 { return logical.ErrorResponse("Missing roles"), nil } // Check if the roles listed actually exist in the backend - roles := strings.Split(roleNames, ",") for _, item := range roles { role, err := b.getRole(ctx, req.Storage, item) if err != nil { diff --git a/command/format.go b/command/format.go index 10244a8aa..93f12a04b 100644 --- a/command/format.go +++ b/command/format.go @@ -379,6 +379,10 @@ func OutputSealStatus(ui cli.Ui, client *api.Client, status *api.SealStatusRespo } } + if leaderStatus.LastWAL != 0 { + out = append(out, fmt.Sprintf("Last WAL | %d", leaderStatus.LastWAL)) + } + ui.Output(tableOutput(out, nil)) return 0 } diff --git a/http/sys_health.go b/http/sys_health.go index 089b9949a..02136ceca 100644 --- a/http/sys_health.go +++ b/http/sys_health.go @@ -179,6 +179,11 @@ func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, erro ClusterName: clusterName, ClusterID: clusterID, } + + if init && !sealed && !standby { + body.LastWAL = vault.LastWAL(core) + } + return code, body, nil } @@ -193,4 +198,5 @@ type HealthResponse struct { Version string `json:"version"` ClusterName string `json:"cluster_name,omitempty"` ClusterID string `json:"cluster_id,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` } diff --git a/http/sys_leader.go b/http/sys_leader.go index 0f5305a3f..76ba92b2b 100644 --- a/http/sys_leader.go +++ b/http/sys_leader.go @@ -38,6 +38,8 @@ func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request } if resp.PerfStandby { resp.PerfStandbyLastRemoteWAL = vault.LastRemoteWAL(core) + } else if 
isLeader || !haEnabled { + resp.LastWAL = vault.LastWAL(core) } respondOk(w, resp) @@ -50,4 +52,5 @@ type LeaderResponse struct { LeaderClusterAddress string `json:"leader_cluster_address"` PerfStandby bool `json:"performance_standby"` PerfStandbyLastRemoteWAL uint64 `json:"performance_standby_last_remote_wal"` + LastWAL uint64 `json:"last_wal,omitempty"` } diff --git a/logical/framework/field_data.go b/logical/framework/field_data.go index 2ee529116..ae4eefa7c 100644 --- a/logical/framework/field_data.go +++ b/logical/framework/field_data.go @@ -63,8 +63,10 @@ func (d *FieldData) Get(k string) interface{} { panic(fmt.Sprintf("field %s not in the schema", k)) } + // If the value can't be decoded, use the zero or default value for the field + // type value, ok := d.GetOk(k) - if !ok { + if !ok || value == nil { value = schema.DefaultOrZero() } @@ -96,8 +98,10 @@ func (d *FieldData) GetFirst(k ...string) (interface{}, bool) { return nil, false } -// GetOk gets the value for the given field. The second return value -// will be false if the key is invalid or the key is not set at all. +// GetOk gets the value for the given field. The second return value will be +// false if the key is invalid or the key is not set at all. If the field k is +// set and the decoded value is nil, the default or zero value +// will be returned instead. func (d *FieldData) GetOk(k string) (interface{}, bool) { schema, ok := d.Schema[k] if !ok { @@ -147,49 +151,49 @@ func (d *FieldData) getPrimitive(k string, schema *FieldSchema) (interface{}, bo case TypeBool: var result bool if err := mapstructure.WeakDecode(raw, &result); err != nil { - return nil, true, err + return nil, false, err } return result, true, nil case TypeInt: var result int if err := mapstructure.WeakDecode(raw, &result); err != nil { - return nil, true, err + return nil, false, err } return result, true, nil case TypeString: var result string if err := mapstructure.WeakDecode(raw, &result); err != nil { - return nil, true, err + return nil, false, err } return result, true, nil case TypeLowerCaseString: var result string if err := mapstructure.WeakDecode(raw, &result); err != nil { - return nil, true, err + return nil, false, err } return strings.ToLower(result), true, nil case TypeNameString: var result string if err := mapstructure.WeakDecode(raw, &result); err != nil { - return nil, true, err + return nil, false, err } matched, err := regexp.MatchString("^\\w(([\\w-.]+)?\\w)?$", result) if err != nil { - return nil, true, err + return nil, false, err } if !matched { - return nil, true, errors.New("field does not match the formatting rules") + return nil, false, errors.New("field does not match the formatting rules") } return result, true, nil case TypeMap: var result map[string]interface{} if err := mapstructure.WeakDecode(raw, &result); err != nil { - return nil, true, err + return nil, false, err } return result, true, nil @@ -217,20 +221,20 @@ func (d *FieldData) getPrimitive(k string, schema *FieldSchema) (interface{}, bo case string: dur, err := parseutil.ParseDurationSecond(inp) if err != nil { - return nil, true, err + return nil, false, err } result = int(dur.Seconds()) case json.Number: valInt64, err := inp.Int64() if err != nil { - return nil, true, err + return nil, false, err } result = int(valInt64) default: return nil, false, fmt.Errorf("invalid input '%v'", raw) } if result < 0 { - return nil, true, fmt.Errorf("cannot provide negative value '%d'", result) + return nil, false, fmt.Errorf("cannot provide negative value '%d'", 
result) } return result, true, nil @@ -243,24 +247,33 @@ func (d *FieldData) getPrimitive(k string, schema *FieldSchema) (interface{}, bo } decoder, err := mapstructure.NewDecoder(config) if err != nil { - return nil, true, err + return nil, false, err } if err := decoder.Decode(raw); err != nil { - return nil, true, err + return nil, false, err + } + if len(result) == 0 { + return make([]int, 0), true, nil } return result, true, nil case TypeSlice: var result []interface{} if err := mapstructure.WeakDecode(raw, &result); err != nil { - return nil, true, err + return nil, false, err + } + if len(result) == 0 { + return make([]interface{}, 0), true, nil } return result, true, nil case TypeStringSlice: var result []string if err := mapstructure.WeakDecode(raw, &result); err != nil { - return nil, true, err + return nil, false, err + } + if len(result) == 0 { + return make([]string, 0), true, nil } return strutil.TrimStrings(result), true, nil @@ -281,7 +294,7 @@ func (d *FieldData) getPrimitive(k string, schema *FieldSchema) (interface{}, bo // If map parse fails, parse as a string list of = delimited pairs var listResult []string if err := mapstructure.WeakDecode(raw, &listResult); err != nil { - return nil, true, err + return nil, false, err } result := make(map[string]string, len(listResult)) @@ -350,7 +363,7 @@ func (d *FieldData) getPrimitive(k string, schema *FieldSchema) (interface{}, bo if err := mapstructure.WeakDecode(raw, &resultMap); err == nil { result, err = toHeader(resultMap) if err != nil { - return nil, true, err + return nil, false, err } return result, true, nil } @@ -364,11 +377,11 @@ func (d *FieldData) getPrimitive(k string, schema *FieldSchema) (interface{}, bo headerBytes = []byte(headerStr) } if err := json.NewDecoder(bytes.NewReader(headerBytes)).Decode(&resultMap); err != nil { - return nil, true, err + return nil, false, err } result, err = toHeader(resultMap) if err != nil { - return nil, true, err + return nil, false, err } return result, true, nil } @@ -379,17 +392,17 @@ func (d *FieldData) getPrimitive(k string, schema *FieldSchema) (interface{}, bo for _, keyPairIfc := range keyPairs { keyPair, ok := keyPairIfc.(string) if !ok { - return nil, true, fmt.Errorf("invalid key pair %q", keyPair) + return nil, false, fmt.Errorf("invalid key pair %q", keyPair) } keyPairSlice := strings.SplitN(keyPair, ":", 2) if len(keyPairSlice) != 2 || keyPairSlice[0] == "" { - return nil, true, fmt.Errorf("invalid key pair %q", keyPair) + return nil, false, fmt.Errorf("invalid key pair %q", keyPair) } result.Add(keyPairSlice[0], keyPairSlice[1]) } return result, true, nil } - return nil, true, fmt.Errorf("%s not provided an expected format", raw) + return nil, false, fmt.Errorf("%s not provided an expected format", raw) default: panic(fmt.Sprintf("Unknown type: %s", schema.Type)) diff --git a/physical/error.go b/physical/error.go index cf206b115..d4c6f80e1 100644 --- a/physical/error.go +++ b/physical/error.go @@ -35,7 +35,7 @@ var _ Transactional = (*TransactionalErrorInjector)(nil) // NewErrorInjector returns a wrapped physical backend to inject error func NewErrorInjector(b Backend, errorPercent int, logger log.Logger) *ErrorInjector { if errorPercent < 0 || errorPercent > 100 { - errorPercent = DefaultJitterPercent + errorPercent = DefaultErrorPercent } logger.Info("creating error injector") diff --git a/physical/latency.go b/physical/latency.go index 7aa9fab98..188297142 100644 --- a/physical/latency.go +++ b/physical/latency.go @@ -57,9 +57,12 @@ func 
NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter in func (l *LatencyInjector) addLatency() { // Calculate a value between 1 +- jitter% - min := 100 - l.jitterPercent - max := 100 + l.jitterPercent - percent := l.random.Intn(max-min) + min + percent := 100 + if l.jitterPercent > 0 { + min := 100 - l.jitterPercent + max := 100 + l.jitterPercent + percent = l.random.Intn(max-min) + min + } latencyDuration := time.Duration(int(l.latency) * percent / 100) time.Sleep(latencyDuration) } diff --git a/vault/core.go b/vault/core.go index 5fa03e76d..f1f54451c 100644 --- a/vault/core.go +++ b/vault/core.go @@ -86,6 +86,7 @@ var ( enterprisePreSeal = enterprisePreSealImpl startReplication = startReplicationImpl stopReplication = stopReplicationImpl + LastWAL = lastWALImpl LastRemoteWAL = lastRemoteWALImpl WaitUntilWALShipped = waitUntilWALShippedImpl ) @@ -1514,6 +1515,10 @@ func waitUntilWALShippedImpl(ctx context.Context, c *Core, index uint64) bool { return true } +func lastWALImpl(c *Core) uint64 { + return 0 +} + func lastRemoteWALImpl(c *Core) uint64 { return 0 } diff --git a/vendor/cloud.google.com/go/spanner/client.go b/vendor/cloud.google.com/go/spanner/client.go index 309b90371..2ff856fd5 100644 --- a/vendor/cloud.google.com/go/spanner/client.go +++ b/vendor/cloud.google.com/go/spanner/client.go @@ -18,7 +18,6 @@ package spanner import ( "fmt" - "log" "regexp" "sync/atomic" "time" @@ -255,26 +254,12 @@ func (c *Client) BatchReadOnlyTransaction(ctx context.Context, tb TimestampBound ) defer func() { if err != nil && sh != nil { - e := runRetryable(ctx, func(ctx context.Context) error { - _, e := s.client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: s.getID()}) - return e - }) - if e != nil { - log.Printf("Failed to delete session %v. Error: %v", s.getID(), e) - } + s.delete(ctx) } }() // create session sc := c.rrNext() - err = runRetryable(ctx, func(ctx context.Context) error { - sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{Database: c.database, Session: &sppb.Session{Labels: c.sessionLabels}}) - if e != nil { - return e - } - // If no error, construct the new session. - s = &session{valid: true, client: sc, id: sid.Name, createTime: time.Now(), md: c.md} - return nil - }) + s, err = createSession(ctx, sc, c.database, c.sessionLabels, c.md) if err != nil { return nil, err } diff --git a/vendor/cloud.google.com/go/spanner/doc.go b/vendor/cloud.google.com/go/spanner/doc.go index 8f0303bb3..4954082e8 100644 --- a/vendor/cloud.google.com/go/spanner/doc.go +++ b/vendor/cloud.google.com/go/spanner/doc.go @@ -306,6 +306,19 @@ mutations, which will all be executed at the end of the transaction: return nil }) + +DML and Partitioned DML + +Spanner supports DML statements like INSERT, UPDATE and DELETE. Use +ReadWriteTransaction.Update to run DML statements. It returns the number of rows +affected. (You can call use ReadWriteTransaction.Query with a DML statement. The first +call to Next on the resulting RowIterator will return iterator.Done, and the RowCount +field of the iterator will hold the number of affected rows.) + +For large databases, it may be more efficient to partition the DML statement. Use +client.PartitionedUpdate to run a DML statement in this way. Not all DML statements +can be partitioned. + Tracing This client has been instrumented to use OpenCensus tracing (http://opencensus.io). 
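
The spanner doc.go text added above introduces two new DML entry points, ReadWriteTransaction.Update and Client.PartitionedUpdate, both of which are defined later in this diff. As a minimal usage sketch of how they are meant to be called (the database path and the Singers table are placeholder names, not part of this change):

	package main

	import (
		"context"
		"log"

		"cloud.google.com/go/spanner"
	)

	func main() {
		ctx := context.Background()
		client, err := spanner.NewClient(ctx, "projects/p/instances/i/databases/d")
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()

		// Update runs a DML statement inside a read-write transaction and
		// returns the exact number of rows affected.
		_, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
			rowCount, err := txn.Update(ctx, spanner.NewStatement(
				"UPDATE Singers SET LastName = 'Lee' WHERE SingerId = 1"))
			if err != nil {
				return err
			}
			log.Printf("updated %d rows", rowCount)
			return nil
		})
		if err != nil {
			log.Fatal(err)
		}

		// PartitionedUpdate runs a fully partitionable, idempotent DML statement
		// in separate internal transactions; the returned count is a lower bound.
		count, err := client.PartitionedUpdate(ctx, spanner.NewStatement(
			"DELETE FROM Singers WHERE Active = FALSE"))
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("deleted at least %d rows", count)
	}
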
diff --git a/vendor/cloud.google.com/go/spanner/pdml.go b/vendor/cloud.google.com/go/spanner/pdml.go new file mode 100644 index 000000000..8cf486e06 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/pdml.go @@ -0,0 +1,101 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spanner + +import ( + "time" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// PartitionedUpdate executes a DML statement in parallel across the database, using +// separate, internal transactions that commit independently. The DML statement must +// be fully partitionable: it must be expressible as the union of many statements +// each of which accesses only a single row of the table. The statement should also be +// idempotent, because it may be applied more than once. +// +// PartitionedUpdate returns an estimated count of the number of rows affected. The actual +// number of affected rows may be greater than the estimate. +func (c *Client) PartitionedUpdate(ctx context.Context, statement Statement) (count int64, err error) { + ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.PartitionedUpdate") + defer func() { traceEndSpan(ctx, err) }() + if err := checkNestedTxn(ctx); err != nil { + return 0, err + } + + var ( + tx transactionID + s *session + sh *sessionHandle + ) + // create session + sc := c.rrNext() + s, err = createSession(ctx, sc, c.database, c.sessionLabels, c.md) + if err != nil { + return 0, toSpannerError(err) + } + defer s.delete(ctx) + sh = &sessionHandle{session: s} + // begin transaction + err = runRetryable(contextWithOutgoingMetadata(ctx, sh.getMetadata()), func(ctx context.Context) error { + res, e := sc.BeginTransaction(ctx, &sppb.BeginTransactionRequest{ + Session: sh.getID(), + Options: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_PartitionedDml_{PartitionedDml: &sppb.TransactionOptions_PartitionedDml{}}, + }, + }) + if e != nil { + return e + } + tx = res.Id + return nil + }) + if err != nil { + return 0, toSpannerError(err) + } + req := &sppb.ExecuteSqlRequest{ + Session: sh.getID(), + Transaction: &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_Id{Id: tx}, + }, + Sql: statement.SQL, + } + rpc := func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) { + req.ResumeToken = resumeToken + return sc.ExecuteStreamingSql(ctx, req) + } + iter := stream(contextWithOutgoingMetadata(ctx, sh.getMetadata()), + rpc, func(time.Time) {}, func(error) {}) + // TODO(jba): factor out the following code from here and ReadWriteTransaction.Update. 
+ defer iter.Stop() + for { + _, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + return 0, toSpannerError(err) + } + time.Sleep(time.Second) + } + + if !iter.sawStats { + return 0, spannerErrorf(codes.InvalidArgument, "query passed to Update: %q", statement.SQL) + } + return iter.RowCount, nil +} diff --git a/vendor/cloud.google.com/go/spanner/read.go b/vendor/cloud.google.com/go/spanner/read.go index 08cbfa6aa..b9459e97a 100644 --- a/vendor/cloud.google.com/go/spanner/read.go +++ b/vendor/cloud.google.com/go/spanner/read.go @@ -67,6 +67,10 @@ type RowIterator struct { // if QueryWithStats was called. QueryStats map[string]interface{} + // For a DML statement, the number of rows affected. For PDML, this is a lower bound. + // Available for DML statements after RowIterator.Next returns iterator.Done. + RowCount int64 + streamd *resumableStreamDecoder rowd *partialResultSetDecoder setTimestamp func(time.Time) @@ -74,6 +78,7 @@ type RowIterator struct { cancel func() err error rows []*Row + sawStats bool } // Next returns the next result. Its second return value is iterator.Done if @@ -86,8 +91,16 @@ func (r *RowIterator) Next() (*Row, error) { for len(r.rows) == 0 && r.streamd.next() { prs := r.streamd.get() if prs.Stats != nil { + r.sawStats = true r.QueryPlan = prs.Stats.QueryPlan r.QueryStats = protostruct.DecodeToMap(prs.Stats.QueryStats) + if prs.Stats.RowCount != nil { + rc, err := extractRowCount(prs.Stats) + if err != nil { + return nil, err + } + r.RowCount = rc + } } r.rows, r.err = r.rowd.add(prs) if r.err != nil { @@ -113,6 +126,20 @@ func (r *RowIterator) Next() (*Row, error) { return nil, r.err } +func extractRowCount(stats *sppb.ResultSetStats) (int64, error) { + if stats.RowCount == nil { + return 0, spannerErrorf(codes.Internal, "missing RowCount") + } + switch rc := stats.RowCount.(type) { + case *sppb.ResultSetStats_RowCountExact: + return rc.RowCountExact, nil + case *sppb.ResultSetStats_RowCountLowerBound: + return rc.RowCountLowerBound, nil + default: + return 0, spannerErrorf(codes.Internal, "unknown RowCount type %T", stats.RowCount) + } +} + // Do calls the provided function once in sequence for each row in the iteration. If the // function returns a non-nil error, Do immediately returns that error. // diff --git a/vendor/cloud.google.com/go/spanner/session.go b/vendor/cloud.google.com/go/spanner/session.go index 1c922a846..142992eb7 100644 --- a/vendor/cloud.google.com/go/spanner/session.go +++ b/vendor/cloud.google.com/go/spanner/session.go @@ -260,6 +260,11 @@ func (s *session) destroy(isExpire bool) bool { // Remove s from Cloud Spanner service. ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() + s.delete(ctx) + return true +} + +func (s *session) delete(ctx context.Context) { // Ignore the error returned by runRetryable because even if we fail to explicitly destroy the session, // it will be eventually garbage collected by Cloud Spanner. err := runRetryable(ctx, func(ctx context.Context) error { @@ -269,7 +274,6 @@ func (s *session) destroy(isExpire bool) bool { if err != nil { log.Printf("Failed to delete session %v. Error: %v", s.getID(), err) } - return true } // prepareForWrite prepares the session for write if it is not already in that state. 
@@ -464,29 +468,38 @@ func (p *sessionPool) createSession(ctx context.Context) (*session, error) { doneCreate(false) return nil, err } - var s *session - err = runRetryable(ctx, func(ctx context.Context) error { - sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{ - Database: p.db, - Session: &sppb.Session{Labels: p.sessionLabels}, - }) - if e != nil { - return e - } - // If no error, construct the new session. - s = &session{valid: true, client: sc, id: sid.Name, pool: p, createTime: time.Now(), md: p.md} - p.hc.register(s) - return nil - }) + s, err := createSession(ctx, sc, p.db, p.sessionLabels, p.md) if err != nil { doneCreate(false) // Should return error directly because of the previous retries on CreateSession RPC. return nil, err } + s.pool = p + p.hc.register(s) doneCreate(true) return s, nil } +func createSession(ctx context.Context, sc sppb.SpannerClient, db string, labels map[string]string, md metadata.MD) (*session, error) { + var s *session + err := runRetryable(ctx, func(ctx context.Context) error { + sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{ + Database: db, + Session: &sppb.Session{Labels: labels}, + }) + if e != nil { + return e + } + // If no error, construct the new session. + s = &session{valid: true, client: sc, id: sid.Name, createTime: time.Now(), md: md} + return nil + }) + if err != nil { + return nil, err + } + return s, nil +} + func (p *sessionPool) isHealthy(s *session) bool { if s.getNextCheck().Add(2 * p.hc.getInterval()).Before(time.Now()) { // TODO: figure out if we need to schedule a new healthcheck worker here. diff --git a/vendor/cloud.google.com/go/spanner/transaction.go b/vendor/cloud.google.com/go/spanner/transaction.go index b2610315d..7624fe35f 100644 --- a/vendor/cloud.google.com/go/spanner/transaction.go +++ b/vendor/cloud.google.com/go/spanner/transaction.go @@ -18,6 +18,7 @@ package spanner import ( "sync" + "sync/atomic" "time" "golang.org/x/net/context" @@ -46,6 +47,8 @@ type txReadEnv interface { type txReadOnly struct { // read-transaction environment for performing transactional read operations. txReadEnv + + sequenceNumber int64 // Atomic. Only needed for DML statements, but used for all. } // errSessionClosed returns error for using a recycled/destroyed session @@ -159,7 +162,7 @@ func (t *txReadOnly) Query(ctx context.Context, statement Statement) *RowIterato return t.query(ctx, statement, sppb.ExecuteSqlRequest_NORMAL) } -// Query executes a query against the database. It returns a RowIterator +// Query executes a SQL statement against the database. It returns a RowIterator // for retrieving the resulting rows. The RowIterator will also be populated // with a query plan and execution statistics. func (t *txReadOnly) QueryWithStats(ctx context.Context, statement Statement) *RowIterator { @@ -188,29 +191,11 @@ func (t *txReadOnly) AnalyzeQuery(ctx context.Context, statement Statement) (*sp func (t *txReadOnly) query(ctx context.Context, statement Statement, mode sppb.ExecuteSqlRequest_QueryMode) (ri *RowIterator) { ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.Query") defer func() { traceEndSpan(ctx, ri.err) }() - var ( - sh *sessionHandle - ts *sppb.TransactionSelector - err error - ) - if sh, ts, err = t.acquire(ctx); err != nil { - return &RowIterator{err: err} - } - // Cloud Spanner will return "Session not found" on bad sessions. - sid, client := sh.getID(), sh.getClient() - if sid == "" || client == nil { - // Might happen if transaction is closed in the middle of a API call. 
- return &RowIterator{err: errSessionClosed(sh)} - } - req := &sppb.ExecuteSqlRequest{ - Session: sid, - Transaction: ts, - Sql: statement.SQL, - QueryMode: mode, - } - if err := statement.bindParams(req); err != nil { + req, sh, err := t.prepareExecuteSql(ctx, statement, mode) + if err != nil { return &RowIterator{err: err} } + client := sh.getClient() return stream( contextWithOutgoingMetadata(ctx, sh.getMetadata()), func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) { @@ -221,6 +206,31 @@ func (t *txReadOnly) query(ctx context.Context, statement Statement, mode sppb.E t.release) } +func (t *txReadOnly) prepareExecuteSql(ctx context.Context, stmt Statement, mode sppb.ExecuteSqlRequest_QueryMode) ( + *sppb.ExecuteSqlRequest, *sessionHandle, error) { + sh, ts, err := t.acquire(ctx) + if err != nil { + return nil, nil, err + } + // Cloud Spanner will return "Session not found" on bad sessions. + sid := sh.getID() + if sid == "" { + // Might happen if transaction is closed in the middle of a API call. + return nil, nil, errSessionClosed(sh) + } + req := &sppb.ExecuteSqlRequest{ + Session: sid, + Transaction: ts, + Sql: stmt.SQL, + QueryMode: mode, + Seqno: atomic.AddInt64(&t.sequenceNumber, 1), + } + if err := stmt.bindParams(req); err != nil { + return nil, nil, err + } + return req, sh, nil +} + // txState is the status of a transaction. type txState int @@ -648,6 +658,27 @@ func (t *ReadWriteTransaction) BufferWrite(ms []*Mutation) error { return nil } +// Update executes a DML statement against the database. It returns the number of +// affected rows. +// Update returns an error if the statement is a query. However, the +// query is executed, and any data read will be validated upon commit. +func (t *ReadWriteTransaction) Update(ctx context.Context, stmt Statement) (rowCount int64, err error) { + ctx = traceStartSpan(ctx, "cloud.google.com/go/spanner.Update") + defer func() { traceEndSpan(ctx, err) }() + req, sh, err := t.prepareExecuteSql(ctx, stmt, sppb.ExecuteSqlRequest_NORMAL) + if err != nil { + return 0, err + } + resultSet, err := sh.getClient().ExecuteSql(ctx, req) + if err != nil { + return 0, err + } + if resultSet.Stats == nil { + return 0, spannerErrorf(codes.InvalidArgument, "query passed to Update: %q", stmt.SQL) + } + return extractRowCount(resultSet.Stats) +} + // acquire implements txReadEnv.acquire. 
func (t *ReadWriteTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { ts := &sppb.TransactionSelector{ diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go index 55238ab15..f90050cb0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -169,7 +169,7 @@ func (q *Queue) GetMetadata(options *QueueServiceOptions) error { params = addTimeout(params, options.Timeout) headers = mergeHeaders(headers, headersFromStruct(*options)) } - uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}}) + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index a9cd65df5..a1b88228f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -18,4 +18,4 @@ package version // Changes may cause incorrect behavior and will be lost if the code is regenerated. // Number contains the semantic version of this SDK. -const Number = "v21.1.0" +const Number = "v21.2.0" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go index bee5e61dd..8c83a917f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -19,10 +19,6 @@ import ( "net/url" ) -const ( - activeDirectoryAPIVersion = "1.0" -) - // OAuthConfig represents the endpoints needed // in OAuth operations type OAuthConfig struct { @@ -46,11 +42,25 @@ func validateStringParam(param, name string) error { // NewOAuthConfig returns an OAuthConfig with tenant specific urls func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + apiVer := "1.0" + return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) +} + +// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls. +// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. 
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { return nil, err } + api := "" // it's legal for tenantID to be empty so don't validate it - const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + if apiVersion != nil { + if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { + return nil, err + } + api = fmt.Sprintf("?api-version=%s", *apiVersion) + } + const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" u, err := url.Parse(activeDirectoryEndpoint) if err != nil { return nil, err @@ -59,15 +69,15 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err if err != nil { return nil, err } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) if err != nil { return nil, err } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) if err != nil { return nil, err } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 6f59bfd08..2fd340d69 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -226,6 +226,8 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo token := jwt.New(jwt.SigningMethodRS256) token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c token.Claims = jwt.MapClaims{ "aud": spt.inner.OauthConfig.TokenEndpoint.String(), "iss": spt.inner.ClientID, diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index e43f7a2c4..603f2dc1d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -58,10 +58,7 @@ func NewFuture(req *http.Request) Future { // with the initial response from an asynchronous operation. func NewFutureFromResponse(resp *http.Response) (Future, error) { pt, err := createPollingTracker(resp) - if err != nil { - return Future{}, err - } - return Future{pt: pt}, nil + return Future{pt: pt}, err } // Response returns the last HTTP response. 
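
The adal config change above replaces the hard-coded activeDirectoryAPIVersion constant with an optional per-config value. A brief sketch of the resulting call patterns, assuming placeholder endpoint and tenant values:

	package main

	import (
		"fmt"
		"log"

		"github.com/Azure/go-autorest/autorest/adal"
	)

	func main() {
		const endpoint = "https://login.microsoftonline.com/" // placeholder
		const tenantID = "contoso-tenant-id"                  // placeholder

		// Existing constructor: behavior unchanged, pins api-version=1.0.
		cfg, err := adal.NewOAuthConfig(endpoint, tenantID)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(cfg.TokenEndpoint.String()) // .../oauth2/token?api-version=1.0

		// New constructor: a nil apiVersion omits the query parameter entirely.
		cfgNoVer, err := adal.NewOAuthConfigWithAPIVersion(endpoint, tenantID, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(cfgNoVer.TokenEndpoint.String()) // .../oauth2/token
	}
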
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go index a52625750..dcd232f6e 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -31,6 +31,7 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure/cli" "github.com/dimchansky/utfbom" "golang.org/x/crypto/pkcs12" ) @@ -167,6 +168,35 @@ func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, er return autorest.NewBearerAuthorizer(spToken), nil } +// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. +func NewAuthorizerFromCLI() (autorest.Authorizer, error) { + settings, err := getAuthenticationSettings() + if err != nil { + return nil, err + } + + if settings.resource == "" { + settings.resource = settings.environment.ResourceManagerEndpoint + } + + return NewAuthorizerFromCLIWithResource(settings.resource) +} + +// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. +func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) { + token, err := cli.GetTokenFromCLI(resource) + if err != nil { + return nil, err + } + + adalToken, err := token.ToADALToken() + if err != nil { + return nil, err + } + + return autorest.NewBearerAuthorizer(&adalToken), nil +} + func getAuthFile() (*file, error) { fileLocation := os.Getenv("AZURE_AUTH_LOCATION") if fileLocation == "" { diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go new file mode 100644 index 000000000..b62bf03ba --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go @@ -0,0 +1,72 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/dimchansky/utfbom" + "github.com/mitchellh/go-homedir" +) + +// Profile represents a Profile from the Azure CLI +type Profile struct { + InstallationID string `json:"installationId"` + Subscriptions []Subscription `json:"subscriptions"` +} + +// Subscription represents a Subscription from the Azure CLI +type Subscription struct { + EnvironmentName string `json:"environmentName"` + ID string `json:"id"` + IsDefault bool `json:"isDefault"` + Name string `json:"name"` + State string `json:"state"` + TenantID string `json:"tenantId"` + User *User `json:"user"` +} + +// User represents a User from the Azure CLI +type User struct { + Name string `json:"name"` + Type string `json:"type"` +} + +// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI +func ProfilePath() (string, error) { + return homedir.Expand("~/.azure/azureProfile.json") +} + +// LoadProfile restores a Profile object from a file located at 'path'. +func LoadProfile(path string) (result Profile, err error) { + var contents []byte + contents, err = ioutil.ReadFile(path) + if err != nil { + err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + return + } + reader := utfbom.SkipOnly(bytes.NewReader(contents)) + + dec := json.NewDecoder(reader) + if err = dec.Decode(&result); err != nil { + err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err) + return + } + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go new file mode 100644 index 000000000..dece9ec63 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -0,0 +1,170 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strconv" + "time" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/date" + "github.com/mitchellh/go-homedir" +) + +// Token represents an AccessToken from the Azure CLI +type Token struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` +} + +// ToADALToken converts an Azure CLI `Token`` to an `adal.Token`` +func (t Token) ToADALToken() (converted adal.Token, err error) { + tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn) + if err != nil { + err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err) + return + } + + difference := tokenExpirationDate.Sub(date.UnixEpoch()) + + converted = adal.Token{ + AccessToken: t.AccessToken, + Type: t.TokenType, + ExpiresIn: "3600", + ExpiresOn: json.Number(strconv.Itoa(int(difference.Seconds()))), + RefreshToken: t.RefreshToken, + Resource: t.Resource, + } + return +} + +// AccessTokensPath returns the path where access tokens are stored from the Azure CLI +// TODO(#199): add unit test. +func AccessTokensPath() (string, error) { + // Azure-CLI allows user to customize the path of access tokens thorugh environment variable. + var accessTokenPath = os.Getenv("AZURE_ACCESS_TOKEN_FILE") + var err error + + // Fallback logic to default path on non-cloud-shell environment. + // TODO(#200): remove the dependency on hard-coding path. + if accessTokenPath == "" { + accessTokenPath, err = homedir.Expand("~/.azure/accessTokens.json") + } + + return accessTokenPath, err +} + +// ParseExpirationDate parses either a Azure CLI or CloudShell date into a time object +func ParseExpirationDate(input string) (*time.Time, error) { + // CloudShell (and potentially the Azure CLI in future) + expirationDate, cloudShellErr := time.Parse(time.RFC3339, input) + if cloudShellErr != nil { + // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone) + const cliFormat = "2006-01-02 15:04:05.999999" + expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local) + if cliErr == nil { + return &expirationDate, nil + } + + return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr) + } + + return &expirationDate, nil +} + +// LoadTokens restores a set of Token objects from a file located at 'path'. +func LoadTokens(path string) ([]Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var tokens []Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&tokens); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err) + } + + return tokens, nil +} + +// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios. +func GetTokenFromCLI(resource string) (*Token, error) { + // This is the path that a developer can set to tell this class what the install path for Azure CLI is. 
+ const azureCLIPath = "AzureCLIPath" + + // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. + azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) + + // Default path for non-Windows. + const azureCLIDefaultPath = "/usr/bin:/usr/local/bin" + + // Validate resource, since it gets sent as a command line argument to Azure CLI + const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(invalidResourceErrorTemplate, resource) + } + + // Execute Azure CLI to get token + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir"))) + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows)) + cliCmd.Args = append(cliCmd.Args, "/c") + } else { + cliCmd = exec.Command(os.Getenv("SHELL")) + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath)) + } + cliCmd.Args = append(cliCmd.Args, "az", "account", "get-access-token", "-o", "json", "--resource", resource) + + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) + } + + tokenResponse := Token{} + err = json.Unmarshal(output, &tokenResponse) + if err != nil { + return nil, err + } + + return &tokenResponse, err +} diff --git a/vendor/github.com/Azure/go-autorest/version/version.go b/vendor/github.com/Azure/go-autorest/version/version.go index 180bbbbf2..f7480b3e9 100644 --- a/vendor/github.com/Azure/go-autorest/version/version.go +++ b/vendor/github.com/Azure/go-autorest/version/version.go @@ -20,7 +20,7 @@ import ( ) // Number contains the semantic version of this SDK. 
-const Number = "v11.0.0" +const Number = "v11.1.1" var ( userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go index 99a2189b7..a00c775c2 100644 --- a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go @@ -20,12 +20,11 @@ import ( "encoding/hex" "encoding/json" "fmt" + "github.com/satori/go.uuid" "net/url" "reflect" "strconv" "time" - - "github.com/satori/go.uuid" ) // if you use go 1.10 or higher, you can hack this util by these to avoid "TimeZone.zip not found" on Windows diff --git a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/cluster.go b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/cluster.go index a900f6569..7e8d113d5 100644 --- a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/cluster.go +++ b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/cluster.go @@ -23,7 +23,7 @@ package fdb /* - #define FDB_API_VERSION 600 + #define FDB_API_VERSION 610 #include */ import "C" diff --git a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/database.go b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/database.go index 84aa6ebb8..005538075 100644 --- a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/database.go +++ b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/database.go @@ -23,7 +23,7 @@ package fdb /* - #define FDB_API_VERSION 600 + #define FDB_API_VERSION 610 #include */ import "C" diff --git a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/doc.go b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/doc.go index a7308887a..b12cb4a22 100644 --- a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/doc.go +++ b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/doc.go @@ -46,7 +46,7 @@ A basic interaction with the FoundationDB API is demonstrated below: func main() { // Different API versions may expose different runtime behaviors. - fdb.MustAPIVersion(600) + fdb.MustAPIVersion(610) // Open the default database from the system cluster db := fdb.MustOpenDefault() diff --git a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/errors.go b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/errors.go index 15bd8081c..5b39724d7 100644 --- a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/errors.go +++ b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/errors.go @@ -23,7 +23,7 @@ package fdb /* - #define FDB_API_VERSION 600 + #define FDB_API_VERSION 610 #include */ import "C" diff --git a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/fdb.go b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/fdb.go index 76cf5ac67..d4512f872 100644 --- a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/fdb.go +++ b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/fdb.go @@ -23,7 +23,7 @@ package fdb /* - #define FDB_API_VERSION 600 + #define FDB_API_VERSION 610 #include #include */ @@ -109,7 +109,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error { // library, an error will be returned. APIVersion must be called prior to any // other functions in the fdb package. // -// Currently, this package supports API versions 200 through 600. +// Currently, this package supports API versions 200 through 610. 
// // Warning: When using the multi-version client API, setting an API version that // is not supported by a particular client library will prevent that client from @@ -117,7 +117,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error { // the API version of your application after upgrading your client until the // cluster has also been upgraded. func APIVersion(version int) error { - headerVersion := 600 + headerVersion := 610 networkMutex.Lock() defer networkMutex.Unlock() @@ -129,7 +129,7 @@ func APIVersion(version int) error { return errAPIVersionAlreadySet } - if version < 200 || version > 600 { + if version < 200 || version > 610 { return errAPIVersionNotSupported } diff --git a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/futures.go b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/futures.go index c86441254..ec257dd3f 100644 --- a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/futures.go +++ b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/futures.go @@ -24,7 +24,7 @@ package fdb /* #cgo LDFLAGS: -lfdb_c -lm - #define FDB_API_VERSION 600 + #define FDB_API_VERSION 610 #include #include diff --git a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/range.go b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/range.go index 01d2ef59c..583e14d75 100644 --- a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/range.go +++ b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/range.go @@ -23,7 +23,7 @@ package fdb /* - #define FDB_API_VERSION 600 + #define FDB_API_VERSION 610 #include */ import "C" diff --git a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/transaction.go b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/transaction.go index 549558406..79a8011fa 100644 --- a/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/transaction.go +++ b/vendor/github.com/apple/foundationdb/bindings/go/src/fdb/transaction.go @@ -23,7 +23,7 @@ package fdb /* - #define FDB_API_VERSION 600 + #define FDB_API_VERSION 610 #include */ import "C" diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go index 4b0d630e4..6f57024d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -48,4 +48,6 @@ type metric struct { DNSLatency *int `json:"DnsLatency,omitempty"` TCPLatency *int `json:"TcpLatency,omitempty"` SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go index 691f8513a..118618442 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -112,15 +112,16 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) { now := time.Now() m := metric{ - ClientID: aws.String(rep.clientID), - API: aws.String(r.Operation.Name), - Service: aws.String(r.ClientInfo.ServiceID), - Timestamp: (*metricTime)(&now), - Type: aws.String("ApiCall"), - AttemptCount: aws.Int(r.RetryCount + 1), - Region: r.Config.Region, - Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), - XAmzRequestID: aws.String(r.RequestID), + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + Type: aws.String("ApiCall"), + AttemptCount: 
aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), } // TODO: Probably want to figure something out for logging dropped @@ -230,3 +231,12 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler) } + +// boolIntValue return 1 for true and 0 for false. +func boolIntValue(b bool) int { + if b { + return 1 + } + + return 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 6cd84cd96..23bb639e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -24,6 +24,7 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" ) // A Defaults provides a collection of default values for SDK clients. @@ -114,7 +115,6 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro const ( httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" - ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" ) // RemoteCredProvider returns a credentials provider for the default remote @@ -124,8 +124,8 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P return localHTTPCredProvider(cfg, handlers, u) } - if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 { - u := fmt.Sprintf("http://169.254.170.2%s", uri) + if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) return httpCredProvider(cfg, handlers, u) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go index 401246228..bcfd947a3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -17,6 +17,10 @@ const ( ParamMinValueErrCode = "ParamMinValueError" // ParamMinLenErrCode is the error code for fields without enough elements. ParamMinLenErrCode = "ParamMinLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" ) // Validator provides a way for types to perform validation logic on their @@ -232,3 +236,26 @@ func NewErrParamMinLen(field string, min int) *ErrParamMinLen { func (e *ErrParamMinLen) MinLen() int { return e.min } + +// An ErrParamFormat represents a invalid format parameter error. +type ErrParamFormat struct { + errInvalidParam + format string +} + +// NewErrParamFormat creates a new invalid format parameter error. +func NewErrParamFormat(field string, format, value string) *ErrParamFormat { + return &ErrParamFormat{ + errInvalidParam: errInvalidParam{ + code: ParamFormatErrCode, + field: field, + msg: fmt.Sprintf("format %v, %v", format, value), + }, + format: format, + } +} + +// Format returns the field's required format. 
+func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 51f305563..5d7b28950 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -19,8 +19,26 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" ) +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) + // A Session provides a central location to create service clients from and // store configurations and request handlers for those services. // @@ -436,6 +454,57 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share // Configure credentials if not already set if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + + // inspect the profile to see if a credential source has been specified. + if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { + + // if both credential_source and source_profile have been set, return an error + // as this is undefined behavior. + if len(sharedCfg.AssumeRole.SourceProfile) > 0 { + return ErrSharedConfigSourceCollision + } + + // valid credential source values + const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" + ) + + switch sharedCfg.AssumeRole.CredentialSource { + case credSourceEc2Metadata: + cfgCp := *cfg + p := defaults.RemoteCredProvider(cfgCp, handlers) + cfgCp.Credentials = credentials.NewCredentials(p) + + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. 
+ return AssumeRoleTokenProviderNotSetError{} + } + + cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) + case credSourceEnvironment: + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return ErrSharedConfigECSContainerEnvVarEmpty + } + + cfgCp := *cfg + p := defaults.RemoteCredProvider(cfgCp, handlers) + creds := credentials.NewCredentials(p) + + cfg.Credentials = creds + default: + return ErrSharedConfigInvalidCredSource + } + + return nil + } + if len(envCfg.Creds.AccessKeyID) > 0 { cfg.Credentials = credentials.NewStaticCredentialsFromCreds( envCfg.Creds, @@ -445,32 +514,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( sharedCfg.AssumeRoleSource.Creds, ) + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { // AssumeRole Token provider is required if doing Assume Role // with MFA. return AssumeRoleTokenProviderNotSetError{} } - cfg.Credentials = stscreds.NewCredentials( - &Session{ - Config: &cfgCp, - Handlers: handlers.Copy(), - }, - sharedCfg.AssumeRole.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName - // Assume role with external ID - if len(sharedCfg.AssumeRole.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.AssumeRole.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ) + cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) } else if len(sharedCfg.Creds.AccessKeyID) > 0 { cfg.Credentials = credentials.NewStaticCredentialsFromCreds( sharedCfg.Creds, @@ -493,6 +544,30 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share return nil } +func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials { + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.AssumeRole.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + + // Assume role with external ID + if len(sharedCfg.AssumeRole.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.AssumeRole.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ) +} + // AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the // MFAToken option is not set when shared config is configured load assume a // role with an MFA token. 
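
The session changes above add support for the shared config credential_source key (valid values per this diff: Ec2InstanceMetadata, Environment, EcsContainer) and reject profiles that set both credential_source and source_profile. A minimal sketch of a profile and how it would be consumed; the profile name and role ARN are placeholders:

	package main

	import (
		"log"

		"github.com/aws/aws-sdk-go/aws/session"
	)

	// Example ~/.aws/config profile exercising the new credential_source path:
	//
	//   [profile ecs-task]
	//   role_arn          = arn:aws:iam::123456789012:role/example-role
	//   credential_source = EcsContainer
	//
	// EcsContainer requires AWS_CONTAINER_CREDENTIALS_RELATIVE_URI to be set in
	// the environment; setting both credential_source and source_profile in one
	// profile now returns ErrSharedConfigSourceCollision.

	func main() {
		sess, err := session.NewSessionWithOptions(session.Options{
			Profile:           "ecs-task",
			SharedConfigState: session.SharedConfigEnable,
		})
		if err != nil {
			log.Fatal(err)
		}
		_ = sess
	}
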
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 09c8e5bc7..565a0b795 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -16,11 +16,12 @@ const ( sessionTokenKey = `aws_session_token` // optional // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional // Additional Config fields regionKey = `region` @@ -32,11 +33,12 @@ const ( ) type assumeRoleConfig struct { - RoleARN string - SourceProfile string - ExternalID string - MFASerial string - RoleSessionName string + RoleARN string + SourceProfile string + CredentialSource string + ExternalID string + MFASerial string + RoleSessionName string } // sharedConfig represents the configuration fields of the SDK config files. @@ -127,6 +129,13 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { var assumeRoleSrc sharedConfig + if len(cfg.AssumeRole.CredentialSource) > 0 { + // setAssumeRoleSource is only called when source_profile is found. 
+ // If both source_profile and credential_source are set, then + // ErrSharedConfigSourceCollision will be returned + return ErrSharedConfigSourceCollision + } + // Multiple level assume role chains are not support if cfg.AssumeRole.SourceProfile == origProfile { assumeRoleSrc = *cfg @@ -195,13 +204,16 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e // Assume Role roleArn := section.Key(roleArnKey).String() srcProfile := section.Key(sourceProfileKey).String() - if len(roleArn) > 0 && len(srcProfile) > 0 { + credentialSource := section.Key(credentialSourceKey).String() + hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 + if len(roleArn) > 0 && hasSource { cfg.AssumeRole = assumeRoleConfig{ - RoleARN: roleArn, - SourceProfile: srcProfile, - ExternalID: section.Key(externalIDKey).String(), - MFASerial: section.Key(mfaSerialKey).String(), - RoleSessionName: section.Key(roleSessionNameKey).String(), + RoleARN: roleArn, + SourceProfile: srcProfile, + CredentialSource: credentialSource, + ExternalID: section.Key(externalIDKey).String(), + MFASerial: section.Key(mfaSerialKey).String(), + RoleSessionName: section.Key(roleSessionNameKey).String(), } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index d282a4f82..7a7b37f37 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.15.47" +const SDKVersion = "1.15.55" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go new file mode 100644 index 000000000..0b9b0dfce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go @@ -0,0 +1,57 @@ +package s3err + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RequestFailure provides additional S3 specific metadata for the request +// failure. +type RequestFailure struct { + awserr.RequestFailure + + hostID string +} + +// NewRequestFailure returns a request failure error decordated with S3 +// specific metadata. +func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure { + return &RequestFailure{RequestFailure: err, hostID: hostID} +} + +func (r RequestFailure) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s", + r.StatusCode(), r.RequestID(), r.hostID) + return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} +func (r RequestFailure) String() string { + return r.Error() +} + +// HostID returns the HostID request response value. +func (r RequestFailure) HostID() string { + return r.hostID +} + +// RequestFailureWrapperHandler returns a handler to rap an +// awserr.RequestFailure with the S3 request ID 2 from the response. 
+func RequestFailureWrapperHandler() request.NamedHandler { + return request.NamedHandler{ + Name: "awssdk.s3.errorHandler", + Fn: func(req *request.Request) { + reqErr, ok := req.Error.(awserr.RequestFailure) + if !ok || reqErr == nil { + return + } + + hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2") + if req.Error == nil { + return + } + + req.Error = NewRequestFailure(reqErr, hostID) + }, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 000000000..b63e4c263 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overriden to test to ensure the credential process +// is behaving correctly. +var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 000000000..f06f44ee1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,21 @@ +package protocol + +// ValidHostLabel returns if the label is a valid RFC 1123 Section 2.1 domain +// host label name. +func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index 332b76dd4..551ef3a3f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -18,7 +18,7 @@ const opBatchGetItem = "BatchGetItem" // BatchGetItemRequest generates a "aws/request.Request" representing the // client's request for the BatchGetItem operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -213,7 +213,7 @@ const opBatchWriteItem = "BatchWriteItem" // BatchWriteItemRequest generates a "aws/request.Request" representing the // client's request for the BatchWriteItem operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -379,7 +379,7 @@ const opCreateBackup = "CreateBackup" // CreateBackupRequest generates a "aws/request.Request" representing the // client's request for the CreateBackup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. @@ -514,7 +514,7 @@ const opCreateGlobalTable = "CreateGlobalTable" // CreateGlobalTableRequest generates a "aws/request.Request" representing the // client's request for the CreateGlobalTable operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -645,7 +645,7 @@ const opCreateTable = "CreateTable" // CreateTableRequest generates a "aws/request.Request" representing the // client's request for the CreateTable operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -756,7 +756,7 @@ const opDeleteBackup = "DeleteBackup" // DeleteBackupRequest generates a "aws/request.Request" representing the // client's request for the DeleteBackup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -857,7 +857,7 @@ const opDeleteItem = "DeleteItem" // DeleteItemRequest generates a "aws/request.Request" representing the // client's request for the DeleteItem operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -968,7 +968,7 @@ const opDeleteTable = "DeleteTable" // DeleteTableRequest generates a "aws/request.Request" representing the // client's request for the DeleteTable operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1086,7 +1086,7 @@ const opDescribeBackup = "DescribeBackup" // DescribeBackupRequest generates a "aws/request.Request" representing the // client's request for the DescribeBackup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1170,7 +1170,7 @@ const opDescribeContinuousBackups = "DescribeContinuousBackups" // DescribeContinuousBackupsRequest generates a "aws/request.Request" representing the // client's request for the DescribeContinuousBackups operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. @@ -1265,7 +1265,7 @@ const opDescribeEndpoints = "DescribeEndpoints" // DescribeEndpointsRequest generates a "aws/request.Request" representing the // client's request for the DescribeEndpoints operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1337,7 +1337,7 @@ const opDescribeGlobalTable = "DescribeGlobalTable" // DescribeGlobalTableRequest generates a "aws/request.Request" representing the // client's request for the DescribeGlobalTable operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1419,7 +1419,7 @@ const opDescribeGlobalTableSettings = "DescribeGlobalTableSettings" // DescribeGlobalTableSettingsRequest generates a "aws/request.Request" representing the // client's request for the DescribeGlobalTableSettings operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1501,7 +1501,7 @@ const opDescribeLimits = "DescribeLimits" // DescribeLimitsRequest generates a "aws/request.Request" representing the // client's request for the DescribeLimits operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1636,7 +1636,7 @@ const opDescribeTable = "DescribeTable" // DescribeTableRequest generates a "aws/request.Request" representing the // client's request for the DescribeTable operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1727,7 +1727,7 @@ const opDescribeTimeToLive = "DescribeTimeToLive" // DescribeTimeToLiveRequest generates a "aws/request.Request" representing the // client's request for the DescribeTimeToLive operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1810,7 +1810,7 @@ const opGetItem = "GetItem" // GetItemRequest generates a "aws/request.Request" representing the // client's request for the GetItem operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. @@ -1908,7 +1908,7 @@ const opListBackups = "ListBackups" // ListBackupsRequest generates a "aws/request.Request" representing the // client's request for the ListBackups operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1995,7 +1995,7 @@ const opListGlobalTables = "ListGlobalTables" // ListGlobalTablesRequest generates a "aws/request.Request" representing the // client's request for the ListGlobalTables operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2074,7 +2074,7 @@ const opListTables = "ListTables" // ListTablesRequest generates a "aws/request.Request" representing the // client's request for the ListTables operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2211,7 +2211,7 @@ const opListTagsOfResource = "ListTagsOfResource" // ListTagsOfResourceRequest generates a "aws/request.Request" representing the // client's request for the ListTagsOfResource operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2298,7 +2298,7 @@ const opPutItem = "PutItem" // PutItemRequest generates a "aws/request.Request" representing the // client's request for the PutItem operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2439,7 +2439,7 @@ const opQuery = "Query" // QueryRequest generates a "aws/request.Request" representing the // client's request for the Query operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2633,7 +2633,7 @@ const opRestoreTableFromBackup = "RestoreTableFromBackup" // RestoreTableFromBackupRequest generates a "aws/request.Request" representing the // client's request for the RestoreTableFromBackup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -2755,7 +2755,7 @@ const opRestoreTableToPointInTime = "RestoreTableToPointInTime" // RestoreTableToPointInTimeRequest generates a "aws/request.Request" representing the // client's request for the RestoreTableToPointInTime operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2901,7 +2901,7 @@ const opScan = "Scan" // ScanRequest generates a "aws/request.Request" representing the // client's request for the Scan operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3075,7 +3075,7 @@ const opTagResource = "TagResource" // TagResourceRequest generates a "aws/request.Request" representing the // client's request for the TagResource operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3184,7 +3184,7 @@ const opUntagResource = "UntagResource" // UntagResourceRequest generates a "aws/request.Request" representing the // client's request for the UntagResource operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3291,7 +3291,7 @@ const opUpdateContinuousBackups = "UpdateContinuousBackups" // UpdateContinuousBackupsRequest generates a "aws/request.Request" representing the // client's request for the UpdateContinuousBackups operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3387,7 +3387,7 @@ const opUpdateGlobalTable = "UpdateGlobalTable" // UpdateGlobalTableRequest generates a "aws/request.Request" representing the // client's request for the UpdateGlobalTable operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3498,7 +3498,7 @@ const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings" // UpdateGlobalTableSettingsRequest generates a "aws/request.Request" representing the // client's request for the UpdateGlobalTableSettings operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. @@ -3604,7 +3604,7 @@ const opUpdateItem = "UpdateItem" // UpdateItemRequest generates a "aws/request.Request" representing the // client's request for the UpdateItem operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3709,7 +3709,7 @@ const opUpdateTable = "UpdateTable" // UpdateTableRequest generates a "aws/request.Request" representing the // client's request for the UpdateTable operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3827,7 +3827,7 @@ const opUpdateTimeToLive = "UpdateTimeToLive" // UpdateTimeToLiveRequest generates a "aws/request.Request" representing the // client's request for the UpdateTimeToLive operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 5e18932db..bc96eea57 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -18,7 +18,7 @@ const opAcceptReservedInstancesExchangeQuote = "AcceptReservedInstancesExchangeQ // AcceptReservedInstancesExchangeQuoteRequest generates a "aws/request.Request" representing the // client's request for the AcceptReservedInstancesExchangeQuote operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -93,7 +93,7 @@ const opAcceptVpcEndpointConnections = "AcceptVpcEndpointConnections" // AcceptVpcEndpointConnectionsRequest generates a "aws/request.Request" representing the // client's request for the AcceptVpcEndpointConnections operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -168,7 +168,7 @@ const opAcceptVpcPeeringConnection = "AcceptVpcPeeringConnection" // AcceptVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the AcceptVpcPeeringConnection operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -248,7 +248,7 @@ const opAllocateAddress = "AllocateAddress" // AllocateAddressRequest generates a "aws/request.Request" representing the // client's request for the AllocateAddress operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -338,7 +338,7 @@ const opAllocateHosts = "AllocateHosts" // AllocateHostsRequest generates a "aws/request.Request" representing the // client's request for the AllocateHosts operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -413,7 +413,7 @@ const opAssignIpv6Addresses = "AssignIpv6Addresses" // AssignIpv6AddressesRequest generates a "aws/request.Request" representing the // client's request for the AssignIpv6Addresses operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -494,7 +494,7 @@ const opAssignPrivateIpAddresses = "AssignPrivateIpAddresses" // AssignPrivateIpAddressesRequest generates a "aws/request.Request" representing the // client's request for the AssignPrivateIpAddresses operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -580,7 +580,7 @@ const opAssociateAddress = "AssociateAddress" // AssociateAddressRequest generates a "aws/request.Request" representing the // client's request for the AssociateAddress operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -678,7 +678,7 @@ const opAssociateDhcpOptions = "AssociateDhcpOptions" // AssociateDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the AssociateDhcpOptions operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -765,7 +765,7 @@ const opAssociateIamInstanceProfile = "AssociateIamInstanceProfile" // AssociateIamInstanceProfileRequest generates a "aws/request.Request" representing the // client's request for the AssociateIamInstanceProfile operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. @@ -840,7 +840,7 @@ const opAssociateRouteTable = "AssociateRouteTable" // AssociateRouteTableRequest generates a "aws/request.Request" representing the // client's request for the AssociateRouteTable operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -921,7 +921,7 @@ const opAssociateSubnetCidrBlock = "AssociateSubnetCidrBlock" // AssociateSubnetCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the AssociateSubnetCidrBlock operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -997,7 +997,7 @@ const opAssociateVpcCidrBlock = "AssociateVpcCidrBlock" // AssociateVpcCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the AssociateVpcCidrBlock operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1077,7 +1077,7 @@ const opAttachClassicLinkVpc = "AttachClassicLinkVpc" // AttachClassicLinkVpcRequest generates a "aws/request.Request" representing the // client's request for the AttachClassicLinkVpc operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1162,7 +1162,7 @@ const opAttachInternetGateway = "AttachInternetGateway" // AttachInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the AttachInternetGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1240,7 +1240,7 @@ const opAttachNetworkInterface = "AttachNetworkInterface" // AttachNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the AttachNetworkInterface operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1314,7 +1314,7 @@ const opAttachVolume = "AttachVolume" // AttachVolumeRequest generates a "aws/request.Request" representing the // client's request for the AttachVolume operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. 
+// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1415,7 +1415,7 @@ const opAttachVpnGateway = "AttachVpnGateway" // AttachVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the AttachVpnGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1493,7 +1493,7 @@ const opAuthorizeSecurityGroupEgress = "AuthorizeSecurityGroupEgress" // AuthorizeSecurityGroupEgressRequest generates a "aws/request.Request" representing the // client's request for the AuthorizeSecurityGroupEgress operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1586,7 +1586,7 @@ const opAuthorizeSecurityGroupIngress = "AuthorizeSecurityGroupIngress" // AuthorizeSecurityGroupIngressRequest generates a "aws/request.Request" representing the // client's request for the AuthorizeSecurityGroupIngress operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1680,7 +1680,7 @@ const opBundleInstance = "BundleInstance" // BundleInstanceRequest generates a "aws/request.Request" representing the // client's request for the BundleInstance operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1760,7 +1760,7 @@ const opCancelBundleTask = "CancelBundleTask" // CancelBundleTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelBundleTask operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1834,7 +1834,7 @@ const opCancelConversionTask = "CancelConversionTask" // CancelConversionTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelConversionTask operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1917,7 +1917,7 @@ const opCancelExportTask = "CancelExportTask" // CancelExportTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelExportTask operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1996,7 +1996,7 @@ const opCancelImportTask = "CancelImportTask" // CancelImportTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelImportTask operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2070,7 +2070,7 @@ const opCancelReservedInstancesListing = "CancelReservedInstancesListing" // CancelReservedInstancesListingRequest generates a "aws/request.Request" representing the // client's request for the CancelReservedInstancesListing operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2148,7 +2148,7 @@ const opCancelSpotFleetRequests = "CancelSpotFleetRequests" // CancelSpotFleetRequestsRequest generates a "aws/request.Request" representing the // client's request for the CancelSpotFleetRequests operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2229,7 +2229,7 @@ const opCancelSpotInstanceRequests = "CancelSpotInstanceRequests" // CancelSpotInstanceRequestsRequest generates a "aws/request.Request" representing the // client's request for the CancelSpotInstanceRequests operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2306,7 +2306,7 @@ const opConfirmProductInstance = "ConfirmProductInstance" // ConfirmProductInstanceRequest generates a "aws/request.Request" representing the // client's request for the ConfirmProductInstance operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2382,7 +2382,7 @@ const opCopyFpgaImage = "CopyFpgaImage" // CopyFpgaImageRequest generates a "aws/request.Request" representing the // client's request for the CopyFpgaImage operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -2456,7 +2456,7 @@ const opCopyImage = "CopyImage" // CopyImageRequest generates a "aws/request.Request" representing the // client's request for the CopyImage operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2541,7 +2541,7 @@ const opCopySnapshot = "CopySnapshot" // CopySnapshotRequest generates a "aws/request.Request" representing the // client's request for the CopySnapshot operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2634,7 +2634,7 @@ const opCreateCustomerGateway = "CreateCustomerGateway" // CreateCustomerGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateCustomerGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2732,7 +2732,7 @@ const opCreateDefaultSubnet = "CreateDefaultSubnet" // CreateDefaultSubnetRequest generates a "aws/request.Request" representing the // client's request for the CreateDefaultSubnet operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2810,7 +2810,7 @@ const opCreateDefaultVpc = "CreateDefaultVpc" // CreateDefaultVpcRequest generates a "aws/request.Request" representing the // client's request for the CreateDefaultVpc operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2896,7 +2896,7 @@ const opCreateDhcpOptions = "CreateDhcpOptions" // CreateDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the CreateDhcpOptions operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3008,7 +3008,7 @@ const opCreateEgressOnlyInternetGateway = "CreateEgressOnlyInternetGateway" // CreateEgressOnlyInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateEgressOnlyInternetGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. @@ -3085,7 +3085,7 @@ const opCreateFleet = "CreateFleet" // CreateFleetRequest generates a "aws/request.Request" representing the // client's request for the CreateFleet operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3165,7 +3165,7 @@ const opCreateFlowLogs = "CreateFlowLogs" // CreateFlowLogsRequest generates a "aws/request.Request" representing the // client's request for the CreateFlowLogs operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3254,7 +3254,7 @@ const opCreateFpgaImage = "CreateFpgaImage" // CreateFpgaImageRequest generates a "aws/request.Request" representing the // client's request for the CreateFpgaImage operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3335,7 +3335,7 @@ const opCreateImage = "CreateImage" // CreateImageRequest generates a "aws/request.Request" representing the // client's request for the CreateImage operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3418,7 +3418,7 @@ const opCreateInstanceExportTask = "CreateInstanceExportTask" // CreateInstanceExportTaskRequest generates a "aws/request.Request" representing the // client's request for the CreateInstanceExportTask operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3497,7 +3497,7 @@ const opCreateInternetGateway = "CreateInternetGateway" // CreateInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateInternetGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3575,7 +3575,7 @@ const opCreateKeyPair = "CreateKeyPair" // CreateKeyPairRequest generates a "aws/request.Request" representing the // client's request for the CreateKeyPair operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. @@ -3661,7 +3661,7 @@ const opCreateLaunchTemplate = "CreateLaunchTemplate" // CreateLaunchTemplateRequest generates a "aws/request.Request" representing the // client's request for the CreateLaunchTemplate operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3737,7 +3737,7 @@ const opCreateLaunchTemplateVersion = "CreateLaunchTemplateVersion" // CreateLaunchTemplateVersionRequest generates a "aws/request.Request" representing the // client's request for the CreateLaunchTemplateVersion operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3815,7 +3815,7 @@ const opCreateNatGateway = "CreateNatGateway" // CreateNatGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateNatGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3895,7 +3895,7 @@ const opCreateNetworkAcl = "CreateNetworkAcl" // CreateNetworkAclRequest generates a "aws/request.Request" representing the // client's request for the CreateNetworkAcl operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3973,7 +3973,7 @@ const opCreateNetworkAclEntry = "CreateNetworkAclEntry" // CreateNetworkAclEntryRequest generates a "aws/request.Request" representing the // client's request for the CreateNetworkAclEntry operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4065,7 +4065,7 @@ const opCreateNetworkInterface = "CreateNetworkInterface" // CreateNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the CreateNetworkInterface operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4143,7 +4143,7 @@ const opCreateNetworkInterfacePermission = "CreateNetworkInterfacePermission" // CreateNetworkInterfacePermissionRequest generates a "aws/request.Request" representing the // client's request for the CreateNetworkInterfacePermission operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4221,7 +4221,7 @@ const opCreatePlacementGroup = "CreatePlacementGroup" // CreatePlacementGroupRequest generates a "aws/request.Request" representing the // client's request for the CreatePlacementGroup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4305,7 +4305,7 @@ const opCreateReservedInstancesListing = "CreateReservedInstancesListing" // CreateReservedInstancesListingRequest generates a "aws/request.Request" representing the // client's request for the CreateReservedInstancesListing operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4402,7 +4402,7 @@ const opCreateRoute = "CreateRoute" // CreateRouteRequest generates a "aws/request.Request" representing the // client's request for the CreateRoute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4495,7 +4495,7 @@ const opCreateRouteTable = "CreateRouteTable" // CreateRouteTableRequest generates a "aws/request.Request" representing the // client's request for the CreateRouteTable operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4573,7 +4573,7 @@ const opCreateSecurityGroup = "CreateSecurityGroup" // CreateSecurityGroupRequest generates a "aws/request.Request" representing the // client's request for the CreateSecurityGroup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4673,7 +4673,7 @@ const opCreateSnapshot = "CreateSnapshot" // CreateSnapshotRequest generates a "aws/request.Request" representing the // client's request for the CreateSnapshot operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -4778,7 +4778,7 @@ const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription" // CreateSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the // client's request for the CreateSpotDatafeedSubscription operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4855,7 +4855,7 @@ const opCreateSubnet = "CreateSubnet" // CreateSubnetRequest generates a "aws/request.Request" representing the // client's request for the CreateSubnet operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4955,7 +4955,7 @@ const opCreateTags = "CreateTags" // CreateTagsRequest generates a "aws/request.Request" representing the // client's request for the CreateTags operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5039,7 +5039,7 @@ const opCreateVolume = "CreateVolume" // CreateVolumeRequest generates a "aws/request.Request" representing the // client's request for the CreateVolume operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5132,7 +5132,7 @@ const opCreateVpc = "CreateVpc" // CreateVpcRequest generates a "aws/request.Request" representing the // client's request for the CreateVpc operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5224,7 +5224,7 @@ const opCreateVpcEndpoint = "CreateVpcEndpoint" // CreateVpcEndpointRequest generates a "aws/request.Request" representing the // client's request for the CreateVpcEndpoint operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5314,7 +5314,7 @@ const opCreateVpcEndpointConnectionNotification = "CreateVpcEndpointConnectionNo // CreateVpcEndpointConnectionNotificationRequest generates a "aws/request.Request" representing the // client's request for the CreateVpcEndpointConnectionNotification operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. @@ -5394,7 +5394,7 @@ const opCreateVpcEndpointServiceConfiguration = "CreateVpcEndpointServiceConfigu // CreateVpcEndpointServiceConfigurationRequest generates a "aws/request.Request" representing the // client's request for the CreateVpcEndpointServiceConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5475,7 +5475,7 @@ const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection" // CreateVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the CreateVpcPeeringConnection operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5564,7 +5564,7 @@ const opCreateVpnConnection = "CreateVpnConnection" // CreateVpnConnectionRequest generates a "aws/request.Request" representing the // client's request for the CreateVpnConnection operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5656,7 +5656,7 @@ const opCreateVpnConnectionRoute = "CreateVpnConnectionRoute" // CreateVpnConnectionRouteRequest generates a "aws/request.Request" representing the // client's request for the CreateVpnConnectionRoute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5739,7 +5739,7 @@ const opCreateVpnGateway = "CreateVpnGateway" // CreateVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateVpnGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5819,7 +5819,7 @@ const opDeleteCustomerGateway = "DeleteCustomerGateway" // DeleteCustomerGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteCustomerGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5896,7 +5896,7 @@ const opDeleteDhcpOptions = "DeleteDhcpOptions" // DeleteDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the DeleteDhcpOptions operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5975,7 +5975,7 @@ const opDeleteEgressOnlyInternetGateway = "DeleteEgressOnlyInternetGateway" // DeleteEgressOnlyInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteEgressOnlyInternetGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6049,7 +6049,7 @@ const opDeleteFleets = "DeleteFleets" // DeleteFleetsRequest generates a "aws/request.Request" representing the // client's request for the DeleteFleets operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6129,7 +6129,7 @@ const opDeleteFlowLogs = "DeleteFlowLogs" // DeleteFlowLogsRequest generates a "aws/request.Request" representing the // client's request for the DeleteFlowLogs operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6203,7 +6203,7 @@ const opDeleteFpgaImage = "DeleteFpgaImage" // DeleteFpgaImageRequest generates a "aws/request.Request" representing the // client's request for the DeleteFpgaImage operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6277,7 +6277,7 @@ const opDeleteInternetGateway = "DeleteInternetGateway" // DeleteInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteInternetGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6354,7 +6354,7 @@ const opDeleteKeyPair = "DeleteKeyPair" // DeleteKeyPairRequest generates a "aws/request.Request" representing the // client's request for the DeleteKeyPair operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -6430,7 +6430,7 @@ const opDeleteLaunchTemplate = "DeleteLaunchTemplate" // DeleteLaunchTemplateRequest generates a "aws/request.Request" representing the // client's request for the DeleteLaunchTemplate operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6505,7 +6505,7 @@ const opDeleteLaunchTemplateVersions = "DeleteLaunchTemplateVersions" // DeleteLaunchTemplateVersionsRequest generates a "aws/request.Request" representing the // client's request for the DeleteLaunchTemplateVersions operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6582,7 +6582,7 @@ const opDeleteNatGateway = "DeleteNatGateway" // DeleteNatGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteNatGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6658,7 +6658,7 @@ const opDeleteNetworkAcl = "DeleteNetworkAcl" // DeleteNetworkAclRequest generates a "aws/request.Request" representing the // client's request for the DeleteNetworkAcl operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6735,7 +6735,7 @@ const opDeleteNetworkAclEntry = "DeleteNetworkAclEntry" // DeleteNetworkAclEntryRequest generates a "aws/request.Request" representing the // client's request for the DeleteNetworkAclEntry operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6812,7 +6812,7 @@ const opDeleteNetworkInterface = "DeleteNetworkInterface" // DeleteNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the DeleteNetworkInterface operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6889,7 +6889,7 @@ const opDeleteNetworkInterfacePermission = "DeleteNetworkInterfacePermission" // DeleteNetworkInterfacePermissionRequest generates a "aws/request.Request" representing the // client's request for the DeleteNetworkInterfacePermission operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6966,7 +6966,7 @@ const opDeletePlacementGroup = "DeletePlacementGroup" // DeletePlacementGroupRequest generates a "aws/request.Request" representing the // client's request for the DeletePlacementGroup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7045,7 +7045,7 @@ const opDeleteRoute = "DeleteRoute" // DeleteRouteRequest generates a "aws/request.Request" representing the // client's request for the DeleteRoute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7121,7 +7121,7 @@ const opDeleteRouteTable = "DeleteRouteTable" // DeleteRouteTableRequest generates a "aws/request.Request" representing the // client's request for the DeleteRouteTable operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7199,7 +7199,7 @@ const opDeleteSecurityGroup = "DeleteSecurityGroup" // DeleteSecurityGroupRequest generates a "aws/request.Request" representing the // client's request for the DeleteSecurityGroup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7279,7 +7279,7 @@ const opDeleteSnapshot = "DeleteSnapshot" // DeleteSnapshotRequest generates a "aws/request.Request" representing the // client's request for the DeleteSnapshot operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7369,7 +7369,7 @@ const opDeleteSpotDatafeedSubscription = "DeleteSpotDatafeedSubscription" // DeleteSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the // client's request for the DeleteSpotDatafeedSubscription operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7445,7 +7445,7 @@ const opDeleteSubnet = "DeleteSubnet" // DeleteSubnetRequest generates a "aws/request.Request" representing the // client's request for the DeleteSubnet operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7522,7 +7522,7 @@ const opDeleteTags = "DeleteTags" // DeleteTagsRequest generates a "aws/request.Request" representing the // client's request for the DeleteTags operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7602,7 +7602,7 @@ const opDeleteVolume = "DeleteVolume" // DeleteVolumeRequest generates a "aws/request.Request" representing the // client's request for the DeleteVolume operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7684,7 +7684,7 @@ const opDeleteVpc = "DeleteVpc" // DeleteVpcRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpc operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7764,7 +7764,7 @@ const opDeleteVpcEndpointConnectionNotifications = "DeleteVpcEndpointConnectionN // DeleteVpcEndpointConnectionNotificationsRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpcEndpointConnectionNotifications operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7838,7 +7838,7 @@ const opDeleteVpcEndpointServiceConfigurations = "DeleteVpcEndpointServiceConfig // DeleteVpcEndpointServiceConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpcEndpointServiceConfigurations operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7915,7 +7915,7 @@ const opDeleteVpcEndpoints = "DeleteVpcEndpoints" // DeleteVpcEndpointsRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpcEndpoints operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -7992,7 +7992,7 @@ const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection" // DeleteVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpcPeeringConnection operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8070,7 +8070,7 @@ const opDeleteVpnConnection = "DeleteVpnConnection" // DeleteVpnConnectionRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpnConnection operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8155,7 +8155,7 @@ const opDeleteVpnConnectionRoute = "DeleteVpnConnectionRoute" // DeleteVpnConnectionRouteRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpnConnectionRoute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8234,7 +8234,7 @@ const opDeleteVpnGateway = "DeleteVpnGateway" // DeleteVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpnGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8314,7 +8314,7 @@ const opDeregisterImage = "DeregisterImage" // DeregisterImageRequest generates a "aws/request.Request" representing the // client's request for the DeregisterImage operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8398,7 +8398,7 @@ const opDescribeAccountAttributes = "DescribeAccountAttributes" // DescribeAccountAttributesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAccountAttributes operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8490,7 +8490,7 @@ const opDescribeAddresses = "DescribeAddresses" // DescribeAddressesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAddresses operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8568,7 +8568,7 @@ const opDescribeAggregateIdFormat = "DescribeAggregateIdFormat" // DescribeAggregateIdFormatRequest generates a "aws/request.Request" representing the // client's request for the DescribeAggregateIdFormat operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8656,7 +8656,7 @@ const opDescribeAvailabilityZones = "DescribeAvailabilityZones" // DescribeAvailabilityZonesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAvailabilityZones operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8736,7 +8736,7 @@ const opDescribeBundleTasks = "DescribeBundleTasks" // DescribeBundleTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeBundleTasks operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8815,7 +8815,7 @@ const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances" // DescribeClassicLinkInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeClassicLinkInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8892,7 +8892,7 @@ const opDescribeConversionTasks = "DescribeConversionTasks" // DescribeConversionTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeConversionTasks operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8970,7 +8970,7 @@ const opDescribeCustomerGateways = "DescribeCustomerGateways" // DescribeCustomerGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeCustomerGateways operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -9048,7 +9048,7 @@ const opDescribeDhcpOptions = "DescribeDhcpOptions" // DescribeDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeDhcpOptions operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9125,7 +9125,7 @@ const opDescribeEgressOnlyInternetGateways = "DescribeEgressOnlyInternetGateways // DescribeEgressOnlyInternetGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeEgressOnlyInternetGateways operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9199,7 +9199,7 @@ const opDescribeElasticGpus = "DescribeElasticGpus" // DescribeElasticGpusRequest generates a "aws/request.Request" representing the // client's request for the DescribeElasticGpus operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9274,7 +9274,7 @@ const opDescribeExportTasks = "DescribeExportTasks" // DescribeExportTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeExportTasks operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9348,7 +9348,7 @@ const opDescribeFleetHistory = "DescribeFleetHistory" // DescribeFleetHistoryRequest generates a "aws/request.Request" representing the // client's request for the DescribeFleetHistory operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9422,7 +9422,7 @@ const opDescribeFleetInstances = "DescribeFleetInstances" // DescribeFleetInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeFleetInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9496,7 +9496,7 @@ const opDescribeFleets = "DescribeFleets" // DescribeFleetsRequest generates a "aws/request.Request" representing the // client's request for the DescribeFleets operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9570,7 +9570,7 @@ const opDescribeFlowLogs = "DescribeFlowLogs" // DescribeFlowLogsRequest generates a "aws/request.Request" representing the // client's request for the DescribeFlowLogs operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9646,7 +9646,7 @@ const opDescribeFpgaImageAttribute = "DescribeFpgaImageAttribute" // DescribeFpgaImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeFpgaImageAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9720,7 +9720,7 @@ const opDescribeFpgaImages = "DescribeFpgaImages" // DescribeFpgaImagesRequest generates a "aws/request.Request" representing the // client's request for the DescribeFpgaImages operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9796,7 +9796,7 @@ const opDescribeHostReservationOfferings = "DescribeHostReservationOfferings" // DescribeHostReservationOfferingsRequest generates a "aws/request.Request" representing the // client's request for the DescribeHostReservationOfferings operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9878,7 +9878,7 @@ const opDescribeHostReservations = "DescribeHostReservations" // DescribeHostReservationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeHostReservations operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9952,7 +9952,7 @@ const opDescribeHosts = "DescribeHosts" // DescribeHostsRequest generates a "aws/request.Request" representing the // client's request for the DescribeHosts operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -10030,7 +10030,7 @@ const opDescribeIamInstanceProfileAssociations = "DescribeIamInstanceProfileAsso // DescribeIamInstanceProfileAssociationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeIamInstanceProfileAssociations operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10104,7 +10104,7 @@ const opDescribeIdFormat = "DescribeIdFormat" // DescribeIdFormatRequest generates a "aws/request.Request" representing the // client's request for the DescribeIdFormat operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10196,7 +10196,7 @@ const opDescribeIdentityIdFormat = "DescribeIdentityIdFormat" // DescribeIdentityIdFormatRequest generates a "aws/request.Request" representing the // client's request for the DescribeIdentityIdFormat operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10286,7 +10286,7 @@ const opDescribeImageAttribute = "DescribeImageAttribute" // DescribeImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeImageAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10361,7 +10361,7 @@ const opDescribeImages = "DescribeImages" // DescribeImagesRequest generates a "aws/request.Request" representing the // client's request for the DescribeImages operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10441,7 +10441,7 @@ const opDescribeImportImageTasks = "DescribeImportImageTasks" // DescribeImportImageTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeImportImageTasks operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10516,7 +10516,7 @@ const opDescribeImportSnapshotTasks = "DescribeImportSnapshotTasks" // DescribeImportSnapshotTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeImportSnapshotTasks operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10590,7 +10590,7 @@ const opDescribeInstanceAttribute = "DescribeInstanceAttribute" // DescribeInstanceAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstanceAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10668,7 +10668,7 @@ const opDescribeInstanceCreditSpecifications = "DescribeInstanceCreditSpecificat // DescribeInstanceCreditSpecificationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstanceCreditSpecifications operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10765,7 +10765,7 @@ const opDescribeInstanceStatus = "DescribeInstanceStatus" // DescribeInstanceStatusRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstanceStatus operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10916,7 +10916,7 @@ const opDescribeInstances = "DescribeInstances" // DescribeInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11061,7 +11061,7 @@ const opDescribeInternetGateways = "DescribeInternetGateways" // DescribeInternetGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeInternetGateways operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11135,7 +11135,7 @@ const opDescribeKeyPairs = "DescribeKeyPairs" // DescribeKeyPairsRequest generates a "aws/request.Request" representing the // client's request for the DescribeKeyPairs operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -11212,7 +11212,7 @@ const opDescribeLaunchTemplateVersions = "DescribeLaunchTemplateVersions" // DescribeLaunchTemplateVersionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeLaunchTemplateVersions operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11287,7 +11287,7 @@ const opDescribeLaunchTemplates = "DescribeLaunchTemplates" // DescribeLaunchTemplatesRequest generates a "aws/request.Request" representing the // client's request for the DescribeLaunchTemplates operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11361,7 +11361,7 @@ const opDescribeMovingAddresses = "DescribeMovingAddresses" // DescribeMovingAddressesRequest generates a "aws/request.Request" representing the // client's request for the DescribeMovingAddresses operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11437,7 +11437,7 @@ const opDescribeNatGateways = "DescribeNatGateways" // DescribeNatGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeNatGateways operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11567,7 +11567,7 @@ const opDescribeNetworkAcls = "DescribeNetworkAcls" // DescribeNetworkAclsRequest generates a "aws/request.Request" representing the // client's request for the DescribeNetworkAcls operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11644,7 +11644,7 @@ const opDescribeNetworkInterfaceAttribute = "DescribeNetworkInterfaceAttribute" // DescribeNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeNetworkInterfaceAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11719,7 +11719,7 @@ const opDescribeNetworkInterfacePermissions = "DescribeNetworkInterfacePermissio // DescribeNetworkInterfacePermissionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeNetworkInterfacePermissions operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11793,7 +11793,7 @@ const opDescribeNetworkInterfaces = "DescribeNetworkInterfaces" // DescribeNetworkInterfacesRequest generates a "aws/request.Request" representing the // client's request for the DescribeNetworkInterfaces operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11923,7 +11923,7 @@ const opDescribePlacementGroups = "DescribePlacementGroups" // DescribePlacementGroupsRequest generates a "aws/request.Request" representing the // client's request for the DescribePlacementGroups operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11999,7 +11999,7 @@ const opDescribePrefixLists = "DescribePrefixLists" // DescribePrefixListsRequest generates a "aws/request.Request" representing the // client's request for the DescribePrefixLists operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12078,7 +12078,7 @@ const opDescribePrincipalIdFormat = "DescribePrincipalIdFormat" // DescribePrincipalIdFormatRequest generates a "aws/request.Request" representing the // client's request for the DescribePrincipalIdFormat operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12166,7 +12166,7 @@ const opDescribeRegions = "DescribeRegions" // DescribeRegionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeRegions operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12243,7 +12243,7 @@ const opDescribeReservedInstances = "DescribeReservedInstances" // DescribeReservedInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -12320,7 +12320,7 @@ const opDescribeReservedInstancesListings = "DescribeReservedInstancesListings" // DescribeReservedInstancesListingsRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstancesListings operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12415,7 +12415,7 @@ const opDescribeReservedInstancesModifications = "DescribeReservedInstancesModif // DescribeReservedInstancesModificationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstancesModifications operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12551,7 +12551,7 @@ const opDescribeReservedInstancesOfferings = "DescribeReservedInstancesOfferings // DescribeReservedInstancesOfferingsRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstancesOfferings operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12692,7 +12692,7 @@ const opDescribeRouteTables = "DescribeRouteTables" // DescribeRouteTablesRequest generates a "aws/request.Request" representing the // client's request for the DescribeRouteTables operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12774,7 +12774,7 @@ const opDescribeScheduledInstanceAvailability = "DescribeScheduledInstanceAvaila // DescribeScheduledInstanceAvailabilityRequest generates a "aws/request.Request" representing the // client's request for the DescribeScheduledInstanceAvailability operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12856,7 +12856,7 @@ const opDescribeScheduledInstances = "DescribeScheduledInstances" // DescribeScheduledInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeScheduledInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -12930,7 +12930,7 @@ const opDescribeSecurityGroupReferences = "DescribeSecurityGroupReferences" // DescribeSecurityGroupReferencesRequest generates a "aws/request.Request" representing the // client's request for the DescribeSecurityGroupReferences operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13005,7 +13005,7 @@ const opDescribeSecurityGroups = "DescribeSecurityGroups" // DescribeSecurityGroupsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSecurityGroups operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13086,7 +13086,7 @@ const opDescribeSnapshotAttribute = "DescribeSnapshotAttribute" // DescribeSnapshotAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeSnapshotAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13164,7 +13164,7 @@ const opDescribeSnapshots = "DescribeSnapshots" // DescribeSnapshotsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSnapshots operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13339,7 +13339,7 @@ const opDescribeSpotDatafeedSubscription = "DescribeSpotDatafeedSubscription" // DescribeSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotDatafeedSubscription operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13415,7 +13415,7 @@ const opDescribeSpotFleetInstances = "DescribeSpotFleetInstances" // DescribeSpotFleetInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotFleetInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13489,7 +13489,7 @@ const opDescribeSpotFleetRequestHistory = "DescribeSpotFleetRequestHistory" // DescribeSpotFleetRequestHistoryRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotFleetRequestHistory operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13568,7 +13568,7 @@ const opDescribeSpotFleetRequests = "DescribeSpotFleetRequests" // DescribeSpotFleetRequestsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotFleetRequests operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13701,7 +13701,7 @@ const opDescribeSpotInstanceRequests = "DescribeSpotInstanceRequests" // DescribeSpotInstanceRequestsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotInstanceRequests operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13784,7 +13784,7 @@ const opDescribeSpotPriceHistory = "DescribeSpotPriceHistory" // DescribeSpotPriceHistoryRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotPriceHistory operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13921,7 +13921,7 @@ const opDescribeStaleSecurityGroups = "DescribeStaleSecurityGroups" // DescribeStaleSecurityGroupsRequest generates a "aws/request.Request" representing the // client's request for the DescribeStaleSecurityGroups operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13998,7 +13998,7 @@ const opDescribeSubnets = "DescribeSubnets" // DescribeSubnetsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSubnets operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -14075,7 +14075,7 @@ const opDescribeTags = "DescribeTags" // DescribeTagsRequest generates a "aws/request.Request" representing the // client's request for the DescribeTags operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -14208,7 +14208,7 @@ const opDescribeVolumeAttribute = "DescribeVolumeAttribute" // DescribeVolumeAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeVolumeAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -14286,7 +14286,7 @@ const opDescribeVolumeStatus = "DescribeVolumeStatus" // DescribeVolumeStatusRequest generates a "aws/request.Request" representing the // client's request for the DescribeVolumeStatus operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -14451,7 +14451,7 @@ const opDescribeVolumes = "DescribeVolumes" // DescribeVolumesRequest generates a "aws/request.Request" representing the // client's request for the DescribeVolumes operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -14591,7 +14591,7 @@ const opDescribeVolumesModifications = "DescribeVolumesModifications" // DescribeVolumesModificationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVolumesModifications operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -14678,7 +14678,7 @@ const opDescribeVpcAttribute = "DescribeVpcAttribute" // DescribeVpcAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -14753,7 +14753,7 @@ const opDescribeVpcClassicLink = "DescribeVpcClassicLink" // DescribeVpcClassicLinkRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcClassicLink operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -14827,7 +14827,7 @@ const opDescribeVpcClassicLinkDnsSupport = "DescribeVpcClassicLinkDnsSupport" // DescribeVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcClassicLinkDnsSupport operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -14907,7 +14907,7 @@ const opDescribeVpcEndpointConnectionNotifications = "DescribeVpcEndpointConnect // DescribeVpcEndpointConnectionNotificationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpointConnectionNotifications operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -14982,7 +14982,7 @@ const opDescribeVpcEndpointConnections = "DescribeVpcEndpointConnections" // DescribeVpcEndpointConnectionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpointConnections operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15057,7 +15057,7 @@ const opDescribeVpcEndpointServiceConfigurations = "DescribeVpcEndpointServiceCo // DescribeVpcEndpointServiceConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpointServiceConfigurations operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15131,7 +15131,7 @@ const opDescribeVpcEndpointServicePermissions = "DescribeVpcEndpointServicePermi // DescribeVpcEndpointServicePermissionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpointServicePermissions operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15206,7 +15206,7 @@ const opDescribeVpcEndpointServices = "DescribeVpcEndpointServices" // DescribeVpcEndpointServicesRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpointServices operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15280,7 +15280,7 @@ const opDescribeVpcEndpoints = "DescribeVpcEndpoints" // DescribeVpcEndpointsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpoints operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15354,7 +15354,7 @@ const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections" // DescribeVpcPeeringConnectionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcPeeringConnections operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15428,7 +15428,7 @@ const opDescribeVpcs = "DescribeVpcs" // DescribeVpcsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcs operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15502,7 +15502,7 @@ const opDescribeVpnConnections = "DescribeVpnConnections" // DescribeVpnConnectionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpnConnections operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15580,7 +15580,7 @@ const opDescribeVpnGateways = "DescribeVpnGateways" // DescribeVpnGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpnGateways operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15658,7 +15658,7 @@ const opDetachClassicLinkVpc = "DetachClassicLinkVpc" // DetachClassicLinkVpcRequest generates a "aws/request.Request" representing the // client's request for the DetachClassicLinkVpc operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15734,7 +15734,7 @@ const opDetachInternetGateway = "DetachInternetGateway" // DetachInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the DetachInternetGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15812,7 +15812,7 @@ const opDetachNetworkInterface = "DetachNetworkInterface" // DetachNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the DetachNetworkInterface operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15888,7 +15888,7 @@ const opDetachVolume = "DetachVolume" // DetachVolumeRequest generates a "aws/request.Request" representing the // client's request for the DetachVolume operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -15975,7 +15975,7 @@ const opDetachVpnGateway = "DetachVpnGateway" // DetachVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the DetachVpnGateway operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16058,7 +16058,7 @@ const opDisableVgwRoutePropagation = "DisableVgwRoutePropagation" // DisableVgwRoutePropagationRequest generates a "aws/request.Request" representing the // client's request for the DisableVgwRoutePropagation operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16135,7 +16135,7 @@ const opDisableVpcClassicLink = "DisableVpcClassicLink" // DisableVpcClassicLinkRequest generates a "aws/request.Request" representing the // client's request for the DisableVpcClassicLink operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16210,7 +16210,7 @@ const opDisableVpcClassicLinkDnsSupport = "DisableVpcClassicLinkDnsSupport" // DisableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the // client's request for the DisableVpcClassicLinkDnsSupport operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16288,7 +16288,7 @@ const opDisassociateAddress = "DisassociateAddress" // DisassociateAddressRequest generates a "aws/request.Request" representing the // client's request for the DisassociateAddress operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -16372,7 +16372,7 @@ const opDisassociateIamInstanceProfile = "DisassociateIamInstanceProfile" // DisassociateIamInstanceProfileRequest generates a "aws/request.Request" representing the // client's request for the DisassociateIamInstanceProfile operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16448,7 +16448,7 @@ const opDisassociateRouteTable = "DisassociateRouteTable" // DisassociateRouteTableRequest generates a "aws/request.Request" representing the // client's request for the DisassociateRouteTable operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16529,7 +16529,7 @@ const opDisassociateSubnetCidrBlock = "DisassociateSubnetCidrBlock" // DisassociateSubnetCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the DisassociateSubnetCidrBlock operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16605,7 +16605,7 @@ const opDisassociateVpcCidrBlock = "DisassociateVpcCidrBlock" // DisassociateVpcCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the DisassociateVpcCidrBlock operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16685,7 +16685,7 @@ const opEnableVgwRoutePropagation = "EnableVgwRoutePropagation" // EnableVgwRoutePropagationRequest generates a "aws/request.Request" representing the // client's request for the EnableVgwRoutePropagation operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16762,7 +16762,7 @@ const opEnableVolumeIO = "EnableVolumeIO" // EnableVolumeIORequest generates a "aws/request.Request" representing the // client's request for the EnableVolumeIO operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16839,7 +16839,7 @@ const opEnableVpcClassicLink = "EnableVpcClassicLink" // EnableVpcClassicLinkRequest generates a "aws/request.Request" representing the // client's request for the EnableVpcClassicLink operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16919,7 +16919,7 @@ const opEnableVpcClassicLinkDnsSupport = "EnableVpcClassicLinkDnsSupport" // EnableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the // client's request for the EnableVpcClassicLinkDnsSupport operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -16999,7 +16999,7 @@ const opGetConsoleOutput = "GetConsoleOutput" // GetConsoleOutputRequest generates a "aws/request.Request" representing the // client's request for the GetConsoleOutput operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -17089,7 +17089,7 @@ const opGetConsoleScreenshot = "GetConsoleScreenshot" // GetConsoleScreenshotRequest generates a "aws/request.Request" representing the // client's request for the GetConsoleScreenshot operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -17165,7 +17165,7 @@ const opGetHostReservationPurchasePreview = "GetHostReservationPurchasePreview" // GetHostReservationPurchasePreviewRequest generates a "aws/request.Request" representing the // client's request for the GetHostReservationPurchasePreview operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -17244,7 +17244,7 @@ const opGetLaunchTemplateData = "GetLaunchTemplateData" // GetLaunchTemplateDataRequest generates a "aws/request.Request" representing the // client's request for the GetLaunchTemplateData operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -17319,7 +17319,7 @@ const opGetPasswordData = "GetPasswordData" // GetPasswordDataRequest generates a "aws/request.Request" representing the // client's request for the GetPasswordData operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -17410,7 +17410,7 @@ const opGetReservedInstancesExchangeQuote = "GetReservedInstancesExchangeQuote" // GetReservedInstancesExchangeQuoteRequest generates a "aws/request.Request" representing the // client's request for the GetReservedInstancesExchangeQuote operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -17487,7 +17487,7 @@ const opImportImage = "ImportImage" // ImportImageRequest generates a "aws/request.Request" representing the // client's request for the ImportImage operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -17564,7 +17564,7 @@ const opImportInstance = "ImportInstance" // ImportInstanceRequest generates a "aws/request.Request" representing the // client's request for the ImportInstance operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -17644,7 +17644,7 @@ const opImportKeyPair = "ImportKeyPair" // ImportKeyPairRequest generates a "aws/request.Request" representing the // client's request for the ImportKeyPair operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -17725,7 +17725,7 @@ const opImportSnapshot = "ImportSnapshot" // ImportSnapshotRequest generates a "aws/request.Request" representing the // client's request for the ImportSnapshot operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -17799,7 +17799,7 @@ const opImportVolume = "ImportVolume" // ImportVolumeRequest generates a "aws/request.Request" representing the // client's request for the ImportVolume operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -17877,7 +17877,7 @@ const opModifyFleet = "ModifyFleet" // ModifyFleetRequest generates a "aws/request.Request" representing the // client's request for the ModifyFleet operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -17953,7 +17953,7 @@ const opModifyFpgaImageAttribute = "ModifyFpgaImageAttribute" // ModifyFpgaImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyFpgaImageAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18027,7 +18027,7 @@ const opModifyHosts = "ModifyHosts" // ModifyHostsRequest generates a "aws/request.Request" representing the // client's request for the ModifyHosts operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18107,7 +18107,7 @@ const opModifyIdFormat = "ModifyIdFormat" // ModifyIdFormatRequest generates a "aws/request.Request" representing the // client's request for the ModifyIdFormat operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18205,7 +18205,7 @@ const opModifyIdentityIdFormat = "ModifyIdentityIdFormat" // ModifyIdentityIdFormatRequest generates a "aws/request.Request" representing the // client's request for the ModifyIdentityIdFormat operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18303,7 +18303,7 @@ const opModifyImageAttribute = "ModifyImageAttribute" // ModifyImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyImageAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18388,7 +18388,7 @@ const opModifyInstanceAttribute = "ModifyInstanceAttribute" // ModifyInstanceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyInstanceAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18475,7 +18475,7 @@ const opModifyInstanceCreditSpecification = "ModifyInstanceCreditSpecification" // ModifyInstanceCreditSpecificationRequest generates a "aws/request.Request" representing the // client's request for the ModifyInstanceCreditSpecification operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18553,7 +18553,7 @@ const opModifyInstancePlacement = "ModifyInstancePlacement" // ModifyInstancePlacementRequest generates a "aws/request.Request" representing the // client's request for the ModifyInstancePlacement operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18648,7 +18648,7 @@ const opModifyLaunchTemplate = "ModifyLaunchTemplate" // ModifyLaunchTemplateRequest generates a "aws/request.Request" representing the // client's request for the ModifyLaunchTemplate operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18724,7 +18724,7 @@ const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" // ModifyNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyNetworkInterfaceAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18801,7 +18801,7 @@ const opModifyReservedInstances = "ModifyReservedInstances" // ModifyReservedInstancesRequest generates a "aws/request.Request" representing the // client's request for the ModifyReservedInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18881,7 +18881,7 @@ const opModifySnapshotAttribute = "ModifySnapshotAttribute" // ModifySnapshotAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifySnapshotAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -18969,7 +18969,7 @@ const opModifySpotFleetRequest = "ModifySpotFleetRequest" // ModifySpotFleetRequestRequest generates a "aws/request.Request" representing the // client's request for the ModifySpotFleetRequest operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -19065,7 +19065,7 @@ const opModifySubnetAttribute = "ModifySubnetAttribute" // ModifySubnetAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifySubnetAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -19141,7 +19141,7 @@ const opModifyVolume = "ModifyVolume" // ModifyVolumeRequest generates a "aws/request.Request" representing the // client's request for the ModifyVolume operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -19246,7 +19246,7 @@ const opModifyVolumeAttribute = "ModifyVolumeAttribute" // ModifyVolumeAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyVolumeAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -19331,7 +19331,7 @@ const opModifyVpcAttribute = "ModifyVpcAttribute" // ModifyVpcAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -19407,7 +19407,7 @@ const opModifyVpcEndpoint = "ModifyVpcEndpoint" // ModifyVpcEndpointRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcEndpoint operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -19484,7 +19484,7 @@ const opModifyVpcEndpointConnectionNotification = "ModifyVpcEndpointConnectionNo // ModifyVpcEndpointConnectionNotificationRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcEndpointConnectionNotification operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -19560,7 +19560,7 @@ const opModifyVpcEndpointServiceConfiguration = "ModifyVpcEndpointServiceConfigu // ModifyVpcEndpointServiceConfigurationRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcEndpointServiceConfiguration operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -19637,7 +19637,7 @@ const opModifyVpcEndpointServicePermissions = "ModifyVpcEndpointServicePermissio // ModifyVpcEndpointServicePermissionsRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcEndpointServicePermissions operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -19718,7 +19718,7 @@ const opModifyVpcPeeringConnectionOptions = "ModifyVpcPeeringConnectionOptions" // ModifyVpcPeeringConnectionOptionsRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcPeeringConnectionOptions operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -19811,7 +19811,7 @@ const opModifyVpcTenancy = "ModifyVpcTenancy" // ModifyVpcTenancyRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcTenancy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -19894,7 +19894,7 @@ const opMonitorInstances = "MonitorInstances" // MonitorInstancesRequest generates a "aws/request.Request" representing the // client's request for the MonitorInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -19973,7 +19973,7 @@ const opMoveAddressToVpc = "MoveAddressToVpc" // MoveAddressToVpcRequest generates a "aws/request.Request" representing the // client's request for the MoveAddressToVpc operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -20053,7 +20053,7 @@ const opPurchaseHostReservation = "PurchaseHostReservation" // PurchaseHostReservationRequest generates a "aws/request.Request" representing the // client's request for the PurchaseHostReservation operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -20130,7 +20130,7 @@ const opPurchaseReservedInstancesOffering = "PurchaseReservedInstancesOffering" // PurchaseReservedInstancesOfferingRequest generates a "aws/request.Request" representing the // client's request for the PurchaseReservedInstancesOffering operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -20213,7 +20213,7 @@ const opPurchaseScheduledInstances = "PurchaseScheduledInstances" // PurchaseScheduledInstancesRequest generates a "aws/request.Request" representing the // client's request for the PurchaseScheduledInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -20296,7 +20296,7 @@ const opRebootInstances = "RebootInstances" // RebootInstancesRequest generates a "aws/request.Request" representing the // client's request for the RebootInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -20382,7 +20382,7 @@ const opRegisterImage = "RegisterImage" // RegisterImageRequest generates a "aws/request.Request" representing the // client's request for the RegisterImage operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -20487,7 +20487,7 @@ const opRejectVpcEndpointConnections = "RejectVpcEndpointConnections" // RejectVpcEndpointConnectionsRequest generates a "aws/request.Request" representing the // client's request for the RejectVpcEndpointConnections operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -20562,7 +20562,7 @@ const opRejectVpcPeeringConnection = "RejectVpcPeeringConnection" // RejectVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the RejectVpcPeeringConnection operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -20640,7 +20640,7 @@ const opReleaseAddress = "ReleaseAddress" // ReleaseAddressRequest generates a "aws/request.Request" representing the // client's request for the ReleaseAddress operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. 
+// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -20733,7 +20733,7 @@ const opReleaseHosts = "ReleaseHosts" // ReleaseHostsRequest generates a "aws/request.Request" representing the // client's request for the ReleaseHosts operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -20817,7 +20817,7 @@ const opReplaceIamInstanceProfileAssociation = "ReplaceIamInstanceProfileAssocia // ReplaceIamInstanceProfileAssociationRequest generates a "aws/request.Request" representing the // client's request for the ReplaceIamInstanceProfileAssociation operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -20896,7 +20896,7 @@ const opReplaceNetworkAclAssociation = "ReplaceNetworkAclAssociation" // ReplaceNetworkAclAssociationRequest generates a "aws/request.Request" representing the // client's request for the ReplaceNetworkAclAssociation operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -20975,7 +20975,7 @@ const opReplaceNetworkAclEntry = "ReplaceNetworkAclEntry" // ReplaceNetworkAclEntryRequest generates a "aws/request.Request" representing the // client's request for the ReplaceNetworkAclEntry operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -21053,7 +21053,7 @@ const opReplaceRoute = "ReplaceRoute" // ReplaceRouteRequest generates a "aws/request.Request" representing the // client's request for the ReplaceRoute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -21135,7 +21135,7 @@ const opReplaceRouteTableAssociation = "ReplaceRouteTableAssociation" // ReplaceRouteTableAssociationRequest generates a "aws/request.Request" representing the // client's request for the ReplaceRouteTableAssociation operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -21217,7 +21217,7 @@ const opReportInstanceStatus = "ReportInstanceStatus" // ReportInstanceStatusRequest generates a "aws/request.Request" representing the // client's request for the ReportInstanceStatus operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -21299,7 +21299,7 @@ const opRequestSpotFleet = "RequestSpotFleet" // RequestSpotFleetRequest generates a "aws/request.Request" representing the // client's request for the RequestSpotFleet operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -21397,7 +21397,7 @@ const opRequestSpotInstances = "RequestSpotInstances" // RequestSpotInstancesRequest generates a "aws/request.Request" representing the // client's request for the RequestSpotInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -21474,7 +21474,7 @@ const opResetFpgaImageAttribute = "ResetFpgaImageAttribute" // ResetFpgaImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetFpgaImageAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -21549,7 +21549,7 @@ const opResetImageAttribute = "ResetImageAttribute" // ResetImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetImageAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -21627,7 +21627,7 @@ const opResetInstanceAttribute = "ResetInstanceAttribute" // ResetInstanceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetInstanceAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -21711,7 +21711,7 @@ const opResetNetworkInterfaceAttribute = "ResetNetworkInterfaceAttribute" // ResetNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetNetworkInterfaceAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -21788,7 +21788,7 @@ const opResetSnapshotAttribute = "ResetSnapshotAttribute" // ResetSnapshotAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetSnapshotAttribute operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -21868,7 +21868,7 @@ const opRestoreAddressToClassic = "RestoreAddressToClassic" // RestoreAddressToClassicRequest generates a "aws/request.Request" representing the // client's request for the RestoreAddressToClassic operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -21945,7 +21945,7 @@ const opRevokeSecurityGroupEgress = "RevokeSecurityGroupEgress" // RevokeSecurityGroupEgressRequest generates a "aws/request.Request" representing the // client's request for the RevokeSecurityGroupEgress operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -22033,7 +22033,7 @@ const opRevokeSecurityGroupIngress = "RevokeSecurityGroupIngress" // RevokeSecurityGroupIngressRequest generates a "aws/request.Request" representing the // client's request for the RevokeSecurityGroupIngress operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -22124,7 +22124,7 @@ const opRunInstances = "RunInstances" // RunInstancesRequest generates a "aws/request.Request" representing the // client's request for the RunInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -22251,7 +22251,7 @@ const opRunScheduledInstances = "RunScheduledInstances" // RunScheduledInstancesRequest generates a "aws/request.Request" representing the // client's request for the RunScheduledInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -22335,7 +22335,7 @@ const opStartInstances = "StartInstances" // StartInstancesRequest generates a "aws/request.Request" representing the // client's request for the StartInstances operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -22431,7 +22431,7 @@ const opStopInstances = "StopInstances" // StopInstancesRequest generates a "aws/request.Request" representing the // client's request for the StopInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -22537,7 +22537,7 @@ const opTerminateInstances = "TerminateInstances" // TerminateInstancesRequest generates a "aws/request.Request" representing the // client's request for the TerminateInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -22635,7 +22635,7 @@ const opUnassignIpv6Addresses = "UnassignIpv6Addresses" // UnassignIpv6AddressesRequest generates a "aws/request.Request" representing the // client's request for the UnassignIpv6Addresses operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -22709,7 +22709,7 @@ const opUnassignPrivateIpAddresses = "UnassignPrivateIpAddresses" // UnassignPrivateIpAddressesRequest generates a "aws/request.Request" representing the // client's request for the UnassignPrivateIpAddresses operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -22785,7 +22785,7 @@ const opUnmonitorInstances = "UnmonitorInstances" // UnmonitorInstancesRequest generates a "aws/request.Request" representing the // client's request for the UnmonitorInstances operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -22861,7 +22861,7 @@ const opUpdateSecurityGroupRuleDescriptionsEgress = "UpdateSecurityGroupRuleDesc // UpdateSecurityGroupRuleDescriptionsEgressRequest generates a "aws/request.Request" representing the // client's request for the UpdateSecurityGroupRuleDescriptionsEgress operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -22941,7 +22941,7 @@ const opUpdateSecurityGroupRuleDescriptionsIngress = "UpdateSecurityGroupRuleDes
// UpdateSecurityGroupRuleDescriptionsIngressRequest generates a "aws/request.Request" representing the
// client's request for the UpdateSecurityGroupRuleDescriptionsIngress operation. The "output" return
// value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@@ -71054,6 +71054,9 @@ const (
// InstanceTypeG316xlarge is a InstanceType enum value
InstanceTypeG316xlarge = "g3.16xlarge"

+ // InstanceTypeG3sXlarge is a InstanceType enum value
+ InstanceTypeG3sXlarge = "g3s.xlarge"
+
// InstanceTypeCg14xlarge is a InstanceType enum value
InstanceTypeCg14xlarge = "cg1.4xlarge"

diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go
index 875df512f..cb3280a79 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go
@@ -18,7 +18,7 @@ const opAddClientIDToOpenIDConnectProvider = "AddClientIDToOpenIDConnectProvider
// AddClientIDToOpenIDConnectProviderRequest generates a "aws/request.Request" representing the
// client's request for the AddClientIDToOpenIDConnectProvider operation. The "output" return
// value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@@ -116,7 +116,7 @@ const opAddRoleToInstanceProfile = "AddRoleToInstanceProfile"
// AddRoleToInstanceProfileRequest generates a "aws/request.Request" representing the
// client's request for the AddRoleToInstanceProfile operation. The "output" return
// value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@@ -230,7 +230,7 @@ const opAddUserToGroup = "AddUserToGroup"
// AddUserToGroupRequest generates a "aws/request.Request" representing the
// client's request for the AddUserToGroup operation. The "output" return
// value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@@ -320,7 +320,7 @@ const opAttachGroupPolicy = "AttachGroupPolicy"
// AttachGroupPolicyRequest generates a "aws/request.Request" representing the
// client's request for the AttachGroupPolicy operation. The "output" return
// value will be populated with the request's response once the request completes
-// successfuly.
+// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@@ -425,7 +425,7 @@ const opAttachRolePolicy = "AttachRolePolicy"
// AttachRolePolicyRequest generates a "aws/request.Request" representing the
// client's request for the AttachRolePolicy operation.
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -540,7 +540,7 @@ const opAttachUserPolicy = "AttachUserPolicy" // AttachUserPolicyRequest generates a "aws/request.Request" representing the // client's request for the AttachUserPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -645,7 +645,7 @@ const opChangePassword = "ChangePassword" // ChangePasswordRequest generates a "aws/request.Request" representing the // client's request for the ChangePassword operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -754,7 +754,7 @@ const opCreateAccessKey = "CreateAccessKey" // CreateAccessKeyRequest generates a "aws/request.Request" representing the // client's request for the CreateAccessKey operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -859,7 +859,7 @@ const opCreateAccountAlias = "CreateAccountAlias" // CreateAccountAliasRequest generates a "aws/request.Request" representing the // client's request for the CreateAccountAlias operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -951,7 +951,7 @@ const opCreateGroup = "CreateGroup" // CreateGroupRequest generates a "aws/request.Request" representing the // client's request for the CreateGroup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1047,7 +1047,7 @@ const opCreateInstanceProfile = "CreateInstanceProfile" // CreateInstanceProfileRequest generates a "aws/request.Request" representing the // client's request for the CreateInstanceProfile operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1140,7 +1140,7 @@ const opCreateLoginProfile = "CreateLoginProfile" // CreateLoginProfileRequest generates a "aws/request.Request" representing the // client's request for the CreateLoginProfile operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1239,7 +1239,7 @@ const opCreateOpenIDConnectProvider = "CreateOpenIDConnectProvider" // CreateOpenIDConnectProviderRequest generates a "aws/request.Request" representing the // client's request for the CreateOpenIDConnectProvider operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1352,7 +1352,7 @@ const opCreatePolicy = "CreatePolicy" // CreatePolicyRequest generates a "aws/request.Request" representing the // client's request for the CreatePolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1457,7 +1457,7 @@ const opCreatePolicyVersion = "CreatePolicyVersion" // CreatePolicyVersionRequest generates a "aws/request.Request" representing the // client's request for the CreatePolicyVersion operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1564,7 +1564,7 @@ const opCreateRole = "CreateRole" // CreateRoleRequest generates a "aws/request.Request" representing the // client's request for the CreateRole operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1664,7 +1664,7 @@ const opCreateSAMLProvider = "CreateSAMLProvider" // CreateSAMLProviderRequest generates a "aws/request.Request" representing the // client's request for the CreateSAMLProvider operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1777,7 +1777,7 @@ const opCreateServiceLinkedRole = "CreateServiceLinkedRole" // CreateServiceLinkedRoleRequest generates a "aws/request.Request" representing the // client's request for the CreateServiceLinkedRole operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -1882,7 +1882,7 @@ const opCreateServiceSpecificCredential = "CreateServiceSpecificCredential" // CreateServiceSpecificCredentialRequest generates a "aws/request.Request" representing the // client's request for the CreateServiceSpecificCredential operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1982,7 +1982,7 @@ const opCreateUser = "CreateUser" // CreateUserRequest generates a "aws/request.Request" representing the // client's request for the CreateUser operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2078,7 +2078,7 @@ const opCreateVirtualMFADevice = "CreateVirtualMFADevice" // CreateVirtualMFADeviceRequest generates a "aws/request.Request" representing the // client's request for the CreateVirtualMFADevice operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2179,7 +2179,7 @@ const opDeactivateMFADevice = "DeactivateMFADevice" // DeactivateMFADeviceRequest generates a "aws/request.Request" representing the // client's request for the DeactivateMFADevice operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2280,7 +2280,7 @@ const opDeleteAccessKey = "DeleteAccessKey" // DeleteAccessKeyRequest generates a "aws/request.Request" representing the // client's request for the DeleteAccessKey operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2376,7 +2376,7 @@ const opDeleteAccountAlias = "DeleteAccountAlias" // DeleteAccountAliasRequest generates a "aws/request.Request" representing the // client's request for the DeleteAccountAlias operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2468,7 +2468,7 @@ const opDeleteAccountPasswordPolicy = "DeleteAccountPasswordPolicy" // DeleteAccountPasswordPolicyRequest generates a "aws/request.Request" representing the // client's request for the DeleteAccountPasswordPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2558,7 +2558,7 @@ const opDeleteGroup = "DeleteGroup" // DeleteGroupRequest generates a "aws/request.Request" representing the // client's request for the DeleteGroup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2653,7 +2653,7 @@ const opDeleteGroupPolicy = "DeleteGroupPolicy" // DeleteGroupPolicyRequest generates a "aws/request.Request" representing the // client's request for the DeleteGroupPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2749,7 +2749,7 @@ const opDeleteInstanceProfile = "DeleteInstanceProfile" // DeleteInstanceProfileRequest generates a "aws/request.Request" representing the // client's request for the DeleteInstanceProfile operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2852,7 +2852,7 @@ const opDeleteLoginProfile = "DeleteLoginProfile" // DeleteLoginProfileRequest generates a "aws/request.Request" representing the // client's request for the DeleteLoginProfile operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2954,7 +2954,7 @@ const opDeleteOpenIDConnectProvider = "DeleteOpenIDConnectProvider" // DeleteOpenIDConnectProviderRequest generates a "aws/request.Request" representing the // client's request for the DeleteOpenIDConnectProvider operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3051,7 +3051,7 @@ const opDeletePolicy = "DeletePolicy" // DeletePolicyRequest generates a "aws/request.Request" representing the // client's request for the DeletePolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3171,7 +3171,7 @@ const opDeletePolicyVersion = "DeletePolicyVersion" // DeletePolicyVersionRequest generates a "aws/request.Request" representing the // client's request for the DeletePolicyVersion operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3277,7 +3277,7 @@ const opDeleteRole = "DeleteRole" // DeleteRoleRequest generates a "aws/request.Request" representing the // client's request for the DeleteRole operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3383,7 +3383,7 @@ const opDeleteRolePermissionsBoundary = "DeleteRolePermissionsBoundary" // DeleteRolePermissionsBoundaryRequest generates a "aws/request.Request" representing the // client's request for the DeleteRolePermissionsBoundary operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3479,7 +3479,7 @@ const opDeleteRolePolicy = "DeleteRolePolicy" // DeleteRolePolicyRequest generates a "aws/request.Request" representing the // client's request for the DeleteRolePolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3581,7 +3581,7 @@ const opDeleteSAMLProvider = "DeleteSAMLProvider" // DeleteSAMLProviderRequest generates a "aws/request.Request" representing the // client's request for the DeleteSAMLProvider operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3682,7 +3682,7 @@ const opDeleteSSHPublicKey = "DeleteSSHPublicKey" // DeleteSSHPublicKeyRequest generates a "aws/request.Request" representing the // client's request for the DeleteSSHPublicKey operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3770,7 +3770,7 @@ const opDeleteServerCertificate = "DeleteServerCertificate" // DeleteServerCertificateRequest generates a "aws/request.Request" representing the // client's request for the DeleteServerCertificate operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -3879,7 +3879,7 @@ const opDeleteServiceLinkedRole = "DeleteServiceLinkedRole" // DeleteServiceLinkedRoleRequest generates a "aws/request.Request" representing the // client's request for the DeleteServiceLinkedRole operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3986,7 +3986,7 @@ const opDeleteServiceSpecificCredential = "DeleteServiceSpecificCredential" // DeleteServiceSpecificCredentialRequest generates a "aws/request.Request" representing the // client's request for the DeleteServiceSpecificCredential operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4068,7 +4068,7 @@ const opDeleteSigningCertificate = "DeleteSigningCertificate" // DeleteSigningCertificateRequest generates a "aws/request.Request" representing the // client's request for the DeleteSigningCertificate operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4164,7 +4164,7 @@ const opDeleteUser = "DeleteUser" // DeleteUserRequest generates a "aws/request.Request" representing the // client's request for the DeleteUser operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4259,7 +4259,7 @@ const opDeleteUserPermissionsBoundary = "DeleteUserPermissionsBoundary" // DeleteUserPermissionsBoundaryRequest generates a "aws/request.Request" representing the // client's request for the DeleteUserPermissionsBoundary operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4349,7 +4349,7 @@ const opDeleteUserPolicy = "DeleteUserPolicy" // DeleteUserPolicyRequest generates a "aws/request.Request" representing the // client's request for the DeleteUserPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4445,7 +4445,7 @@ const opDeleteVirtualMFADevice = "DeleteVirtualMFADevice" // DeleteVirtualMFADeviceRequest generates a "aws/request.Request" representing the // client's request for the DeleteVirtualMFADevice operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4542,7 +4542,7 @@ const opDetachGroupPolicy = "DetachGroupPolicy" // DetachGroupPolicyRequest generates a "aws/request.Request" representing the // client's request for the DetachGroupPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4641,7 +4641,7 @@ const opDetachRolePolicy = "DetachRolePolicy" // DetachRolePolicyRequest generates a "aws/request.Request" representing the // client's request for the DetachRolePolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4746,7 +4746,7 @@ const opDetachUserPolicy = "DetachUserPolicy" // DetachUserPolicyRequest generates a "aws/request.Request" representing the // client's request for the DetachUserPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4845,7 +4845,7 @@ const opEnableMFADevice = "EnableMFADevice" // EnableMFADeviceRequest generates a "aws/request.Request" representing the // client's request for the EnableMFADevice operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4951,7 +4951,7 @@ const opGenerateCredentialReport = "GenerateCredentialReport" // GenerateCredentialReportRequest generates a "aws/request.Request" representing the // client's request for the GenerateCredentialReport operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5037,7 +5037,7 @@ const opGetAccessKeyLastUsed = "GetAccessKeyLastUsed" // GetAccessKeyLastUsedRequest generates a "aws/request.Request" representing the // client's request for the GetAccessKeyLastUsed operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5120,7 +5120,7 @@ const opGetAccountAuthorizationDetails = "GetAccountAuthorizationDetails" // GetAccountAuthorizationDetailsRequest generates a "aws/request.Request" representing the // client's request for the GetAccountAuthorizationDetails operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5268,7 +5268,7 @@ const opGetAccountPasswordPolicy = "GetAccountPasswordPolicy" // GetAccountPasswordPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetAccountPasswordPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5353,7 +5353,7 @@ const opGetAccountSummary = "GetAccountSummary" // GetAccountSummaryRequest generates a "aws/request.Request" representing the // client's request for the GetAccountSummary operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5437,7 +5437,7 @@ const opGetContextKeysForCustomPolicy = "GetContextKeysForCustomPolicy" // GetContextKeysForCustomPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetContextKeysForCustomPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5526,7 +5526,7 @@ const opGetContextKeysForPrincipalPolicy = "GetContextKeysForPrincipalPolicy" // GetContextKeysForPrincipalPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetContextKeysForPrincipalPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5626,7 +5626,7 @@ const opGetCredentialReport = "GetCredentialReport" // GetCredentialReportRequest generates a "aws/request.Request" representing the // client's request for the GetCredentialReport operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5722,7 +5722,7 @@ const opGetGroup = "GetGroup" // GetGroupRequest generates a "aws/request.Request" representing the // client's request for the GetGroup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -5863,7 +5863,7 @@ const opGetGroupPolicy = "GetGroupPolicy" // GetGroupPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetGroupPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5963,7 +5963,7 @@ const opGetInstanceProfile = "GetInstanceProfile" // GetInstanceProfileRequest generates a "aws/request.Request" representing the // client's request for the GetInstanceProfile operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6050,7 +6050,7 @@ const opGetLoginProfile = "GetLoginProfile" // GetLoginProfileRequest generates a "aws/request.Request" representing the // client's request for the GetLoginProfile operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6136,7 +6136,7 @@ const opGetOpenIDConnectProvider = "GetOpenIDConnectProvider" // GetOpenIDConnectProviderRequest generates a "aws/request.Request" representing the // client's request for the GetOpenIDConnectProvider operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6225,7 +6225,7 @@ const opGetPolicy = "GetPolicy" // GetPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6326,7 +6326,7 @@ const opGetPolicyVersion = "GetPolicyVersion" // GetPolicyVersionRequest generates a "aws/request.Request" representing the // client's request for the GetPolicyVersion operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6435,7 +6435,7 @@ const opGetRole = "GetRole" // GetRoleRequest generates a "aws/request.Request" representing the // client's request for the GetRole operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -6527,7 +6527,7 @@ const opGetRolePolicy = "GetRolePolicy" // GetRolePolicyRequest generates a "aws/request.Request" representing the // client's request for the GetRolePolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6630,7 +6630,7 @@ const opGetSAMLProvider = "GetSAMLProvider" // GetSAMLProviderRequest generates a "aws/request.Request" representing the // client's request for the GetSAMLProvider operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6721,7 +6721,7 @@ const opGetSSHPublicKey = "GetSSHPublicKey" // GetSSHPublicKeyRequest generates a "aws/request.Request" representing the // client's request for the GetSSHPublicKey operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6811,7 +6811,7 @@ const opGetServerCertificate = "GetServerCertificate" // GetServerCertificateRequest generates a "aws/request.Request" representing the // client's request for the GetServerCertificate operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6900,7 +6900,7 @@ const opGetServiceLinkedRoleDeletionStatus = "GetServiceLinkedRoleDeletionStatus // GetServiceLinkedRoleDeletionStatusRequest generates a "aws/request.Request" representing the // client's request for the GetServiceLinkedRoleDeletionStatus operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6993,7 +6993,7 @@ const opGetUser = "GetUser" // GetUserRequest generates a "aws/request.Request" representing the // client's request for the GetUser operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7081,7 +7081,7 @@ const opGetUserPolicy = "GetUserPolicy" // GetUserPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetUserPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. @@ -7181,7 +7181,7 @@ const opListAccessKeys = "ListAccessKeys" // ListAccessKeysRequest generates a "aws/request.Request" representing the // client's request for the ListAccessKeys operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7334,7 +7334,7 @@ const opListAccountAliases = "ListAccountAliases" // ListAccountAliasesRequest generates a "aws/request.Request" representing the // client's request for the ListAccountAliases operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7473,7 +7473,7 @@ const opListAttachedGroupPolicies = "ListAttachedGroupPolicies" // ListAttachedGroupPoliciesRequest generates a "aws/request.Request" representing the // client's request for the ListAttachedGroupPolicies operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7628,7 +7628,7 @@ const opListAttachedRolePolicies = "ListAttachedRolePolicies" // ListAttachedRolePoliciesRequest generates a "aws/request.Request" representing the // client's request for the ListAttachedRolePolicies operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7783,7 +7783,7 @@ const opListAttachedUserPolicies = "ListAttachedUserPolicies" // ListAttachedUserPoliciesRequest generates a "aws/request.Request" representing the // client's request for the ListAttachedUserPolicies operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -7938,7 +7938,7 @@ const opListEntitiesForPolicy = "ListEntitiesForPolicy" // ListEntitiesForPolicyRequest generates a "aws/request.Request" representing the // client's request for the ListEntitiesForPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8090,7 +8090,7 @@ const opListGroupPolicies = "ListGroupPolicies" // ListGroupPoliciesRequest generates a "aws/request.Request" representing the // client's request for the ListGroupPolicies operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8241,7 +8241,7 @@ const opListGroups = "ListGroups" // ListGroupsRequest generates a "aws/request.Request" representing the // client's request for the ListGroups operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8379,7 +8379,7 @@ const opListGroupsForUser = "ListGroupsForUser" // ListGroupsForUserRequest generates a "aws/request.Request" representing the // client's request for the ListGroupsForUser operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8521,7 +8521,7 @@ const opListInstanceProfiles = "ListInstanceProfiles" // ListInstanceProfilesRequest generates a "aws/request.Request" representing the // client's request for the ListInstanceProfiles operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8661,7 +8661,7 @@ const opListInstanceProfilesForRole = "ListInstanceProfilesForRole" // ListInstanceProfilesForRoleRequest generates a "aws/request.Request" representing the // client's request for the ListInstanceProfilesForRole operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8805,7 +8805,7 @@ const opListMFADevices = "ListMFADevices" // ListMFADevicesRequest generates a "aws/request.Request" representing the // client's request for the ListMFADevices operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -8950,7 +8950,7 @@ const opListOpenIDConnectProviders = "ListOpenIDConnectProviders" // ListOpenIDConnectProvidersRequest generates a "aws/request.Request" representing the // client's request for the ListOpenIDConnectProviders operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -9031,7 +9031,7 @@ const opListPolicies = "ListPolicies" // ListPoliciesRequest generates a "aws/request.Request" representing the // client's request for the ListPolicies operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9179,7 +9179,7 @@ const opListPolicyVersions = "ListPolicyVersions" // ListPolicyVersionsRequest generates a "aws/request.Request" representing the // client's request for the ListPolicyVersions operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9328,7 +9328,7 @@ const opListRolePolicies = "ListRolePolicies" // ListRolePoliciesRequest generates a "aws/request.Request" representing the // client's request for the ListRolePolicies operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9478,7 +9478,7 @@ const opListRoles = "ListRoles" // ListRolesRequest generates a "aws/request.Request" representing the // client's request for the ListRoles operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9618,7 +9618,7 @@ const opListSAMLProviders = "ListSAMLProviders" // ListSAMLProvidersRequest generates a "aws/request.Request" representing the // client's request for the ListSAMLProviders operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9700,7 +9700,7 @@ const opListSSHPublicKeys = "ListSSHPublicKeys" // ListSSHPublicKeysRequest generates a "aws/request.Request" representing the // client's request for the ListSSHPublicKeys operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -9846,7 +9846,7 @@ const opListServerCertificates = "ListServerCertificates" // ListServerCertificatesRequest generates a "aws/request.Request" representing the // client's request for the ListServerCertificates operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -9990,7 +9990,7 @@ const opListServiceSpecificCredentials = "ListServiceSpecificCredentials" // ListServiceSpecificCredentialsRequest generates a "aws/request.Request" representing the // client's request for the ListServiceSpecificCredentials operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10079,7 +10079,7 @@ const opListSigningCertificates = "ListSigningCertificates" // ListSigningCertificatesRequest generates a "aws/request.Request" representing the // client's request for the ListSigningCertificates operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10229,7 +10229,7 @@ const opListUserPolicies = "ListUserPolicies" // ListUserPoliciesRequest generates a "aws/request.Request" representing the // client's request for the ListUserPolicies operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10378,7 +10378,7 @@ const opListUsers = "ListUsers" // ListUsersRequest generates a "aws/request.Request" representing the // client's request for the ListUsers operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10518,7 +10518,7 @@ const opListVirtualMFADevices = "ListVirtualMFADevices" // ListVirtualMFADevicesRequest generates a "aws/request.Request" representing the // client's request for the ListVirtualMFADevices operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10653,7 +10653,7 @@ const opPutGroupPolicy = "PutGroupPolicy" // PutGroupPolicyRequest generates a "aws/request.Request" representing the // client's request for the PutGroupPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10763,7 +10763,7 @@ const opPutRolePermissionsBoundary = "PutRolePermissionsBoundary" // PutRolePermissionsBoundaryRequest generates a "aws/request.Request" representing the // client's request for the PutRolePermissionsBoundary operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10875,7 +10875,7 @@ const opPutRolePolicy = "PutRolePolicy" // PutRolePolicyRequest generates a "aws/request.Request" representing the // client's request for the PutRolePolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -10997,7 +10997,7 @@ const opPutUserPermissionsBoundary = "PutUserPermissionsBoundary" // PutUserPermissionsBoundaryRequest generates a "aws/request.Request" representing the // client's request for the PutUserPermissionsBoundary operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11101,7 +11101,7 @@ const opPutUserPolicy = "PutUserPolicy" // PutUserPolicyRequest generates a "aws/request.Request" representing the // client's request for the PutUserPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11211,7 +11211,7 @@ const opRemoveClientIDFromOpenIDConnectProvider = "RemoveClientIDFromOpenIDConne // RemoveClientIDFromOpenIDConnectProviderRequest generates a "aws/request.Request" representing the // client's request for the RemoveClientIDFromOpenIDConnectProvider operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11306,7 +11306,7 @@ const opRemoveRoleFromInstanceProfile = "RemoveRoleFromInstanceProfile" // RemoveRoleFromInstanceProfileRequest generates a "aws/request.Request" representing the // client's request for the RemoveRoleFromInstanceProfile operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11411,7 +11411,7 @@ const opRemoveUserFromGroup = "RemoveUserFromGroup" // RemoveUserFromGroupRequest generates a "aws/request.Request" representing the // client's request for the RemoveUserFromGroup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -11501,7 +11501,7 @@ const opResetServiceSpecificCredential = "ResetServiceSpecificCredential" // ResetServiceSpecificCredentialRequest generates a "aws/request.Request" representing the // client's request for the ResetServiceSpecificCredential operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11584,7 +11584,7 @@ const opResyncMFADevice = "ResyncMFADevice" // ResyncMFADeviceRequest generates a "aws/request.Request" representing the // client's request for the ResyncMFADevice operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11683,7 +11683,7 @@ const opSetDefaultPolicyVersion = "SetDefaultPolicyVersion" // SetDefaultPolicyVersionRequest generates a "aws/request.Request" representing the // client's request for the SetDefaultPolicyVersion operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11786,7 +11786,7 @@ const opSimulateCustomPolicy = "SimulateCustomPolicy" // SimulateCustomPolicyRequest generates a "aws/request.Request" representing the // client's request for the SimulateCustomPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -11942,7 +11942,7 @@ const opSimulatePrincipalPolicy = "SimulatePrincipalPolicy" // SimulatePrincipalPolicyRequest generates a "aws/request.Request" representing the // client's request for the SimulatePrincipalPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12112,7 +12112,7 @@ const opUpdateAccessKey = "UpdateAccessKey" // UpdateAccessKeyRequest generates a "aws/request.Request" representing the // client's request for the UpdateAccessKey operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12213,7 +12213,7 @@ const opUpdateAccountPasswordPolicy = "UpdateAccountPasswordPolicy" // UpdateAccountPasswordPolicyRequest generates a "aws/request.Request" representing the // client's request for the UpdateAccountPasswordPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12318,7 +12318,7 @@ const opUpdateAssumeRolePolicy = "UpdateAssumeRolePolicy" // UpdateAssumeRolePolicyRequest generates a "aws/request.Request" representing the // client's request for the UpdateAssumeRolePolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12421,7 +12421,7 @@ const opUpdateGroup = "UpdateGroup" // UpdateGroupRequest generates a "aws/request.Request" representing the // client's request for the UpdateGroup operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12526,7 +12526,7 @@ const opUpdateLoginProfile = "UpdateLoginProfile" // UpdateLoginProfileRequest generates a "aws/request.Request" representing the // client's request for the UpdateLoginProfile operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12630,7 +12630,7 @@ const opUpdateOpenIDConnectProviderThumbprint = "UpdateOpenIDConnectProviderThum // UpdateOpenIDConnectProviderThumbprintRequest generates a "aws/request.Request" representing the // client's request for the UpdateOpenIDConnectProviderThumbprint operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12733,7 +12733,7 @@ const opUpdateRole = "UpdateRole" // UpdateRoleRequest generates a "aws/request.Request" representing the // client's request for the UpdateRole operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12823,7 +12823,7 @@ const opUpdateRoleDescription = "UpdateRoleDescription" // UpdateRoleDescriptionRequest generates a "aws/request.Request" representing the // client's request for the UpdateRoleDescription operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -12916,7 +12916,7 @@ const opUpdateSAMLProvider = "UpdateSAMLProvider" // UpdateSAMLProviderRequest generates a "aws/request.Request" representing the // client's request for the UpdateSAMLProvider operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13010,7 +13010,7 @@ const opUpdateSSHPublicKey = "UpdateSSHPublicKey" // UpdateSSHPublicKeyRequest generates a "aws/request.Request" representing the // client's request for the UpdateSSHPublicKey operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13101,7 +13101,7 @@ const opUpdateServerCertificate = "UpdateServerCertificate" // UpdateServerCertificateRequest generates a "aws/request.Request" representing the // client's request for the UpdateServerCertificate operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13214,7 +13214,7 @@ const opUpdateServiceSpecificCredential = "UpdateServiceSpecificCredential" // UpdateServiceSpecificCredentialRequest generates a "aws/request.Request" representing the // client's request for the UpdateServiceSpecificCredential operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13299,7 +13299,7 @@ const opUpdateSigningCertificate = "UpdateSigningCertificate" // UpdateSigningCertificateRequest generates a "aws/request.Request" representing the // client's request for the UpdateSigningCertificate operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13397,7 +13397,7 @@ const opUpdateUser = "UpdateUser" // UpdateUserRequest generates a "aws/request.Request" representing the // client's request for the UpdateUser operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13508,7 +13508,7 @@ const opUploadSSHPublicKey = "UploadSSHPublicKey" // UploadSSHPublicKeyRequest generates a "aws/request.Request" representing the // client's request for the UploadSSHPublicKey operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -13610,7 +13610,7 @@ const opUploadServerCertificate = "UploadServerCertificate" // UploadServerCertificateRequest generates a "aws/request.Request" representing the // client's request for the UploadServerCertificate operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -13731,7 +13731,7 @@ const opUploadSigningCertificate = "UploadSigningCertificate" // UploadSigningCertificateRequest generates a "aws/request.Request" representing the // client's request for the UploadSigningCertificate operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 104361950..d5d617722 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -27,7 +27,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // AbortMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the AbortMultipartUpload operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -110,7 +110,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload" // CompleteMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CompleteMultipartUpload operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -184,7 +184,7 @@ const opCopyObject = "CopyObject" // CopyObjectRequest generates a "aws/request.Request" representing the // client's request for the CopyObject operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -264,7 +264,7 @@ const opCreateBucket = "CreateBucket" // CreateBucketRequest generates a "aws/request.Request" representing the // client's request for the CreateBucket operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -346,7 +346,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload" // CreateMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CreateMultipartUpload operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -426,7 +426,7 @@ const opDeleteBucket = "DeleteBucket" // DeleteBucketRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucket operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -503,7 +503,7 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration // DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -580,7 +580,7 @@ const opDeleteBucketCors = "DeleteBucketCors" // DeleteBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketCors operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -656,7 +656,7 @@ const opDeleteBucketEncryption = "DeleteBucketEncryption" // DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketEncryption operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -732,7 +732,7 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration // DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketInventoryConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -809,7 +809,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle" // DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketLifecycle operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -885,7 +885,7 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" // DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketMetricsConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -962,7 +962,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy" // DeleteBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1038,7 +1038,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication" // DeleteBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketReplication operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1114,7 +1114,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging" // DeleteBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketTagging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1190,7 +1190,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite" // DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketWebsite operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1266,7 +1266,7 @@ const opDeleteObject = "DeleteObject" // DeleteObjectRequest generates a "aws/request.Request" representing the // client's request for the DeleteObject operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1342,7 +1342,7 @@ const opDeleteObjectTagging = "DeleteObjectTagging" // DeleteObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the DeleteObjectTagging operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1416,7 +1416,7 @@ const opDeleteObjects = "DeleteObjects" // DeleteObjectsRequest generates a "aws/request.Request" representing the // client's request for the DeleteObjects operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1491,7 +1491,7 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" // GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAccelerateConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1565,7 +1565,7 @@ const opGetBucketAcl = "GetBucketAcl" // GetBucketAclRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAcl operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1639,7 +1639,7 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" // GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAnalyticsConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1714,7 +1714,7 @@ const opGetBucketCors = "GetBucketCors" // GetBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the GetBucketCors operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1788,7 +1788,7 @@ const opGetBucketEncryption = "GetBucketEncryption" // GetBucketEncryptionRequest generates a "aws/request.Request" representing the // client's request for the GetBucketEncryption operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -1862,7 +1862,7 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketInventoryConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -1937,7 +1937,7 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // GetBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycle operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2020,7 +2020,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycleConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2094,7 +2094,7 @@ const opGetBucketLocation = "GetBucketLocation" // GetBucketLocationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLocation operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2168,7 +2168,7 @@ const opGetBucketLogging = "GetBucketLogging" // GetBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLogging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2243,7 +2243,7 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketMetricsConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2318,7 +2318,7 @@ const opGetBucketNotification = "GetBucketNotification" // GetBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotification operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2401,7 +2401,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration // GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotificationConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2475,7 +2475,7 @@ const opGetBucketPolicy = "GetBucketPolicy" // GetBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetBucketPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2549,7 +2549,7 @@ const opGetBucketReplication = "GetBucketReplication" // GetBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketReplication operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2623,7 +2623,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment" // GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the GetBucketRequestPayment operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2697,7 +2697,7 @@ const opGetBucketTagging = "GetBucketTagging" // GetBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketTagging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2771,7 +2771,7 @@ const opGetBucketVersioning = "GetBucketVersioning" // GetBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the GetBucketVersioning operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -2845,7 +2845,7 @@ const opGetBucketWebsite = "GetBucketWebsite" // GetBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the GetBucketWebsite operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2919,7 +2919,7 @@ const opGetObject = "GetObject" // GetObjectRequest generates a "aws/request.Request" representing the // client's request for the GetObject operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -2998,7 +2998,7 @@ const opGetObjectAcl = "GetObjectAcl" // GetObjectAclRequest generates a "aws/request.Request" representing the // client's request for the GetObjectAcl operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3077,7 +3077,7 @@ const opGetObjectTagging = "GetObjectTagging" // GetObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the GetObjectTagging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3151,7 +3151,7 @@ const opGetObjectTorrent = "GetObjectTorrent" // GetObjectTorrentRequest generates a "aws/request.Request" representing the // client's request for the GetObjectTorrent operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3225,7 +3225,7 @@ const opHeadBucket = "HeadBucket" // HeadBucketRequest generates a "aws/request.Request" representing the // client's request for the HeadBucket operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3307,7 +3307,7 @@ const opHeadObject = "HeadObject" // HeadObjectRequest generates a "aws/request.Request" representing the // client's request for the HeadObject operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -3386,7 +3386,7 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" // ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketAnalyticsConfigurations operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3460,7 +3460,7 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketInventoryConfigurations operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3534,7 +3534,7 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketMetricsConfigurations operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3608,7 +3608,7 @@ const opListBuckets = "ListBuckets" // ListBucketsRequest generates a "aws/request.Request" representing the // client's request for the ListBuckets operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3682,7 +3682,7 @@ const opListMultipartUploads = "ListMultipartUploads" // ListMultipartUploadsRequest generates a "aws/request.Request" representing the // client's request for the ListMultipartUploads operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3812,7 +3812,7 @@ const opListObjectVersions = "ListObjectVersions" // ListObjectVersionsRequest generates a "aws/request.Request" representing the // client's request for the ListObjectVersions operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -3942,7 +3942,7 @@ const opListObjects = "ListObjects" // ListObjectsRequest generates a "aws/request.Request" representing the // client's request for the ListObjects operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4079,7 +4079,7 @@ const opListObjectsV2 = "ListObjectsV2" // ListObjectsV2Request generates a "aws/request.Request" representing the // client's request for the ListObjectsV2 operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4217,7 +4217,7 @@ const opListParts = "ListParts" // ListPartsRequest generates a "aws/request.Request" representing the // client's request for the ListParts operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4347,7 +4347,7 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" // PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAccelerateConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4423,7 +4423,7 @@ const opPutBucketAcl = "PutBucketAcl" // PutBucketAclRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAcl operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4499,7 +4499,7 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" // PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAnalyticsConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4576,7 +4576,7 @@ const opPutBucketCors = "PutBucketCors" // PutBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the PutBucketCors operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4652,7 +4652,7 @@ const opPutBucketEncryption = "PutBucketEncryption" // PutBucketEncryptionRequest generates a "aws/request.Request" representing the // client's request for the PutBucketEncryption operation. 
The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4729,7 +4729,7 @@ const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketInventoryConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4806,7 +4806,7 @@ const opPutBucketLifecycle = "PutBucketLifecycle" // PutBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLifecycle operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4891,7 +4891,7 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" // PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLifecycleConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -4968,7 +4968,7 @@ const opPutBucketLogging = "PutBucketLogging" // PutBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLogging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5046,7 +5046,7 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" // PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketMetricsConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5123,7 +5123,7 @@ const opPutBucketNotification = "PutBucketNotification" // PutBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketNotification operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -5208,7 +5208,7 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration // PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketNotificationConfiguration operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5284,7 +5284,7 @@ const opPutBucketPolicy = "PutBucketPolicy" // PutBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the PutBucketPolicy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5361,7 +5361,7 @@ const opPutBucketReplication = "PutBucketReplication" // PutBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketReplication operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5439,7 +5439,7 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment" // PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the PutBucketRequestPayment operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5519,7 +5519,7 @@ const opPutBucketTagging = "PutBucketTagging" // PutBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the PutBucketTagging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5595,7 +5595,7 @@ const opPutBucketVersioning = "PutBucketVersioning" // PutBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the PutBucketVersioning operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5672,7 +5672,7 @@ const opPutBucketWebsite = "PutBucketWebsite" // PutBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the PutBucketWebsite operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5748,7 +5748,7 @@ const opPutObject = "PutObject" // PutObjectRequest generates a "aws/request.Request" representing the // client's request for the PutObject operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5822,7 +5822,7 @@ const opPutObjectAcl = "PutObjectAcl" // PutObjectAclRequest generates a "aws/request.Request" representing the // client's request for the PutObjectAcl operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5902,7 +5902,7 @@ const opPutObjectTagging = "PutObjectTagging" // PutObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the PutObjectTagging operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -5976,7 +5976,7 @@ const opRestoreObject = "RestoreObject" // RestoreObjectRequest generates a "aws/request.Request" representing the // client's request for the RestoreObject operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6055,7 +6055,7 @@ const opSelectObjectContent = "SelectObjectContent" // SelectObjectContentRequest generates a "aws/request.Request" representing the // client's request for the SelectObjectContent operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6137,7 +6137,7 @@ const opUploadPart = "UploadPart" // UploadPartRequest generates a "aws/request.Request" representing the // client's request for the UploadPart operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -6217,7 +6217,7 @@ const opUploadPartCopy = "UploadPartCopy" // UploadPartCopyRequest generates a "aws/request.Request" representing the // client's request for the UploadPartCopy operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index a55beab96..6f560a409 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -3,6 +3,7 @@ package s3 import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3err" ) func init() { @@ -21,6 +22,7 @@ func defaultInitClientFn(c *client.Client) { // S3 uses custom error unmarshaling logic c.Handlers.UnmarshalError.Clear() c.Handlers.UnmarshalError.PushBack(unmarshalError) + c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler()) } func defaultInitRequestFn(r *request.Request) { @@ -42,6 +44,7 @@ func defaultInitRequestFn(r *request.Request) { r.Handlers.Validate.PushFront(populateLocationConstraint) case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler()) case opPutObject, opUploadPart: r.Handlers.Build.PushBack(computeBodyHashes) // Disabled until #1837 root issue is resolved. diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index 9f33efc6c..fde3050f9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -13,7 +13,11 @@ import ( func copyMultipartStatusOKUnmarhsalError(r *request.Request) { b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "unable to read response body", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "unable to read response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } body := bytes.NewReader(b) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index bcca8627a..12c0612c8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -23,22 +23,17 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) - hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2") - // Bucket exists in a different region, and request needs // to be made to the correct region. 
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { - r.Error = requestFailure{ - RequestFailure: awserr.NewRequestFailure( - awserr.New("BucketRegionError", - fmt.Sprintf("incorrect region, the bucket is not in '%s' region", - aws.StringValue(r.Config.Region)), - nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ), - hostID: hostID, - } + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", + fmt.Sprintf("incorrect region, the bucket is not in '%s' region", + aws.StringValue(r.Config.Region)), + nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } @@ -63,14 +58,11 @@ func unmarshalError(r *request.Request) { errMsg = statusText } - r.Error = requestFailure{ - RequestFailure: awserr.NewRequestFailure( - awserr.New(errCode, errMsg, err), - r.HTTPResponse.StatusCode, - r.RequestID, - ), - hostID: hostID, - } + r.Error = awserr.NewRequestFailure( + awserr.New(errCode, errMsg, err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) } // A RequestFailure provides access to the S3 Request ID and Host ID values @@ -83,21 +75,3 @@ type RequestFailure interface { // Host ID is the S3 Host ID needed for debug, and contacting support HostID() string } - -type requestFailure struct { - awserr.RequestFailure - - hostID string -} - -func (r requestFailure) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s", - r.StatusCode(), r.RequestID(), r.hostID) - return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} -func (r requestFailure) String() string { - return r.Error() -} -func (r requestFailure) HostID() string { - return r.hostID -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 6f89a796e..ee908f916 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -15,7 +15,7 @@ const opAssumeRole = "AssumeRole" // AssumeRoleRequest generates a "aws/request.Request" representing the // client's request for the AssumeRole operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -209,7 +209,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithSAML operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -391,7 +391,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithWebIdentity operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
@@ -602,7 +602,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the // client's request for the DecodeAuthorizationMessage operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -714,7 +714,7 @@ const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the // client's request for the GetCallerIdentity operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -789,7 +789,7 @@ const opGetFederationToken = "GetFederationToken" // GetFederationTokenRequest generates a "aws/request.Request" representing the // client's request for the GetFederationToken operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. @@ -958,7 +958,7 @@ const opGetSessionToken = "GetSessionToken" // GetSessionTokenRequest generates a "aws/request.Request" representing the // client's request for the GetSessionToken operation. The "output" return // value will be populated with the request's response once the request completes -// successfuly. +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md deleted file mode 100644 index a249b73c4..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# etcd/clientv3 - -[![Docs](https://readthedocs.org/projects/etcd/badge/?version=latest&style=flat-square)](https://etcd.readthedocs.io/en/latest/?badge=latest) -[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3) - -`etcd/clientv3` is the official Go etcd client for v3. - -See https://etcd.readthedocs.io/en/latest for latest client architecture. - -## Install - -```bash -go get go.etcd.io/etcd/clientv3 -``` - -## Get started - -Create client using `clientv3.New`: - -```go -cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, - DialTimeout: 5 * time.Second, -}) -if err != nil { - // handle error! -} -defer cli.Close() -``` - -etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls. And `clientv3` uses -[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it. -If the client is not closed, the connection will have leaky goroutines. 
To specify client request timeout, -pass `context.WithTimeout` to APIs: - -```go -ctx, cancel := context.WithTimeout(context.Background(), timeout) -resp, err := cli.Put(ctx, "sample_key", "sample_value") -cancel() -if err != nil { - // handle error! -} -// use the response -``` - -For full compatibility, it is recommended to vendor builds using etcd's vendored packages, using tools like `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). - -## Error Handling - -etcd client returns 2 types of errors: - -1. context error: canceled or deadline exceeded. -2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes). - -Here is the example code to handle client errors: - -```go -resp, err := cli.Put(ctx, "", "") -if err != nil { - switch err { - case context.Canceled: - log.Fatalf("ctx is canceled by another routine: %v", err) - case context.DeadlineExceeded: - log.Fatalf("ctx is attached with a deadline is exceeded: %v", err) - case rpctypes.ErrEmptyKey: - log.Fatalf("client-side error: %v", err) - default: - log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) - } -} -``` - -## Metrics - -The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/master/clientv3/example_metrics_test.go). - -## Namespacing - -The [namespace](https://godoc.org/go.etcd.io/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. - -## Request size limit - -Client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize` in bytes. If none given, client request send limit defaults to 2 MiB including gRPC overhead bytes. And receive limit defaults to `math.MaxInt32`. - -## Examples - -More code examples can be found at [GoDoc](https://godoc.org/go.etcd.io/etcd/clientv3). diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go deleted file mode 100644 index 921f50f5e..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "context" - "fmt" - "strings" - - "go.etcd.io/etcd/auth/authpb" - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - "google.golang.org/grpc" -) - -type ( - AuthEnableResponse pb.AuthEnableResponse - AuthDisableResponse pb.AuthDisableResponse - AuthenticateResponse pb.AuthenticateResponse - AuthUserAddResponse pb.AuthUserAddResponse - AuthUserDeleteResponse pb.AuthUserDeleteResponse - AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse - AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse - AuthUserGetResponse pb.AuthUserGetResponse - AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse - AuthRoleAddResponse pb.AuthRoleAddResponse - AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse - AuthRoleGetResponse pb.AuthRoleGetResponse - AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse - AuthRoleDeleteResponse pb.AuthRoleDeleteResponse - AuthUserListResponse pb.AuthUserListResponse - AuthRoleListResponse pb.AuthRoleListResponse - - PermissionType authpb.Permission_Type - Permission authpb.Permission -) - -const ( - PermRead = authpb.READ - PermWrite = authpb.WRITE - PermReadWrite = authpb.READWRITE -) - -type Auth interface { - // AuthEnable enables auth of an etcd cluster. - AuthEnable(ctx context.Context) (*AuthEnableResponse, error) - - // AuthDisable disables auth of an etcd cluster. - AuthDisable(ctx context.Context) (*AuthDisableResponse, error) - - // UserAdd adds a new user to an etcd cluster. - UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) - - // UserDelete deletes a user from an etcd cluster. - UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) - - // UserChangePassword changes a password of a user. - UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) - - // UserGrantRole grants a role to a user. - UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) - - // UserGet gets a detailed information of a user. - UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) - - // UserList gets a list of all users. - UserList(ctx context.Context) (*AuthUserListResponse, error) - - // UserRevokeRole revokes a role of a user. - UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) - - // RoleAdd adds a new role to an etcd cluster. - RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) - - // RoleGrantPermission grants a permission to a role. - RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) - - // RoleGet gets a detailed information of a role. - RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) - - // RoleList gets a list of all roles. - RoleList(ctx context.Context) (*AuthRoleListResponse, error) - - // RoleRevokePermission revokes a permission from a role. - RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) - - // RoleDelete deletes a role. 
- RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) -} - -type authClient struct { - remote pb.AuthClient - callOpts []grpc.CallOption -} - -func NewAuth(c *Client) Auth { - api := &authClient{remote: RetryAuthClient(c)} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { - resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) - return (*AuthEnableResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { - resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) - return (*AuthDisableResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { - resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { - resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) - return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { - resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { - resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) - return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { - resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) - return (*AuthUserGetResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { - resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) - return (*AuthUserListResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { - resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) - return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { - resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) - return (*AuthRoleAddResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { - perm := &authpb.Permission{ - Key: []byte(key), - RangeEnd: []byte(rangeEnd), - PermType: authpb.Permission_Type(permType), - } - resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) 
- return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { - resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) - return (*AuthRoleGetResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { - resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) - return (*AuthRoleListResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { - resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) - return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { - resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) - return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) -} - -func StrToPermissionType(s string) (PermissionType, error) { - val, ok := authpb.Permission_Type_value[strings.ToUpper(s)] - if ok { - return PermissionType(val), nil - } - return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s) -} - -type authenticator struct { - conn *grpc.ClientConn // conn in-use - remote pb.AuthClient - callOpts []grpc.CallOption -} - -func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { - resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthenticateResponse)(resp), toErr(ctx, err) -} - -func (auth *authenticator) close() { - auth.conn.Close() -} - -func newAuthenticator(ctx context.Context, target string, opts []grpc.DialOption, c *Client) (*authenticator, error) { - conn, err := grpc.DialContext(ctx, target, opts...) - if err != nil { - return nil, err - } - - api := &authenticator{ - conn: conn, - remote: pb.NewAuthClient(conn), - } - if c != nil { - api.callOpts = c.callOpts - } - return api, nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go deleted file mode 100644 index 0a578307b..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ /dev/null @@ -1,673 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
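The Auth interface deleted above defines the user, role, and enable calls but carries no usage snippet. A minimal, hypothetical sketch of bootstrapping authentication with those methods (the names and password are placeholders, not part of this change):

```go
package authexample

import (
	"context"

	"go.etcd.io/etcd/clientv3"
)

// enableAuth bootstraps a root user and role, then turns authentication on,
// using only the Auth methods from the interface deleted above.
func enableAuth(ctx context.Context, cli *clientv3.Client) error {
	if _, err := cli.UserAdd(ctx, "root", "change-me"); err != nil {
		return err
	}
	if _, err := cli.RoleAdd(ctx, "root"); err != nil {
		return err
	}
	if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
		return err
	}
	_, err := cli.AuthEnable(ctx)
	return err
}
```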
- -package clientv3 - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "net" - "net/url" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils" - "go.etcd.io/etcd/clientv3/balancer" - "go.etcd.io/etcd/clientv3/balancer/picker" - "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint" - "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" - "go.uber.org/zap" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -var ( - ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") - ErrOldCluster = errors.New("etcdclient: old cluster version") - - roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String()) -) - -func init() { - lg := zap.NewNop() - if os.Getenv("ETCD_CLIENT_DEBUG") != "" { - var err error - lg, err = zap.NewProductionConfig().Build() // info level logging - if err != nil { - panic(err) - } - } - balancer.RegisterBuilder(balancer.Config{ - Policy: picker.RoundrobinBalanced, - Name: roundRobinBalancerName, - Logger: lg, - }) -} - -// Client provides and manages an etcd v3 client session. -type Client struct { - Cluster - KV - Lease - Watcher - Auth - Maintenance - - conn *grpc.ClientConn - - cfg Config - creds *credentials.TransportCredentials - balancer balancer.Balancer - resolverGroup *endpoint.ResolverGroup - mu *sync.Mutex - - ctx context.Context - cancel context.CancelFunc - - // Username is a user name for authentication. - Username string - // Password is a password for authentication. - Password string - // tokenCred is an instance of WithPerRPCCredentials()'s argument - tokenCred *authTokenCredential - - callOpts []grpc.CallOption - - lg *zap.Logger -} - -// New creates a new etcdv3 client from a given configuration. -func New(cfg Config) (*Client, error) { - if len(cfg.Endpoints) == 0 { - return nil, ErrNoAvailableEndpoints - } - - return newClient(&cfg) -} - -// NewCtxClient creates a client with a context but no underlying grpc -// connection. This is useful for embedded cases that override the -// service interface implementations and do not need connection management. -func NewCtxClient(ctx context.Context) *Client { - cctx, cancel := context.WithCancel(ctx) - return &Client{ctx: cctx, cancel: cancel} -} - -// NewFromURL creates a new etcdv3 client from a URL. -func NewFromURL(url string) (*Client, error) { - return New(Config{Endpoints: []string{url}}) -} - -// NewFromURLs creates a new etcdv3 client from URLs. -func NewFromURLs(urls []string) (*Client, error) { - return New(Config{Endpoints: urls}) -} - -// Close shuts down the client's etcd connections. -func (c *Client) Close() error { - c.cancel() - c.Watcher.Close() - c.Lease.Close() - if c.resolverGroup != nil { - c.resolverGroup.Close() - } - if c.conn != nil { - return toErr(c.ctx, c.conn.Close()) - } - return c.ctx.Err() -} - -// Ctx is a context for "out of band" messages (e.g., for sending -// "clean up" message when another context is canceled). It is -// canceled on client Close(). -func (c *Client) Ctx() context.Context { return c.ctx } - -// Endpoints lists the registered endpoints for the client. -func (c *Client) Endpoints() (eps []string) { - // copy the slice; protect original endpoints from being changed - eps = make([]string, len(c.cfg.Endpoints)) - copy(eps, c.cfg.Endpoints) - return -} - -// SetEndpoints updates client's endpoints. 
-func (c *Client) SetEndpoints(eps ...string) { - c.mu.Lock() - defer c.mu.Unlock() - c.cfg.Endpoints = eps - c.resolverGroup.SetEndpoints(eps) -} - -// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. -func (c *Client) Sync(ctx context.Context) error { - mresp, err := c.MemberList(ctx) - if err != nil { - return err - } - var eps []string - for _, m := range mresp.Members { - eps = append(eps, m.ClientURLs...) - } - c.SetEndpoints(eps...) - return nil -} - -func (c *Client) autoSync() { - if c.cfg.AutoSyncInterval == time.Duration(0) { - return - } - - for { - select { - case <-c.ctx.Done(): - return - case <-time.After(c.cfg.AutoSyncInterval): - ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) - err := c.Sync(ctx) - cancel() - if err != nil && err != c.ctx.Err() { - lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err) - } - } - } -} - -type authTokenCredential struct { - token string - tokenMu *sync.RWMutex -} - -func (cred authTokenCredential) RequireTransportSecurity() bool { - return false -} - -func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { - cred.tokenMu.RLock() - defer cred.tokenMu.RUnlock() - return map[string]string{ - rpctypes.TokenFieldNameGRPC: cred.token, - }, nil -} - -func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { - creds = c.creds - switch scheme { - case "unix": - case "http": - creds = nil - case "https", "unixs": - if creds != nil { - break - } - tlsconfig := &tls.Config{} - emptyCreds := credentials.NewTLS(tlsconfig) - creds = &emptyCreds - default: - creds = nil - } - return creds -} - -// dialSetupOpts gives the dial opts prior to any authentication. -func (c *Client) dialSetupOpts(creds *credentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) { - if c.cfg.DialKeepAliveTime > 0 { - params := keepalive.ClientParameters{ - Time: c.cfg.DialKeepAliveTime, - Timeout: c.cfg.DialKeepAliveTimeout, - PermitWithoutStream: c.cfg.PermitWithoutStream, - } - opts = append(opts, grpc.WithKeepaliveParams(params)) - } - opts = append(opts, dopts...) - - // Provide a net dialer that supports cancelation and timeout. - f := func(dialEp string, t time.Duration) (net.Conn, error) { - proto, host, _ := endpoint.ParseEndpoint(dialEp) - select { - case <-c.ctx.Done(): - return nil, c.ctx.Err() - default: - } - dialer := &net.Dialer{Timeout: t} - return dialer.DialContext(c.ctx, proto, host) - } - opts = append(opts, grpc.WithDialer(f)) - - if creds != nil { - opts = append(opts, grpc.WithTransportCredentials(*creds)) - } else { - opts = append(opts, grpc.WithInsecure()) - } - - // Interceptor retry and backoff. - // TODO: Replace all of clientv3/retry.go with interceptor based retry, or with - // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy - // once it is available. - rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction)) - opts = append(opts, - // Disable stream retry by default since go-grpc-middleware/retry does not support client streams. - // Streams that are safe to retry are enabled individually. - grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)), - grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)), - ) - - return opts, nil -} - -// Dial connects to a single endpoint using the client's config. 
-func (c *Client) Dial(ep string) (*grpc.ClientConn, error) { - creds := c.directDialCreds(ep) - // Use the grpc passthrough resolver to directly dial a single endpoint. - // This resolver passes through the 'unix' and 'unixs' endpoints schemes used - // by etcd without modification, allowing us to directly dial endpoints and - // using the same dial functions that we use for load balancer dialing. - return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds) -} - -func (c *Client) getToken(ctx context.Context) error { - var err error // return last error in a case of fail - var auth *authenticator - - for i := 0; i < len(c.cfg.Endpoints); i++ { - ep := c.cfg.Endpoints[i] - // use dial options without dopts to avoid reusing the client balancer - var dOpts []grpc.DialOption - _, host, _ := endpoint.ParseEndpoint(ep) - target := c.resolverGroup.Target(host) - creds := c.dialWithBalancerCreds(ep) - dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...) - if err != nil { - err = fmt.Errorf("failed to configure auth dialer: %v", err) - continue - } - dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName)) - auth, err = newAuthenticator(ctx, target, dOpts, c) - if err != nil { - continue - } - defer auth.close() - - var resp *AuthenticateResponse - resp, err = auth.authenticate(ctx, c.Username, c.Password) - if err != nil { - continue - } - - c.tokenCred.tokenMu.Lock() - c.tokenCred.token = resp.Token - c.tokenCred.tokenMu.Unlock() - - return nil - } - - return err -} - -// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host -// of the provided endpoint determines the scheme used for all endpoints of the client connection. -func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { - _, host, _ := endpoint.ParseEndpoint(ep) - target := c.resolverGroup.Target(host) - creds := c.dialWithBalancerCreds(ep) - return c.dial(target, creds, dopts...) -} - -// dial configures and dials any grpc balancer target. -func (c *Client) dial(target string, creds *credentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { - opts, err := c.dialSetupOpts(creds, dopts...) - if err != nil { - return nil, fmt.Errorf("failed to configure dialer: %v", err) - } - - if c.Username != "" && c.Password != "" { - c.tokenCred = &authTokenCredential{ - tokenMu: &sync.RWMutex{}, - } - - ctx, cancel := c.ctx, func() {} - if c.cfg.DialTimeout > 0 { - ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) - } - - err = c.getToken(ctx) - if err != nil { - if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { - if err == ctx.Err() && ctx.Err() != c.ctx.Err() { - err = context.DeadlineExceeded - } - cancel() - return nil, err - } - } else { - opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) - } - cancel() - } - - opts = append(opts, c.cfg.DialOptions...) - - dctx := c.ctx - if c.cfg.DialTimeout > 0 { - var cancel context.CancelFunc - dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) - defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options? - } - - conn, err := grpc.DialContext(dctx, target, opts...) 
- if err != nil { - return nil, err - } - return conn, nil -} - -func (c *Client) directDialCreds(ep string) *credentials.TransportCredentials { - _, hostPort, scheme := endpoint.ParseEndpoint(ep) - creds := c.creds - if len(scheme) != 0 { - creds = c.processCreds(scheme) - if creds != nil { - c := *creds - clone := c.Clone() - // Set the server name must to the endpoint hostname without port since grpc - // otherwise attempts to check if x509 cert is valid for the full endpoint - // including the scheme and port, which fails. - host, _ := endpoint.ParseHostPort(hostPort) - clone.OverrideServerName(host) - creds = &clone - } - } - return creds -} - -func (c *Client) dialWithBalancerCreds(ep string) *credentials.TransportCredentials { - _, _, scheme := endpoint.ParseEndpoint(ep) - creds := c.creds - if len(scheme) != 0 { - creds = c.processCreds(scheme) - } - return creds -} - -// WithRequireLeader requires client requests to only succeed -// when the cluster has a leader. -func WithRequireLeader(ctx context.Context) context.Context { - md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewOutgoingContext(ctx, md) -} - -func newClient(cfg *Config) (*Client, error) { - if cfg == nil { - cfg = &Config{} - } - var creds *credentials.TransportCredentials - if cfg.TLS != nil { - c := credentials.NewTLS(cfg.TLS) - creds = &c - } - - // use a temporary skeleton client to bootstrap first connection - baseCtx := context.TODO() - if cfg.Context != nil { - baseCtx = cfg.Context - } - - ctx, cancel := context.WithCancel(baseCtx) - client := &Client{ - conn: nil, - cfg: *cfg, - creds: creds, - ctx: ctx, - cancel: cancel, - mu: new(sync.Mutex), - callOpts: defaultCallOpts, - } - - lcfg := DefaultLogConfig - if cfg.LogConfig != nil { - lcfg = *cfg.LogConfig - } - var err error - client.lg, err = lcfg.Build() - if err != nil { - return nil, err - } - - if cfg.Username != "" && cfg.Password != "" { - client.Username = cfg.Username - client.Password = cfg.Password - } - if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { - if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { - return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize) - } - callOpts := []grpc.CallOption{ - defaultFailFast, - defaultMaxCallSendMsgSize, - defaultMaxCallRecvMsgSize, - } - if cfg.MaxCallSendMsgSize > 0 { - callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize) - } - if cfg.MaxCallRecvMsgSize > 0 { - callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize) - } - client.callOpts = callOpts - } - - // Prepare a 'endpoint:///' resolver for the client and create a endpoint target to pass - // to dial so the client knows to use this resolver. - client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", strconv.FormatInt(time.Now().UnixNano(), 36))) - if err != nil { - client.cancel() - return nil, err - } - client.resolverGroup.SetEndpoints(cfg.Endpoints) - - if len(cfg.Endpoints) < 1 { - return nil, fmt.Errorf("at least one Endpoint must is required in client config") - } - dialEndpoint := cfg.Endpoints[0] - - // Use an provided endpoint target so that for https:// without any tls config given, then - // grpc will assume the certificate server name is the endpoint host. 
- conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName)) - if err != nil { - client.cancel() - client.resolverGroup.Close() - return nil, err - } - // TODO: With the old grpc balancer interface, we waited until the dial timeout - // for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface? - client.conn = conn - - client.Cluster = NewCluster(client) - client.KV = NewKV(client) - client.Lease = NewLease(client) - client.Watcher = NewWatcher(client) - client.Auth = NewAuth(client) - client.Maintenance = NewMaintenance(client) - - if cfg.RejectOldCluster { - if err := client.checkVersion(); err != nil { - client.Close() - return nil, err - } - } - - go client.autoSync() - return client, nil -} - -// roundRobinQuorumBackoff retries against quorum between each backoff. -// This is intended for use with a round robin load balancer. -func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc { - return func(attempt uint) time.Duration { - // after each round robin across quorum, backoff for our wait between duration - n := uint(len(c.Endpoints())) - quorum := (n/2 + 1) - if attempt%quorum == 0 { - c.lg.Info("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction)) - return backoffutils.JitterUp(waitBetween, jitterFraction) - } - c.lg.Info("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum)) - return 0 - } -} - -func (c *Client) checkVersion() (err error) { - var wg sync.WaitGroup - errc := make(chan error, len(c.cfg.Endpoints)) - ctx, cancel := context.WithCancel(c.ctx) - if c.cfg.DialTimeout > 0 { - ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) - } - wg.Add(len(c.cfg.Endpoints)) - for _, ep := range c.cfg.Endpoints { - // if cluster is current, any endpoint gives a recent version - go func(e string) { - defer wg.Done() - resp, rerr := c.Status(ctx, e) - if rerr != nil { - errc <- rerr - return - } - vs := strings.Split(resp.Version, ".") - maj, min := 0, 0 - if len(vs) >= 2 { - maj, _ = strconv.Atoi(vs[0]) - min, rerr = strconv.Atoi(vs[1]) - } - if maj < 3 || (maj == 3 && min < 2) { - rerr = ErrOldCluster - } - errc <- rerr - }(ep) - } - // wait for success - for i := 0; i < len(c.cfg.Endpoints); i++ { - if err = <-errc; err == nil { - break - } - } - cancel() - wg.Wait() - return err -} - -// ActiveConnection returns the current in-use connection -func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } - -// isHaltErr returns true if the given error and context indicate no forward -// progress can be made, even after reconnecting. -func isHaltErr(ctx context.Context, err error) bool { - if ctx != nil && ctx.Err() != nil { - return true - } - if err == nil { - return false - } - ev, _ := status.FromError(err) - // Unavailable codes mean the system will be right back. - // (e.g., can't connect, lost leader) - // Treat Internal codes as if something failed, leaving the - // system in an inconsistent state, but retrying could make progress. - // (e.g., failed in middle of send, corrupted frame) - // TODO: are permanent Internal errors possible from grpc? 
- return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal -} - -// isUnavailableErr returns true if the given error is an unavailable error -func isUnavailableErr(ctx context.Context, err error) bool { - if ctx != nil && ctx.Err() != nil { - return false - } - if err == nil { - return false - } - ev, _ := status.FromError(err) - // Unavailable codes mean the system will be right back. - // (e.g., can't connect, lost leader) - return ev.Code() == codes.Unavailable -} - -func toErr(ctx context.Context, err error) error { - if err == nil { - return nil - } - err = rpctypes.Error(err) - if _, ok := err.(rpctypes.EtcdError); ok { - return err - } - if ev, ok := status.FromError(err); ok { - code := ev.Code() - switch code { - case codes.DeadlineExceeded: - fallthrough - case codes.Canceled: - if ctx.Err() != nil { - err = ctx.Err() - } - case codes.Unavailable: - case codes.FailedPrecondition: - err = grpc.ErrClientConnClosing - } - } - return err -} - -func canceledByCaller(stopCtx context.Context, err error) bool { - if stopCtx.Err() == nil || err == nil { - return false - } - - return err == context.Canceled || err == context.DeadlineExceeded -} - -// IsConnCanceled returns true, if error is from a closed gRPC connection. -// ref. https://github.com/grpc/grpc-go/pull/1854 -func IsConnCanceled(err error) bool { - if err == nil { - return false - } - // >= gRPC v1.10.x - s, ok := status.FromError(err) - if ok { - // connection is canceled or server has already closed the connection - return s.Code() == codes.Canceled || s.Message() == "transport is closing" - } - // >= gRPC v1.10.x - if err == context.Canceled { - return true - } - // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")' - return strings.Contains(err.Error(), "grpc: the client connection is closing") -} - -func getHost(ep string) string { - url, uerr := url.Parse(ep) - if uerr != nil || !strings.Contains(ep, "://") { - return ep - } - return url.Host -} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go deleted file mode 100644 index d497c0578..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - "go.etcd.io/etcd/pkg/types" - - "google.golang.org/grpc" -) - -type ( - Member pb.Member - MemberListResponse pb.MemberListResponse - MemberAddResponse pb.MemberAddResponse - MemberRemoveResponse pb.MemberRemoveResponse - MemberUpdateResponse pb.MemberUpdateResponse -) - -type Cluster interface { - // MemberList lists the current cluster membership. - MemberList(ctx context.Context) (*MemberListResponse, error) - - // MemberAdd adds a new member into the cluster. - MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) - - // MemberRemove removes an existing member from the cluster. 
- MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) - - // MemberUpdate updates the peer addresses of the member. - MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) -} - -type cluster struct { - remote pb.ClusterClient - callOpts []grpc.CallOption -} - -func NewCluster(c *Client) Cluster { - api := &cluster{remote: RetryClusterClient(c)} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { - api := &cluster{remote: remote} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { - // fail-fast before panic in rafthttp - if _, err := types.NewURLs(peerAddrs); err != nil { - return nil, err - } - - r := &pb.MemberAddRequest{PeerURLs: peerAddrs} - resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*MemberAddResponse)(resp), nil -} - -func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { - r := &pb.MemberRemoveRequest{ID: id} - resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*MemberRemoveResponse)(resp), nil -} - -func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { - // fail-fast before panic in rafthttp - if _, err := types.NewURLs(peerAddrs); err != nil { - return nil, err - } - - // it is safe to retry on update. - r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) - if err == nil { - return (*MemberUpdateResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { - // it is safe to retry on list. - resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...) - if err == nil { - return (*MemberListResponse)(resp), nil - } - return nil, toErr(ctx, err) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go deleted file mode 100644 index 5779713d3..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" -) - -// CompactOp represents a compact operation. -type CompactOp struct { - revision int64 - physical bool -} - -// CompactOption configures compact operation. -type CompactOption func(*CompactOp) - -func (op *CompactOp) applyCompactOpts(opts []CompactOption) { - for _, opt := range opts { - opt(op) - } -} - -// OpCompact wraps slice CompactOption to create a CompactOp. 
-func OpCompact(rev int64, opts ...CompactOption) CompactOp { - ret := CompactOp{revision: rev} - ret.applyCompactOpts(opts) - return ret -} - -func (op CompactOp) toRequest() *pb.CompactionRequest { - return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} -} - -// WithCompactPhysical makes Compact wait until all compacted entries are -// removed from the etcd server's storage. -func WithCompactPhysical() CompactOption { - return func(op *CompactOp) { op.physical = true } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go deleted file mode 100644 index 01ed68e94..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/compare.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" -) - -type CompareTarget int -type CompareResult int - -const ( - CompareVersion CompareTarget = iota - CompareCreated - CompareModified - CompareValue -) - -type Cmp pb.Compare - -func Compare(cmp Cmp, result string, v interface{}) Cmp { - var r pb.Compare_CompareResult - - switch result { - case "=": - r = pb.Compare_EQUAL - case "!=": - r = pb.Compare_NOT_EQUAL - case ">": - r = pb.Compare_GREATER - case "<": - r = pb.Compare_LESS - default: - panic("Unknown result op") - } - - cmp.Result = r - switch cmp.Target { - case pb.Compare_VALUE: - val, ok := v.(string) - if !ok { - panic("bad compare value") - } - cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)} - case pb.Compare_VERSION: - cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)} - case pb.Compare_CREATE: - cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} - case pb.Compare_MOD: - cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)} - case pb.Compare_LEASE: - cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)} - default: - panic("Unknown compare type") - } - return cmp -} - -func Value(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_VALUE} -} - -func Version(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_VERSION} -} - -func CreateRevision(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_CREATE} -} - -func ModRevision(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_MOD} -} - -// LeaseValue compares a key's LeaseID to a value of your choosing. The empty -// LeaseID is 0, otherwise known as `NoLease`. -func LeaseValue(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_LEASE} -} - -// KeyBytes returns the byte slice holding with the comparison key. -func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } - -// WithKeyBytes sets the byte slice for the comparison key. -func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } - -// ValueBytes returns the byte slice holding the comparison value, if any. 
-func (cmp *Cmp) ValueBytes() []byte { - if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { - return tu.Value - } - return nil -} - -// WithValueBytes sets the byte slice for the comparison's value. -func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } - -// WithRange sets the comparison to scan the range [key, end). -func (cmp Cmp) WithRange(end string) Cmp { - cmp.RangeEnd = []byte(end) - return cmp -} - -// WithPrefix sets the comparison to scan all keys prefixed by the key. -func (cmp Cmp) WithPrefix() Cmp { - cmp.RangeEnd = getPrefix(cmp.Key) - return cmp -} - -// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. -func mustInt64(val interface{}) int64 { - if v, ok := val.(int64); ok { - return v - } - if v, ok := val.(int); ok { - return int64(v) - } - panic("bad value") -} - -// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an -// int64 otherwise. -func mustInt64orLeaseID(val interface{}) int64 { - if v, ok := val.(LeaseID); ok { - return int64(v) - } - return mustInt64(val) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go deleted file mode 100644 index dcdbf511d..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package concurrency implements concurrency operations on top of -// etcd such as distributed locks, barriers, and elections. -package concurrency diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go deleted file mode 100644 index d44e426f4..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
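The concurrency package doc deleted above only names distributed locks, barriers, and elections. A minimal, hypothetical sketch built from the Session and Mutex types removed later in this diff (endpoint, TTL, and key prefix are made up):

```go
package main

import (
	"context"
	"log"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// A session ties the lock to a lease that is kept alive on our behalf.
	sess, err := concurrency.NewSession(cli, concurrency.WithTTL(30))
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()

	mu := concurrency.NewMutex(sess, "/locks/demo")
	if err := mu.Lock(context.TODO()); err != nil {
		log.Fatal(err)
	}
	// ... critical section ...
	if err := mu.Unlock(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```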
- -package concurrency - -import ( - "context" - "errors" - "fmt" - - v3 "go.etcd.io/etcd/clientv3" - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - "go.etcd.io/etcd/mvcc/mvccpb" -) - -var ( - ErrElectionNotLeader = errors.New("election: not leader") - ErrElectionNoLeader = errors.New("election: no leader") -) - -type Election struct { - session *Session - - keyPrefix string - - leaderKey string - leaderRev int64 - leaderSession *Session - hdr *pb.ResponseHeader -} - -// NewElection returns a new election on a given key prefix. -func NewElection(s *Session, pfx string) *Election { - return &Election{session: s, keyPrefix: pfx + "/"} -} - -// ResumeElection initializes an election with a known leader. -func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election { - return &Election{ - session: s, - leaderKey: leaderKey, - leaderRev: leaderRev, - leaderSession: s, - } -} - -// Campaign puts a value as eligible for the election on the prefix -// key. -// Multiple sessions can participate in the election for the -// same prefix, but only one can be the leader at a time. -// -// If the context is 'context.TODO()/context.Background()', the Campaign -// will continue to be blocked for other keys to be deleted, unless server -// returns a non-recoverable error (e.g. ErrCompacted). -// Otherwise, until the context is not cancelled or timed-out, Campaign will -// continue to be blocked until it becomes the leader. -func (e *Election) Campaign(ctx context.Context, val string) error { - s := e.session - client := e.session.Client() - - k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease()) - txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0)) - txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease()))) - txn = txn.Else(v3.OpGet(k)) - resp, err := txn.Commit() - if err != nil { - return err - } - e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s - if !resp.Succeeded { - kv := resp.Responses[0].GetResponseRange().Kvs[0] - e.leaderRev = kv.CreateRevision - if string(kv.Value) != val { - if err = e.Proclaim(ctx, val); err != nil { - e.Resign(ctx) - return err - } - } - } - - _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1) - if err != nil { - // clean up in case of context cancel - select { - case <-ctx.Done(): - e.Resign(client.Ctx()) - default: - e.leaderSession = nil - } - return err - } - e.hdr = resp.Header - - return nil -} - -// Proclaim lets the leader announce a new value without another election. -func (e *Election) Proclaim(ctx context.Context, val string) error { - if e.leaderSession == nil { - return ErrElectionNotLeader - } - client := e.session.Client() - cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) - txn := client.Txn(ctx).If(cmp) - txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease()))) - tresp, terr := txn.Commit() - if terr != nil { - return terr - } - if !tresp.Succeeded { - e.leaderKey = "" - return ErrElectionNotLeader - } - - e.hdr = tresp.Header - return nil -} - -// Resign lets a leader start a new election. -func (e *Election) Resign(ctx context.Context) (err error) { - if e.leaderSession == nil { - return nil - } - client := e.session.Client() - cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) - resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit() - if err == nil { - e.hdr = resp.Header - } - e.leaderKey = "" - e.leaderSession = nil - return err -} - -// Leader returns the leader value for the current election. 
-func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) { - client := e.session.Client() - resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) - if err != nil { - return nil, err - } else if len(resp.Kvs) == 0 { - // no leader currently elected - return nil, ErrElectionNoLeader - } - return resp, nil -} - -// Observe returns a channel that reliably observes ordered leader proposals -// as GetResponse values on every current elected leader key. It will not -// necessarily fetch all historical leader updates, but will always post the -// most recent leader value. -// -// The channel closes when the context is canceled or the underlying watcher -// is otherwise disrupted. -func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse { - retc := make(chan v3.GetResponse) - go e.observe(ctx, retc) - return retc -} - -func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { - client := e.session.Client() - - defer close(ch) - for { - resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) - if err != nil { - return - } - - var kv *mvccpb.KeyValue - var hdr *pb.ResponseHeader - - if len(resp.Kvs) == 0 { - cctx, cancel := context.WithCancel(ctx) - // wait for first key put on prefix - opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()} - wch := client.Watch(cctx, e.keyPrefix, opts...) - for kv == nil { - wr, ok := <-wch - if !ok || wr.Err() != nil { - cancel() - return - } - // only accept puts; a delete will make observe() spin - for _, ev := range wr.Events { - if ev.Type == mvccpb.PUT { - hdr, kv = &wr.Header, ev.Kv - // may have multiple revs; hdr.rev = the last rev - // set to kv's rev in case batch has multiple Puts - hdr.Revision = kv.ModRevision - break - } - } - } - cancel() - } else { - hdr, kv = resp.Header, resp.Kvs[0] - } - - select { - case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}: - case <-ctx.Done(): - return - } - - cctx, cancel := context.WithCancel(ctx) - wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1)) - keyDeleted := false - for !keyDeleted { - wr, ok := <-wch - if !ok { - cancel() - return - } - for _, ev := range wr.Events { - if ev.Type == mvccpb.DELETE { - keyDeleted = true - break - } - resp.Header = &wr.Header - resp.Kvs = []*mvccpb.KeyValue{ev.Kv} - select { - case ch <- *resp: - case <-cctx.Done(): - cancel() - return - } - } - } - cancel() - } -} - -// Key returns the leader key if elected, empty string otherwise. -func (e *Election) Key() string { return e.leaderKey } - -// Rev returns the leader key's creation revision, if elected. -func (e *Election) Rev() int64 { return e.leaderRev } - -// Header is the response header from the last successful election proposal. -func (e *Election) Header() *pb.ResponseHeader { return e.hdr } diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go deleted file mode 100644 index e4cf77517..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "context" - "fmt" - - v3 "go.etcd.io/etcd/clientv3" - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - "go.etcd.io/etcd/mvcc/mvccpb" -) - -func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error { - cctx, cancel := context.WithCancel(ctx) - defer cancel() - - var wr v3.WatchResponse - wch := client.Watch(cctx, key, v3.WithRev(rev)) - for wr = range wch { - for _, ev := range wr.Events { - if ev.Type == mvccpb.DELETE { - return nil - } - } - } - if err := wr.Err(); err != nil { - return err - } - if err := ctx.Err(); err != nil { - return err - } - return fmt.Errorf("lost watcher waiting for delete") -} - -// waitDeletes efficiently waits until all keys matching the prefix and no greater -// than the create revision. -func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) { - getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev)) - for { - resp, err := client.Get(ctx, pfx, getOpts...) - if err != nil { - return nil, err - } - if len(resp.Kvs) == 0 { - return resp.Header, nil - } - lastKey := string(resp.Kvs[0].Key) - if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { - return nil, err - } - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go deleted file mode 100644 index 162e70364..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "context" - "fmt" - "sync" - - v3 "go.etcd.io/etcd/clientv3" - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" -) - -// Mutex implements the sync Locker interface with etcd -type Mutex struct { - s *Session - - pfx string - myKey string - myRev int64 - hdr *pb.ResponseHeader -} - -func NewMutex(s *Session, pfx string) *Mutex { - return &Mutex{s, pfx + "/", "", -1, nil} -} - -// Lock locks the mutex with a cancelable context. If the context is canceled -// while trying to acquire the lock, the mutex tries to clean its stale lock entry. 
-func (m *Mutex) Lock(ctx context.Context) error { - s := m.s - client := m.s.Client() - - m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) - cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) - // put self in lock waiters via myKey; oldest waiter holds lock - put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) - // reuse key in case this session already holds the lock - get := v3.OpGet(m.myKey) - // fetch current holder to complete uncontended path with only one RPC - getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) - resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() - if err != nil { - return err - } - m.myRev = resp.Header.Revision - if !resp.Succeeded { - m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision - } - // if no key on prefix / the minimum rev is key, already hold the lock - ownerKey := resp.Responses[1].GetResponseRange().Kvs - if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { - m.hdr = resp.Header - return nil - } - - // wait for deletion revisions prior to myKey - hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) - // release lock key if cancelled - select { - case <-ctx.Done(): - m.Unlock(client.Ctx()) - default: - m.hdr = hdr - } - return werr -} - -func (m *Mutex) Unlock(ctx context.Context) error { - client := m.s.Client() - if _, err := client.Delete(ctx, m.myKey); err != nil { - return err - } - m.myKey = "\x00" - m.myRev = -1 - return nil -} - -func (m *Mutex) IsOwner() v3.Cmp { - return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev) -} - -func (m *Mutex) Key() string { return m.myKey } - -// Header is the response header received from etcd on acquiring the lock. -func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr } - -type lockerMutex struct{ *Mutex } - -func (lm *lockerMutex) Lock() { - client := lm.s.Client() - if err := lm.Mutex.Lock(client.Ctx()); err != nil { - panic(err) - } -} -func (lm *lockerMutex) Unlock() { - client := lm.s.Client() - if err := lm.Mutex.Unlock(client.Ctx()); err != nil { - panic(err) - } -} - -// NewLocker creates a sync.Locker backed by an etcd mutex. -func NewLocker(s *Session, pfx string) sync.Locker { - return &lockerMutex{NewMutex(s, pfx)} -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go deleted file mode 100644 index 598ec0e4f..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "context" - "time" - - v3 "go.etcd.io/etcd/clientv3" -) - -const defaultSessionTTL = 60 - -// Session represents a lease kept alive for the lifetime of a client. -// Fault-tolerant applications may use sessions to reason about liveness. 
-type Session struct { - client *v3.Client - opts *sessionOptions - id v3.LeaseID - - cancel context.CancelFunc - donec <-chan struct{} -} - -// NewSession gets the leased session for a client. -func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { - ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()} - for _, opt := range opts { - opt(ops) - } - - id := ops.leaseID - if id == v3.NoLease { - resp, err := client.Grant(ops.ctx, int64(ops.ttl)) - if err != nil { - return nil, err - } - id = v3.LeaseID(resp.ID) - } - - ctx, cancel := context.WithCancel(ops.ctx) - keepAlive, err := client.KeepAlive(ctx, id) - if err != nil || keepAlive == nil { - cancel() - return nil, err - } - - donec := make(chan struct{}) - s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec} - - // keep the lease alive until client error or cancelled context - go func() { - defer close(donec) - for range keepAlive { - // eat messages until keep alive channel closes - } - }() - - return s, nil -} - -// Client is the etcd client that is attached to the session. -func (s *Session) Client() *v3.Client { - return s.client -} - -// Lease is the lease ID for keys bound to the session. -func (s *Session) Lease() v3.LeaseID { return s.id } - -// Done returns a channel that closes when the lease is orphaned, expires, or -// is otherwise no longer being refreshed. -func (s *Session) Done() <-chan struct{} { return s.donec } - -// Orphan ends the refresh for the session lease. This is useful -// in case the state of the client connection is indeterminate (revoke -// would fail) or when transferring lease ownership. -func (s *Session) Orphan() { - s.cancel() - <-s.donec -} - -// Close orphans the session and revokes the session lease. -func (s *Session) Close() error { - s.Orphan() - // if revoke takes longer than the ttl, lease is expired anyway - ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second) - _, err := s.client.Revoke(ctx, s.id) - cancel() - return err -} - -type sessionOptions struct { - ttl int - leaseID v3.LeaseID - ctx context.Context -} - -// SessionOption configures Session. -type SessionOption func(*sessionOptions) - -// WithTTL configures the session's TTL in seconds. -// If TTL is <= 0, the default 60 seconds TTL will be used. -func WithTTL(ttl int) SessionOption { - return func(so *sessionOptions) { - if ttl > 0 { - so.ttl = ttl - } - } -} - -// WithLease specifies the existing leaseID to be used for the session. -// This is useful in process restart scenario, for example, to reclaim -// leadership from an election prior to restart. -func WithLease(leaseID v3.LeaseID) SessionOption { - return func(so *sessionOptions) { - so.leaseID = leaseID - } -} - -// WithContext assigns a context to the session instead of defaulting to -// using the client context. This is useful for canceling NewSession and -// Close operations immediately without having to close the client. If the -// context is canceled before Close() completes, the session's lease will be -// abandoned and left to expire instead of being revoked. 
-func WithContext(ctx context.Context) SessionOption { - return func(so *sessionOptions) { - so.ctx = ctx - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go deleted file mode 100644 index ee1151079..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "context" - "math" - - v3 "go.etcd.io/etcd/clientv3" -) - -// STM is an interface for software transactional memory. -type STM interface { - // Get returns the value for a key and inserts the key in the txn's read set. - // If Get fails, it aborts the transaction with an error, never returning. - Get(key ...string) string - // Put adds a value for a key to the write set. - Put(key, val string, opts ...v3.OpOption) - // Rev returns the revision of a key in the read set. - Rev(key string) int64 - // Del deletes a key. - Del(key string) - - // commit attempts to apply the txn's changes to the server. - commit() *v3.TxnResponse - reset() -} - -// Isolation is an enumeration of transactional isolation levels which -// describes how transactions should interfere and conflict. -type Isolation int - -const ( - // SerializableSnapshot provides serializable isolation and also checks - // for write conflicts. - SerializableSnapshot Isolation = iota - // Serializable reads within the same transaction attempt return data - // from the at the revision of the first read. - Serializable - // RepeatableReads reads within the same transaction attempt always - // return the same data. - RepeatableReads - // ReadCommitted reads keys from any committed revision. - ReadCommitted -) - -// stmError safely passes STM errors through panic to the STM error channel. -type stmError struct{ err error } - -type stmOptions struct { - iso Isolation - ctx context.Context - prefetch []string -} - -type stmOption func(*stmOptions) - -// WithIsolation specifies the transaction isolation level. -func WithIsolation(lvl Isolation) stmOption { - return func(so *stmOptions) { so.iso = lvl } -} - -// WithAbortContext specifies the context for permanently aborting the transaction. -func WithAbortContext(ctx context.Context) stmOption { - return func(so *stmOptions) { so.ctx = ctx } -} - -// WithPrefetch is a hint to prefetch a list of keys before trying to apply. -// If an STM transaction will unconditionally fetch a set of keys, prefetching -// those keys will save the round-trip cost from requesting each key one by one -// with Get(). -func WithPrefetch(keys ...string) stmOption { - return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) } -} - -// NewSTM initiates a new STM instance, using serializable snapshot isolation by default. 
-func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) { - opts := &stmOptions{ctx: c.Ctx()} - for _, f := range so { - f(opts) - } - if len(opts.prefetch) != 0 { - f := apply - apply = func(s STM) error { - s.Get(opts.prefetch...) - return f(s) - } - } - return runSTM(mkSTM(c, opts), apply) -} - -func mkSTM(c *v3.Client, opts *stmOptions) STM { - switch opts.iso { - case SerializableSnapshot: - s := &stmSerializable{ - stm: stm{client: c, ctx: opts.ctx}, - prefetch: make(map[string]*v3.GetResponse), - } - s.conflicts = func() []v3.Cmp { - return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...) - } - return s - case Serializable: - s := &stmSerializable{ - stm: stm{client: c, ctx: opts.ctx}, - prefetch: make(map[string]*v3.GetResponse), - } - s.conflicts = func() []v3.Cmp { return s.rset.cmps() } - return s - case RepeatableReads: - s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} - s.conflicts = func() []v3.Cmp { return s.rset.cmps() } - return s - case ReadCommitted: - s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} - s.conflicts = func() []v3.Cmp { return nil } - return s - default: - panic("unsupported stm") - } -} - -type stmResponse struct { - resp *v3.TxnResponse - err error -} - -func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) { - outc := make(chan stmResponse, 1) - go func() { - defer func() { - if r := recover(); r != nil { - e, ok := r.(stmError) - if !ok { - // client apply panicked - panic(r) - } - outc <- stmResponse{nil, e.err} - } - }() - var out stmResponse - for { - s.reset() - if out.err = apply(s); out.err != nil { - break - } - if out.resp = s.commit(); out.resp != nil { - break - } - } - outc <- out - }() - r := <-outc - return r.resp, r.err -} - -// stm implements repeatable-read software transactional memory over etcd -type stm struct { - client *v3.Client - ctx context.Context - // rset holds read key values and revisions - rset readSet - // wset holds overwritten keys and their values - wset writeSet - // getOpts are the opts used for gets - getOpts []v3.OpOption - // conflicts computes the current conflicts on the txn - conflicts func() []v3.Cmp -} - -type stmPut struct { - val string - op v3.Op -} - -type readSet map[string]*v3.GetResponse - -func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) { - for i, resp := range txnresp.Responses { - rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange()) - } -} - -// first returns the store revision from the first fetch -func (rs readSet) first() int64 { - ret := int64(math.MaxInt64 - 1) - for _, resp := range rs { - if rev := resp.Header.Revision; rev < ret { - ret = rev - } - } - return ret -} - -// cmps guards the txn from updates to read set -func (rs readSet) cmps() []v3.Cmp { - cmps := make([]v3.Cmp, 0, len(rs)) - for k, rk := range rs { - cmps = append(cmps, isKeyCurrent(k, rk)) - } - return cmps -} - -type writeSet map[string]stmPut - -func (ws writeSet) get(keys ...string) *stmPut { - for _, key := range keys { - if wv, ok := ws[key]; ok { - return &wv - } - } - return nil -} - -// cmps returns a cmp list testing no writes have happened past rev -func (ws writeSet) cmps(rev int64) []v3.Cmp { - cmps := make([]v3.Cmp, 0, len(ws)) - for key := range ws { - cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev)) - } - return cmps -} - -// puts is the list of ops for all pending writes -func (ws writeSet) puts() []v3.Op { - puts := make([]v3.Op, 0, len(ws)) - for _, v := 
range ws { - puts = append(puts, v.op) - } - return puts -} - -func (s *stm) Get(keys ...string) string { - if wv := s.wset.get(keys...); wv != nil { - return wv.val - } - return respToValue(s.fetch(keys...)) -} - -func (s *stm) Put(key, val string, opts ...v3.OpOption) { - s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)} -} - -func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} } - -func (s *stm) Rev(key string) int64 { - if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 { - return resp.Kvs[0].ModRevision - } - return 0 -} - -func (s *stm) commit() *v3.TxnResponse { - txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit() - if err != nil { - panic(stmError{err}) - } - if txnresp.Succeeded { - return txnresp - } - return nil -} - -func (s *stm) fetch(keys ...string) *v3.GetResponse { - if len(keys) == 0 { - return nil - } - ops := make([]v3.Op, len(keys)) - for i, key := range keys { - if resp, ok := s.rset[key]; ok { - return resp - } - ops[i] = v3.OpGet(key, s.getOpts...) - } - txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit() - if err != nil { - panic(stmError{err}) - } - s.rset.add(keys, txnresp) - return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange()) -} - -func (s *stm) reset() { - s.rset = make(map[string]*v3.GetResponse) - s.wset = make(map[string]stmPut) -} - -type stmSerializable struct { - stm - prefetch map[string]*v3.GetResponse -} - -func (s *stmSerializable) Get(keys ...string) string { - if wv := s.wset.get(keys...); wv != nil { - return wv.val - } - firstRead := len(s.rset) == 0 - for _, key := range keys { - if resp, ok := s.prefetch[key]; ok { - delete(s.prefetch, key) - s.rset[key] = resp - } - } - resp := s.stm.fetch(keys...) - if firstRead { - // txn's base revision is defined by the first read - s.getOpts = []v3.OpOption{ - v3.WithRev(resp.Header.Revision), - v3.WithSerializable(), - } - } - return respToValue(resp) -} - -func (s *stmSerializable) Rev(key string) int64 { - s.Get(key) - return s.stm.Rev(key) -} - -func (s *stmSerializable) gets() ([]string, []v3.Op) { - keys := make([]string, 0, len(s.rset)) - ops := make([]v3.Op, 0, len(s.rset)) - for k := range s.rset { - keys = append(keys, k) - ops = append(ops, v3.OpGet(k)) - } - return keys, ops -} - -func (s *stmSerializable) commit() *v3.TxnResponse { - keys, getops := s.gets() - txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...) - // use Else to prefetch keys in case of conflict to save a round trip - txnresp, err := txn.Else(getops...).Commit() - if err != nil { - panic(stmError{err}) - } - if txnresp.Succeeded { - return txnresp - } - // load prefetch with Else data - s.rset.add(keys, txnresp) - s.prefetch = s.rset - s.getOpts = nil - return nil -} - -func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp { - if len(r.Kvs) != 0 { - return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision) - } - return v3.Compare(v3.ModRevision(k), "=", 0) -} - -func respToValue(resp *v3.GetResponse) string { - if resp == nil || len(resp.Kvs) == 0 { - return "" - } - return string(resp.Kvs[0].Value) -} - -// NewSTMRepeatable is deprecated. -func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { - return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads)) -} - -// NewSTMSerializable is deprecated. 
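The read-set/write-set bookkeeping above is what turns a failed commit into a silent retry of the apply function. A hedged sketch using Rev and Del under RepeatableReads; the function and key names are illustrative.

package stmexample

import (
	"strconv"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

// RetireKey deletes key and bumps a tombstone counter in one transaction.
// If a concurrent writer invalidates the read set, apply reruns transparently.
func RetireKey(cli *clientv3.Client, key, tombstones string) error {
	_, err := concurrency.NewSTM(cli, func(s concurrency.STM) error {
		if s.Rev(key) == 0 {
			return nil // key already gone; commit an empty write set
		}
		n, _ := strconv.Atoi(s.Get(tombstones))
		s.Del(key)
		s.Put(tombstones, strconv.Itoa(n+1))
		return nil
	}, concurrency.WithIsolation(concurrency.RepeatableReads))
	return err
}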
-func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { - return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable)) -} - -// NewSTMReadCommitted is deprecated. -func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { - return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted)) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go deleted file mode 100644 index 96e94e1c0..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "crypto/tls" - "time" - - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type Config struct { - // Endpoints is a list of URLs. - Endpoints []string `json:"endpoints"` - - // AutoSyncInterval is the interval to update endpoints with its latest members. - // 0 disables auto-sync. By default auto-sync is disabled. - AutoSyncInterval time.Duration `json:"auto-sync-interval"` - - // DialTimeout is the timeout for failing to establish a connection. - DialTimeout time.Duration `json:"dial-timeout"` - - // DialKeepAliveTime is the time after which client pings the server to see if - // transport is alive. - DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` - - // DialKeepAliveTimeout is the time that the client waits for a response for the - // keep-alive probe. If the response is not received in this time, the connection is closed. - DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` - - // MaxCallSendMsgSize is the client-side request send limit in bytes. - // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024). - // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit. - // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). - MaxCallSendMsgSize int - - // MaxCallRecvMsgSize is the client-side response receive limit. - // If 0, it defaults to "math.MaxInt32", because range response can - // easily exceed request send limits. - // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. - // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). - MaxCallRecvMsgSize int - - // TLS holds the client secure credentials, if any. - TLS *tls.Config - - // Username is a user name for authentication. - Username string `json:"username"` - - // Password is a password for authentication. - Password string `json:"password"` - - // RejectOldCluster when set will refuse to create a client against an outdated cluster. - RejectOldCluster bool `json:"reject-old-cluster"` - - // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). 
- DialOptions []grpc.DialOption - - // Context is the default client context; it can be used to cancel grpc dial out and - // other operations that do not have an explicit context. - Context context.Context - - // LogConfig configures client-side logger. - // If nil, use the default logger. - // TODO: configure gRPC logger - LogConfig *zap.Config - - // PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs). - PermitWithoutStream bool `json:"permit-without-stream"` -} - -// DefaultLogConfig is the default client logging configuration. -// Default log level is "Warn". Use "zap.InfoLevel" for debugging. -// Use "/dev/null" for output paths, to discard all logs. -var DefaultLogConfig = zap.Config{ - Level: zap.NewAtomicLevelAt(zap.WarnLevel), - Development: false, - Sampling: &zap.SamplingConfig{ - Initial: 100, - Thereafter: 100, - }, - Encoding: "json", - EncoderConfig: zap.NewProductionEncoderConfig(), - - // Use "/dev/null" to discard all - OutputPaths: []string{"stderr"}, - ErrorOutputPaths: []string{"stderr"}, -} diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go deleted file mode 100644 index 649471774..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/doc.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package clientv3 implements the official Go etcd client for v3. -// -// Create client using `clientv3.New`: -// -// // expect dial time-out on ipv4 blackhole -// _, err := clientv3.New(clientv3.Config{ -// Endpoints: []string{"http://254.0.0.1:12345"}, -// DialTimeout: 2 * time.Second, -// }) -// -// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3 -// if err == context.DeadlineExceeded { -// // handle errors -// } -// -// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1 -// if err == grpc.ErrClientConnTimeout { -// // handle errors -// } -// -// cli, err := clientv3.New(clientv3.Config{ -// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, -// DialTimeout: 5 * time.Second, -// }) -// if err != nil { -// // handle error! -// } -// defer cli.Close() -// -// Make sure to close the client after using it. If the client is not closed, the -// connection will have leaky goroutines. -// -// To specify a client request timeout, wrap the context with context.WithTimeout: -// -// ctx, cancel := context.WithTimeout(context.Background(), timeout) -// resp, err := kvc.Put(ctx, "sample_key", "sample_value") -// cancel() -// if err != nil { -// // handle error! -// } -// // use the response -// -// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. -// Clients are safe for concurrent use by multiple goroutines. -// -// etcd client returns 3 types of errors: -// -// 1. context error: canceled or deadline exceeded. -// 2. gRPC status error: e.g. 
when clock drifts in server-side before client's context deadline exceeded. -// 3. gRPC error: see https://go.etcd.io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go -// -// Here is the example code to handle client errors: -// -// resp, err := kvc.Put(ctx, "", "") -// if err != nil { -// if err == context.Canceled { -// // ctx is canceled by another routine -// } else if err == context.DeadlineExceeded { -// // ctx is attached with a deadline and it exceeded -// } else if err == rpctypes.ErrEmptyKey { -// // client-side error: key is not provided -// } else if ev, ok := status.FromError(err); ok { -// code := ev.Code() -// if code == codes.DeadlineExceeded { -// // server-side context might have timed-out first (due to clock skew) -// // while original client-side context is not timed-out yet -// } -// } else { -// // bad cluster endpoints, which are not etcd servers -// } -// } -// -// go func() { cli.Close() }() -// _, err := kvc.Get(ctx, "a") -// if err != nil { -// // with etcd clientv3 <= v3.3 -// if err == context.Canceled { -// // grpc balancer calls 'Get' with an inflight client.Close -// } else if err == grpc.ErrClientConnClosing { -// // grpc balancer calls 'Get' after client.Close. -// } -// // with etcd clientv3 >= v3.4 -// if clientv3.IsConnCanceled(err) { -// // gRPC client connection is closed -// } -// } -// -// The grpc load balancer is registered statically and is shared across etcd clients. -// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment -// variable. E.g. "ETCD_CLIENT_DEBUG=1". -// -package clientv3 diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go deleted file mode 100644 index 2b7864ad8..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" -) - -type ( - CompactResponse pb.CompactionResponse - PutResponse pb.PutResponse - GetResponse pb.RangeResponse - DeleteResponse pb.DeleteRangeResponse - TxnResponse pb.TxnResponse -) - -type KV interface { - // Put puts a key-value pair into etcd. - // Note that key,value can be plain bytes array and string is - // an immutable representation of that bytes array. - // To get a string of bytes, do string([]byte{0x10, 0x20}). - Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) - - // Get retrieves keys. - // By default, Get will return the value for "key", if any. - // When passed WithRange(end), Get will return the keys in the range [key, end). - // When passed WithFromKey(), Get returns keys greater than or equal to key. - // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision; - // if the required revision is compacted, the request will fail with ErrCompacted . 
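The Get contract spelled out above (a WithRev read fails once that revision is compacted) can be handled explicitly; a hedged sketch, with the revision number purely illustrative.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	resp, err := cli.Get(ctx, "sample_key", clientv3.WithRev(42)) // read as of revision 42
	switch {
	case err == rpctypes.ErrCompacted:
		fmt.Println("revision 42 has already been compacted")
	case err != nil:
		log.Fatal(err)
	case len(resp.Kvs) == 0:
		fmt.Println("key did not exist at that revision")
	default:
		fmt.Printf("value at rev 42: %s\n", resp.Kvs[0].Value)
	}
}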
- // When passed WithLimit(limit), the number of returned keys is bounded by limit. - // When passed WithSort(), the keys will be sorted. - Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) - - // Delete deletes a key, or optionally using WithRange(end), [key, end). - Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) - - // Compact compacts etcd KV history before the given rev. - Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) - - // Do applies a single Op on KV without a transaction. - // Do is useful when creating arbitrary operations to be issued at a - // later time; the user can range over the operations, calling Do to - // execute them. Get/Put/Delete, on the other hand, are best suited - // for when the operation should be issued at the time of declaration. - Do(ctx context.Context, op Op) (OpResponse, error) - - // Txn creates a transaction. - Txn(ctx context.Context) Txn -} - -type OpResponse struct { - put *PutResponse - get *GetResponse - del *DeleteResponse - txn *TxnResponse -} - -func (op OpResponse) Put() *PutResponse { return op.put } -func (op OpResponse) Get() *GetResponse { return op.get } -func (op OpResponse) Del() *DeleteResponse { return op.del } -func (op OpResponse) Txn() *TxnResponse { return op.txn } - -func (resp *PutResponse) OpResponse() OpResponse { - return OpResponse{put: resp} -} -func (resp *GetResponse) OpResponse() OpResponse { - return OpResponse{get: resp} -} -func (resp *DeleteResponse) OpResponse() OpResponse { - return OpResponse{del: resp} -} -func (resp *TxnResponse) OpResponse() OpResponse { - return OpResponse{txn: resp} -} - -type kv struct { - remote pb.KVClient - callOpts []grpc.CallOption -} - -func NewKV(c *Client) KV { - api := &kv{remote: RetryKVClient(c)} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { - api := &kv{remote: remote} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { - r, err := kv.Do(ctx, OpPut(key, val, opts...)) - return r.put, toErr(ctx, err) -} - -func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { - r, err := kv.Do(ctx, OpGet(key, opts...)) - return r.get, toErr(ctx, err) -} - -func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { - r, err := kv.Do(ctx, OpDelete(key, opts...)) - return r.del, toErr(ctx, err) -} - -func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { - resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*CompactResponse)(resp), err -} - -func (kv *kv) Txn(ctx context.Context) Txn { - return &txn{ - kv: kv, - ctx: ctx, - callOpts: kv.callOpts, - } -} - -func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { - var err error - switch op.t { - case tRange: - var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) - if err == nil { - return OpResponse{get: (*GetResponse)(resp)}, nil - } - case tPut: - var resp *pb.PutResponse - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} - resp, err = kv.remote.Put(ctx, r, kv.callOpts...) 
- if err == nil { - return OpResponse{put: (*PutResponse)(resp)}, nil - } - case tDeleteRange: - var resp *pb.DeleteRangeResponse - r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) - if err == nil { - return OpResponse{del: (*DeleteResponse)(resp)}, nil - } - case tTxn: - var resp *pb.TxnResponse - resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) - if err == nil { - return OpResponse{txn: (*TxnResponse)(resp)}, nil - } - default: - panic("Unknown op") - } - return OpResponse{}, toErr(ctx, err) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go deleted file mode 100644 index 68a8ae6b3..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ /dev/null @@ -1,597 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "sync" - "time" - - "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -type ( - LeaseRevokeResponse pb.LeaseRevokeResponse - LeaseID int64 -) - -// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. -type LeaseGrantResponse struct { - *pb.ResponseHeader - ID LeaseID - TTL int64 - Error string -} - -// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. -type LeaseKeepAliveResponse struct { - *pb.ResponseHeader - ID LeaseID - TTL int64 -} - -// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. -type LeaseTimeToLiveResponse struct { - *pb.ResponseHeader - ID LeaseID `json:"id"` - - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1. - TTL int64 `json:"ttl"` - - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - GrantedTTL int64 `json:"granted-ttl"` - - // Keys is the list of keys attached to this lease. - Keys [][]byte `json:"keys"` -} - -// LeaseStatus represents a lease status. -type LeaseStatus struct { - ID LeaseID `json:"id"` - // TODO: TTL int64 -} - -// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse. -type LeaseLeasesResponse struct { - *pb.ResponseHeader - Leases []LeaseStatus `json:"leases"` -} - -const ( - // defaultTTL is the assumed lease TTL used for the first keepalive - // deadline before the actual TTL is known to the client. - defaultTTL = 5 * time.Second - // NoLease is a lease ID for the absence of a lease. - NoLease LeaseID = 0 - - // retryConnWait is how long to wait before retrying request due to an error - retryConnWait = 500 * time.Millisecond -) - -// LeaseResponseChSize is the size of buffer to store unsent lease responses. -// WARNING: DO NOT UPDATE. -// Only for testing purposes. 
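The lease response types above pair with the KV-side WithLease option; a sketch of granting a lease, binding a key to it, and checking the remaining TTL (key, value, and TTL are illustrative).

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	ctx := context.Background()

	// Grant a 30-second lease; the key below vanishes when the lease expires.
	grant, err := cli.Grant(ctx, 30)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(ctx, "service/instance-1", "10.0.0.7", clientv3.WithLease(grant.ID)); err != nil {
		log.Fatal(err)
	}

	ttl, err := cli.TimeToLive(ctx, grant.ID, clientv3.WithAttachedKeys())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("lease %x: %ds remaining, %d attached key(s)\n", grant.ID, ttl.TTL, len(ttl.Keys))
}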
-var LeaseResponseChSize = 16 - -// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. -// -// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. -type ErrKeepAliveHalted struct { - Reason error -} - -func (e ErrKeepAliveHalted) Error() string { - s := "etcdclient: leases keep alive halted" - if e.Reason != nil { - s += ": " + e.Reason.Error() - } - return s -} - -type Lease interface { - // Grant creates a new lease. - Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) - - // Revoke revokes the given lease. - Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) - - // TimeToLive retrieves the lease information of the given lease ID. - TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) - - // Leases retrieves all leases. - Leases(ctx context.Context) (*LeaseLeasesResponse, error) - - // KeepAlive keeps the given lease alive forever. If the keepalive response - // posted to the channel is not consumed immediately, the lease client will - // continue sending keep alive requests to the etcd server at least every - // second until latest response is consumed. - // - // The returned "LeaseKeepAliveResponse" channel closes if underlying keep - // alive stream is interrupted in some way the client cannot handle itself; - // given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse" - // from this closed channel is nil. - // - // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: - // no leader") or canceled by the caller (e.g. context.Canceled), the error - // is returned. Otherwise, it retries. - // - // TODO(v4.0): post errors to last keep alive message before closing - // (see https://go.etcd.io/etcd/pull/7866) - KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) - - // KeepAliveOnce renews the lease once. The response corresponds to the - // first message from calling KeepAlive. If the response has a recoverable - // error, KeepAliveOnce will retry the RPC with a new keep alive message. - // - // In most of the cases, Keepalive should be used instead of KeepAliveOnce. - KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) - - // Close releases all resources Lease keeps for efficient communication - // with the etcd server. - Close() error -} - -type lessor struct { - mu sync.Mutex // guards all fields - - // donec is closed and loopErr is set when recvKeepAliveLoop stops - donec chan struct{} - loopErr error - - remote pb.LeaseClient - - stream pb.Lease_LeaseKeepAliveClient - streamCancel context.CancelFunc - - stopCtx context.Context - stopCancel context.CancelFunc - - keepAlives map[LeaseID]*keepAlive - - // firstKeepAliveTimeout is the timeout for the first keepalive request - // before the actual TTL is known to the lease client - firstKeepAliveTimeout time.Duration - - // firstKeepAliveOnce ensures stream starts after first KeepAlive call. 
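The KeepAlive contract described above (drain the channel; a closed channel means renewal has stopped) suggests a consumption loop roughly like the following sketch; the package and function names are illustrative.

package leasekeeper

import (
	"context"
	"fmt"
	"log"

	"go.etcd.io/etcd/clientv3"
)

// keepLeaseAlive renews id until ctx is canceled or the client stops renewing.
func keepLeaseAlive(ctx context.Context, cli *clientv3.Client, id clientv3.LeaseID) error {
	ch, err := cli.KeepAlive(ctx, id)
	if err != nil {
		return err // e.g. ErrKeepAliveHalted if the renewal loop already died
	}
	for resp := range ch {
		// Draining promptly keeps the lessor sending renewals on schedule.
		log.Printf("lease %x renewed, ttl=%ds", resp.ID, resp.TTL)
	}
	return fmt.Errorf("keepalive channel closed; lease %x is no longer being renewed", id)
}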
- firstKeepAliveOnce sync.Once - - callOpts []grpc.CallOption - - lg *zap.Logger -} - -// keepAlive multiplexes a keepalive for a lease over multiple channels -type keepAlive struct { - chs []chan<- *LeaseKeepAliveResponse - ctxs []context.Context - // deadline is the time the keep alive channels close if no response - deadline time.Time - // nextKeepAlive is when to send the next keep alive message - nextKeepAlive time.Time - // donec is closed on lease revoke, expiration, or cancel. - donec chan struct{} -} - -func NewLease(c *Client) Lease { - return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) -} - -func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { - l := &lessor{ - donec: make(chan struct{}), - keepAlives: make(map[LeaseID]*keepAlive), - remote: remote, - firstKeepAliveTimeout: keepAliveTimeout, - lg: c.lg, - } - if l.firstKeepAliveTimeout == time.Second { - l.firstKeepAliveTimeout = defaultTTL - } - if c != nil { - l.callOpts = c.callOpts - } - reqLeaderCtx := WithRequireLeader(context.Background()) - l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) - return l -} - -func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { - r := &pb.LeaseGrantRequest{TTL: ttl} - resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) - if err == nil { - gresp := &LeaseGrantResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - Error: resp.Error, - } - return gresp, nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { - r := &pb.LeaseRevokeRequest{ID: int64(id)} - resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...) - if err == nil { - return (*LeaseRevokeResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { - r := toLeaseTimeToLiveRequest(id, opts...) - resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) - if err == nil { - gresp := &LeaseTimeToLiveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - GrantedTTL: resp.GrantedTTL, - Keys: resp.Keys, - } - return gresp, nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { - resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...) 
- if err == nil { - leases := make([]LeaseStatus, len(resp.Leases)) - for i := range resp.Leases { - leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)} - } - return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { - ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize) - - l.mu.Lock() - // ensure that recvKeepAliveLoop is still running - select { - case <-l.donec: - err := l.loopErr - l.mu.Unlock() - close(ch) - return ch, ErrKeepAliveHalted{Reason: err} - default: - } - ka, ok := l.keepAlives[id] - if !ok { - // create fresh keep alive - ka = &keepAlive{ - chs: []chan<- *LeaseKeepAliveResponse{ch}, - ctxs: []context.Context{ctx}, - deadline: time.Now().Add(l.firstKeepAliveTimeout), - nextKeepAlive: time.Now(), - donec: make(chan struct{}), - } - l.keepAlives[id] = ka - } else { - // add channel and context to existing keep alive - ka.ctxs = append(ka.ctxs, ctx) - ka.chs = append(ka.chs, ch) - } - l.mu.Unlock() - - go l.keepAliveCtxCloser(ctx, id, ka.donec) - l.firstKeepAliveOnce.Do(func() { - go l.recvKeepAliveLoop() - go l.deadlineLoop() - }) - - return ch, nil -} - -func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - for { - resp, err := l.keepAliveOnce(ctx, id) - if err == nil { - if resp.TTL <= 0 { - err = rpctypes.ErrLeaseNotFound - } - return resp, err - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } - } -} - -func (l *lessor) Close() error { - l.stopCancel() - // close for synchronous teardown if stream goroutines never launched - l.firstKeepAliveOnce.Do(func() { close(l.donec) }) - <-l.donec - return nil -} - -func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) { - select { - case <-donec: - return - case <-l.donec: - return - case <-ctx.Done(): - } - - l.mu.Lock() - defer l.mu.Unlock() - - ka, ok := l.keepAlives[id] - if !ok { - return - } - - // close channel and remove context if still associated with keep alive - for i, c := range ka.ctxs { - if c == ctx { - close(ka.chs[i]) - ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) - ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) - break - } - } - // remove if no one more listeners - if len(ka.chs) == 0 { - delete(l.keepAlives, id) - } -} - -// closeRequireLeader scans keepAlives for ctxs that have require leader -// and closes the associated channels. 
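closeRequireLeader, which follows, only touches keepalives whose context carries the require-leader metadata; callers opt in with WithRequireLeader so that losing the cluster leader closes the channel instead of retrying silently. A hedged sketch, with the function name illustrative:

package leasekeeper

import (
	"context"
	"log"

	"go.etcd.io/etcd/clientv3"
)

// renewWhileLeader renews id only while the cluster has a leader; on "no leader"
// the keepalive channel is closed and the function returns.
func renewWhileLeader(cli *clientv3.Client, id clientv3.LeaseID) {
	ctx := clientv3.WithRequireLeader(context.Background())
	ch, err := cli.KeepAlive(ctx, id)
	if err != nil {
		log.Fatal(err)
	}
	for range ch {
		// renewals flow while a leader exists
	}
	log.Printf("lease %x keepalive ended (lost leader, expiry, or shutdown)", id)
}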
-func (l *lessor) closeRequireLeader() { - l.mu.Lock() - defer l.mu.Unlock() - for _, ka := range l.keepAlives { - reqIdxs := 0 - // find all required leader channels, close, mark as nil - for i, ctx := range ka.ctxs { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - continue - } - ks := md[rpctypes.MetadataRequireLeaderKey] - if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { - continue - } - close(ka.chs[i]) - ka.chs[i] = nil - reqIdxs++ - } - if reqIdxs == 0 { - continue - } - // remove all channels that required a leader from keepalive - newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) - newCtxs := make([]context.Context, len(newChs)) - newIdx := 0 - for i := range ka.chs { - if ka.chs[i] == nil { - continue - } - newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] - newIdx++ - } - ka.chs, ka.ctxs = newChs, newCtxs - } -} - -func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - cctx, cancel := context.WithCancel(ctx) - defer cancel() - - stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - - err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) - if err != nil { - return nil, toErr(ctx, err) - } - - resp, rerr := stream.Recv() - if rerr != nil { - return nil, toErr(ctx, rerr) - } - - karesp := &LeaseKeepAliveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - } - return karesp, nil -} - -func (l *lessor) recvKeepAliveLoop() (gerr error) { - defer func() { - l.mu.Lock() - close(l.donec) - l.loopErr = gerr - for _, ka := range l.keepAlives { - ka.close() - } - l.keepAlives = make(map[LeaseID]*keepAlive) - l.mu.Unlock() - }() - - for { - stream, err := l.resetRecv() - if err != nil { - if canceledByCaller(l.stopCtx, err) { - return err - } - } else { - for { - resp, err := stream.Recv() - if err != nil { - if canceledByCaller(l.stopCtx, err) { - return err - } - - if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { - l.closeRequireLeader() - } - break - } - - l.recvKeepAlive(resp) - } - } - - select { - case <-time.After(retryConnWait): - case <-l.stopCtx.Done(): - return l.stopCtx.Err() - } - } -} - -// resetRecv opens a new lease stream and starts sending keep alive requests. -func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { - sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...) 
- if err != nil { - cancel() - return nil, err - } - - l.mu.Lock() - defer l.mu.Unlock() - if l.stream != nil && l.streamCancel != nil { - l.streamCancel() - } - - l.streamCancel = cancel - l.stream = stream - - go l.sendKeepAliveLoop(stream) - return stream, nil -} - -// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse -func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { - karesp := &LeaseKeepAliveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - } - - l.mu.Lock() - defer l.mu.Unlock() - - ka, ok := l.keepAlives[karesp.ID] - if !ok { - return - } - - if karesp.TTL <= 0 { - // lease expired; close all keep alive channels - delete(l.keepAlives, karesp.ID) - ka.close() - return - } - - // send update to all channels - nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0) - ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second) - for _, ch := range ka.chs { - select { - case ch <- karesp: - default: - if l.lg != nil { - l.lg.Warn("lease keepalive response queue is full; dropping response send", - zap.Int("queue-size", len(ch)), - zap.Int("queue-capacity", cap(ch)), - ) - } - } - // still advance in order to rate-limit keep-alive sends - ka.nextKeepAlive = nextKeepAlive - } -} - -// deadlineLoop reaps any keep alive channels that have not received a response -// within the lease TTL -func (l *lessor) deadlineLoop() { - for { - select { - case <-time.After(time.Second): - case <-l.donec: - return - } - now := time.Now() - l.mu.Lock() - for id, ka := range l.keepAlives { - if ka.deadline.Before(now) { - // waited too long for response; lease may be expired - ka.close() - delete(l.keepAlives, id) - } - } - l.mu.Unlock() - } -} - -// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. -func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { - for { - var tosend []LeaseID - - now := time.Now() - l.mu.Lock() - for id, ka := range l.keepAlives { - if ka.nextKeepAlive.Before(now) { - tosend = append(tosend, id) - } - } - l.mu.Unlock() - - for _, id := range tosend { - r := &pb.LeaseKeepAliveRequest{ID: int64(id)} - if err := stream.Send(r); err != nil { - // TODO do something with this error? - return - } - } - - select { - case <-time.After(retryConnWait): - case <-stream.Context().Done(): - return - case <-l.donec: - return - case <-l.stopCtx.Done(): - return - } - } -} - -func (ka *keepAlive) close() { - close(ka.donec) - for _, ch := range ka.chs { - close(ch) - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go deleted file mode 100644 index f5ae0109d..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "io/ioutil" - "sync" - - "go.etcd.io/etcd/pkg/logutil" - - "google.golang.org/grpc/grpclog" -) - -var ( - lgMu sync.RWMutex - lg logutil.Logger -) - -type settableLogger struct { - l grpclog.LoggerV2 - mu sync.RWMutex -} - -func init() { - // disable client side logs by default - lg = &settableLogger{} - SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) -} - -// SetLogger sets client-side Logger. -func SetLogger(l grpclog.LoggerV2) { - lgMu.Lock() - lg = logutil.NewLogger(l) - // override grpclog so that any changes happen with locking - grpclog.SetLoggerV2(lg) - lgMu.Unlock() -} - -// GetLogger returns the current logutil.Logger. -func GetLogger() logutil.Logger { - lgMu.RLock() - l := lg - lgMu.RUnlock() - return l -} - -// NewLogger returns a new Logger with logutil.Logger. -func NewLogger(gl grpclog.LoggerV2) logutil.Logger { - return &settableLogger{l: gl} -} - -func (s *settableLogger) get() grpclog.LoggerV2 { - s.mu.RLock() - l := s.l - s.mu.RUnlock() - return l -} - -// implement the grpclog.LoggerV2 interface - -func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } -func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } -func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } -func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } -func (s *settableLogger) Warningf(format string, args ...interface{}) { - s.get().Warningf(format, args...) -} -func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } -func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } -func (s *settableLogger) Errorf(format string, args ...interface{}) { - s.get().Errorf(format, args...) -} -func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } -func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } -func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } -func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } -func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } -func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) } -func (s *settableLogger) V(l int) bool { return s.get().V(l) } -func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 { - s.mu.RLock() - l := s.l - s.mu.RUnlock() - if l.V(lvl) { - return s - } - return logutil.NewDiscardLogger() -} diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go deleted file mode 100644 index 744455a3b..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "fmt" - "io" - - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" -) - -type ( - DefragmentResponse pb.DefragmentResponse - AlarmResponse pb.AlarmResponse - AlarmMember pb.AlarmMember - StatusResponse pb.StatusResponse - HashKVResponse pb.HashKVResponse - MoveLeaderResponse pb.MoveLeaderResponse -) - -type Maintenance interface { - // AlarmList gets all active alarms. - AlarmList(ctx context.Context) (*AlarmResponse, error) - - // AlarmDisarm disarms a given alarm. - AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) - - // Defragment releases wasted space from internal fragmentation on a given etcd member. - // Defragment is only needed when deleting a large number of keys and want to reclaim - // the resources. - // Defragment is an expensive operation. User should avoid defragmenting multiple members - // at the same time. - // To defragment multiple members in the cluster, user need to call defragment multiple - // times with different endpoints. - Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) - - // Status gets the status of the endpoint. - Status(ctx context.Context, endpoint string) (*StatusResponse, error) - - // HashKV returns a hash of the KV state at the time of the RPC. - // If revision is zero, the hash is computed on all keys. If the revision - // is non-zero, the hash is computed on all keys at or below the given revision. - HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) - - // Snapshot provides a reader for a point-in-time snapshot of etcd. - // If the context "ctx" is canceled or timed out, reading from returned - // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded). - Snapshot(ctx context.Context) (io.ReadCloser, error) - - // MoveLeader requests current leader to transfer its leadership to the transferee. - // Request must be made to the leader. - MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) -} - -type maintenance struct { - dial func(endpoint string) (pb.MaintenanceClient, func(), error) - remote pb.MaintenanceClient - callOpts []grpc.CallOption -} - -func NewMaintenance(c *Client) Maintenance { - api := &maintenance{ - dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { - conn, err := c.Dial(endpoint) - if err != nil { - return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err) - } - cancel := func() { conn.Close() } - return RetryMaintenanceClient(c, conn), cancel, nil - }, - remote: RetryMaintenanceClient(c, c.conn), - } - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { - api := &maintenance{ - dial: func(string) (pb.MaintenanceClient, func(), error) { - return remote, func() {}, nil - }, - remote: remote, - } - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { - req := &pb.AlarmRequest{ - Action: pb.AlarmRequest_GET, - MemberID: 0, // all - Alarm: pb.AlarmType_NONE, // all - } - resp, err := m.remote.Alarm(ctx, req, m.callOpts...) 
- if err == nil { - return (*AlarmResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { - req := &pb.AlarmRequest{ - Action: pb.AlarmRequest_DEACTIVATE, - MemberID: am.MemberID, - Alarm: am.Alarm, - } - - if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { - ar, err := m.AlarmList(ctx) - if err != nil { - return nil, toErr(ctx, err) - } - ret := AlarmResponse{} - for _, am := range ar.Alarms { - dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) - if derr != nil { - return nil, toErr(ctx, derr) - } - ret.Alarms = append(ret.Alarms, dresp.Alarms...) - } - return &ret, nil - } - - resp, err := m.remote.Alarm(ctx, req, m.callOpts...) - if err == nil { - return (*AlarmResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*DefragmentResponse)(resp), nil -} - -func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*StatusResponse)(resp), nil -} - -func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*HashKVResponse)(resp), nil -} - -func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) - if err != nil { - return nil, toErr(ctx, err) - } - - pr, pw := io.Pipe() - go func() { - for { - resp, err := ss.Recv() - if err != nil { - pw.CloseWithError(err) - return - } - if resp == nil && err == nil { - break - } - if _, werr := pw.Write(resp.Blob); werr != nil { - pw.CloseWithError(werr) - return - } - } - pw.Close() - }() - return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil -} - -type snapshotReadCloser struct { - ctx context.Context - io.ReadCloser -} - -func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) { - n, err = rc.ReadCloser.Read(p) - return n, toErr(rc.ctx, err) -} - -func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { - resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) - return (*MoveLeaderResponse)(resp), toErr(ctx, err) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go deleted file mode 100644 index a3e7d3e77..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ /dev/null @@ -1,539 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - -type opType int - -const ( - // A default Op has opType 0, which is invalid. - tRange opType = iota + 1 - tPut - tDeleteRange - tTxn -) - -var noPrefixEnd = []byte{0} - -// Op represents an Operation that kv can execute. -type Op struct { - t opType - key []byte - end []byte - - // for range - limit int64 - sort *SortOption - serializable bool - keysOnly bool - countOnly bool - minModRev int64 - maxModRev int64 - minCreateRev int64 - maxCreateRev int64 - - // for range, watch - rev int64 - - // for watch, put, delete - prevKV bool - - // for watch - // fragmentation should be disabled by default - // if true, split watch events when total exceeds - // "--max-request-bytes" flag value + 512-byte - fragment bool - - // for put - ignoreValue bool - ignoreLease bool - - // progressNotify is for progress updates. - progressNotify bool - // createdNotify is for created event - createdNotify bool - // filters for watchers - filterPut bool - filterDelete bool - - // for put - val []byte - leaseID LeaseID - - // txn - cmps []Cmp - thenOps []Op - elseOps []Op -} - -// accessors / mutators - -// IsTxn returns true if the "Op" type is transaction. -func (op Op) IsTxn() bool { - return op.t == tTxn -} - -// Txn returns the comparison(if) operations, "then" operations, and "else" operations. -func (op Op) Txn() ([]Cmp, []Op, []Op) { - return op.cmps, op.thenOps, op.elseOps -} - -// KeyBytes returns the byte slice holding the Op's key. -func (op Op) KeyBytes() []byte { return op.key } - -// WithKeyBytes sets the byte slice for the Op's key. -func (op *Op) WithKeyBytes(key []byte) { op.key = key } - -// RangeBytes returns the byte slice holding with the Op's range end, if any. -func (op Op) RangeBytes() []byte { return op.end } - -// Rev returns the requested revision, if any. -func (op Op) Rev() int64 { return op.rev } - -// IsPut returns true iff the operation is a Put. -func (op Op) IsPut() bool { return op.t == tPut } - -// IsGet returns true iff the operation is a Get. -func (op Op) IsGet() bool { return op.t == tRange } - -// IsDelete returns true iff the operation is a Delete. -func (op Op) IsDelete() bool { return op.t == tDeleteRange } - -// IsSerializable returns true if the serializable field is true. -func (op Op) IsSerializable() bool { return op.serializable == true } - -// IsKeysOnly returns whether keysOnly is set. -func (op Op) IsKeysOnly() bool { return op.keysOnly == true } - -// IsCountOnly returns whether countOnly is set. -func (op Op) IsCountOnly() bool { return op.countOnly == true } - -// MinModRev returns the operation's minimum modify revision. -func (op Op) MinModRev() int64 { return op.minModRev } - -// MaxModRev returns the operation's maximum modify revision. -func (op Op) MaxModRev() int64 { return op.maxModRev } - -// MinCreateRev returns the operation's minimum create revision. -func (op Op) MinCreateRev() int64 { return op.minCreateRev } - -// MaxCreateRev returns the operation's maximum create revision. 
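The Op accessors above mainly exist so requests can be assembled programmatically; a sketch of a create-if-absent transaction built from the Op constructors defined below (function and key names are illustrative).

package opexample

import (
	"context"

	"go.etcd.io/etcd/clientv3"
)

// createIfAbsent writes key only if it has never been created; otherwise it
// returns the existing value, all in one transaction round trip.
func createIfAbsent(ctx context.Context, cli *clientv3.Client, key, val string) (string, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)). // key does not exist yet
		Then(clientv3.OpPut(key, val)).
		Else(clientv3.OpGet(key)).
		Commit()
	if err != nil {
		return "", err
	}
	if resp.Succeeded {
		return val, nil
	}
	// The Else branch ran, so the key exists and the Get returned it.
	return string(resp.Responses[0].GetResponseRange().Kvs[0].Value), nil
}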
-func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } - -// WithRangeBytes sets the byte slice for the Op's range end. -func (op *Op) WithRangeBytes(end []byte) { op.end = end } - -// ValueBytes returns the byte slice holding the Op's value, if any. -func (op Op) ValueBytes() []byte { return op.val } - -// WithValueBytes sets the byte slice for the Op's value. -func (op *Op) WithValueBytes(v []byte) { op.val = v } - -func (op Op) toRangeRequest() *pb.RangeRequest { - if op.t != tRange { - panic("op.t != tRange") - } - r := &pb.RangeRequest{ - Key: op.key, - RangeEnd: op.end, - Limit: op.limit, - Revision: op.rev, - Serializable: op.serializable, - KeysOnly: op.keysOnly, - CountOnly: op.countOnly, - MinModRevision: op.minModRev, - MaxModRevision: op.maxModRev, - MinCreateRevision: op.minCreateRev, - MaxCreateRevision: op.maxCreateRev, - } - if op.sort != nil { - r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) - r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) - } - return r -} - -func (op Op) toTxnRequest() *pb.TxnRequest { - thenOps := make([]*pb.RequestOp, len(op.thenOps)) - for i, tOp := range op.thenOps { - thenOps[i] = tOp.toRequestOp() - } - elseOps := make([]*pb.RequestOp, len(op.elseOps)) - for i, eOp := range op.elseOps { - elseOps[i] = eOp.toRequestOp() - } - cmps := make([]*pb.Compare, len(op.cmps)) - for i := range op.cmps { - cmps[i] = (*pb.Compare)(&op.cmps[i]) - } - return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} -} - -func (op Op) toRequestOp() *pb.RequestOp { - switch op.t { - case tRange: - return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} - case tPut: - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} - return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} - case tDeleteRange: - r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} - case tTxn: - return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} - default: - panic("Unknown Op") - } -} - -func (op Op) isWrite() bool { - if op.t == tTxn { - for _, tOp := range op.thenOps { - if tOp.isWrite() { - return true - } - } - for _, tOp := range op.elseOps { - if tOp.isWrite() { - return true - } - } - return false - } - return op.t != tRange -} - -// OpGet returns "get" operation based on given key and operation options. -func OpGet(key string, opts ...OpOption) Op { - ret := Op{t: tRange, key: []byte(key)} - ret.applyOpts(opts) - return ret -} - -// OpDelete returns "delete" operation based on given key and operation options. 
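OpGet above (and OpPut/OpDelete below) also feed KV.Do directly, which suits operations built ahead of time; a short hedged sketch:

package opexample

import (
	"context"

	"go.etcd.io/etcd/clientv3"
)

// countPrefix returns how many keys live under prefix using a pre-built Op.
func countPrefix(ctx context.Context, cli *clientv3.Client, prefix string) (int64, error) {
	op := clientv3.OpGet(prefix, clientv3.WithPrefix(), clientv3.WithCountOnly())
	resp, err := cli.Do(ctx, op)
	if err != nil {
		return 0, err
	}
	return resp.Get().Count, nil
}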
-func OpDelete(key string, opts ...OpOption) Op { - ret := Op{t: tDeleteRange, key: []byte(key)} - ret.applyOpts(opts) - switch { - case ret.leaseID != 0: - panic("unexpected lease in delete") - case ret.limit != 0: - panic("unexpected limit in delete") - case ret.rev != 0: - panic("unexpected revision in delete") - case ret.sort != nil: - panic("unexpected sort in delete") - case ret.serializable: - panic("unexpected serializable in delete") - case ret.countOnly: - panic("unexpected countOnly in delete") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in delete") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in delete") - case ret.filterDelete, ret.filterPut: - panic("unexpected filter in delete") - case ret.createdNotify: - panic("unexpected createdNotify in delete") - } - return ret -} - -// OpPut returns "put" operation based on given key-value and operation options. -func OpPut(key, val string, opts ...OpOption) Op { - ret := Op{t: tPut, key: []byte(key), val: []byte(val)} - ret.applyOpts(opts) - switch { - case ret.end != nil: - panic("unexpected range in put") - case ret.limit != 0: - panic("unexpected limit in put") - case ret.rev != 0: - panic("unexpected revision in put") - case ret.sort != nil: - panic("unexpected sort in put") - case ret.serializable: - panic("unexpected serializable in put") - case ret.countOnly: - panic("unexpected countOnly in put") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in put") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in put") - case ret.filterDelete, ret.filterPut: - panic("unexpected filter in put") - case ret.createdNotify: - panic("unexpected createdNotify in put") - } - return ret -} - -// OpTxn returns "txn" operation based on given transaction conditions. -func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { - return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} -} - -func opWatch(key string, opts ...OpOption) Op { - ret := Op{t: tRange, key: []byte(key)} - ret.applyOpts(opts) - switch { - case ret.leaseID != 0: - panic("unexpected lease in watch") - case ret.limit != 0: - panic("unexpected limit in watch") - case ret.sort != nil: - panic("unexpected sort in watch") - case ret.serializable: - panic("unexpected serializable in watch") - case ret.countOnly: - panic("unexpected countOnly in watch") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in watch") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in watch") - } - return ret -} - -func (op *Op) applyOpts(opts []OpOption) { - for _, opt := range opts { - opt(op) - } -} - -// OpOption configures Operations like Get, Put, Delete. -type OpOption func(*Op) - -// WithLease attaches a lease ID to a key in 'Put' request. -func WithLease(leaseID LeaseID) OpOption { - return func(op *Op) { op.leaseID = leaseID } -} - -// WithLimit limits the number of results to return from 'Get' request. -// If WithLimit is given a 0 limit, it is treated as no limit. -func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } - -// WithRev specifies the store revision for 'Get' request. -// Or the start revision of 'Watch' request. -func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } - -// WithSort specifies the ordering in 'Get' request. 
It requires -// 'WithRange' and/or 'WithPrefix' to be specified too. -// 'target' specifies the target to sort by: key, version, revisions, value. -// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'. -func WithSort(target SortTarget, order SortOrder) OpOption { - return func(op *Op) { - if target == SortByKey && order == SortAscend { - // If order != SortNone, server fetches the entire key-space, - // and then applies the sort and limit, if provided. - // Since by default the server returns results sorted by keys - // in lexicographically ascending order, the client should ignore - // SortOrder if the target is SortByKey. - order = SortNone - } - op.sort = &SortOption{target, order} - } -} - -// GetPrefixRangeEnd gets the range end of the prefix. -// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. -func GetPrefixRangeEnd(prefix string) string { - return string(getPrefix([]byte(prefix))) -} - -func getPrefix(key []byte) []byte { - end := make([]byte, len(key)) - copy(end, key) - for i := len(end) - 1; i >= 0; i-- { - if end[i] < 0xff { - end[i] = end[i] + 1 - end = end[:i+1] - return end - } - } - // next prefix does not exist (e.g., 0xffff); - // default to WithFromKey policy - return noPrefixEnd -} - -// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate -// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())' -// can return 'foo1', 'foo2', and so on. -func WithPrefix() OpOption { - return func(op *Op) { - if len(op.key) == 0 { - op.key, op.end = []byte{0}, []byte{0} - return - } - op.end = getPrefix(op.key) - } -} - -// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests. -// For example, 'Get' requests with 'WithRange(end)' returns -// the keys in the range [key, end). -// endKey must be lexicographically greater than start key. -func WithRange(endKey string) OpOption { - return func(op *Op) { op.end = []byte(endKey) } -} - -// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests -// to be equal or greater than the key in the argument. -func WithFromKey() OpOption { return WithRange("\x00") } - -// WithSerializable makes 'Get' request serializable. By default, -// it's linearizable. Serializable requests are better for lower latency -// requirement. -func WithSerializable() OpOption { - return func(op *Op) { op.serializable = true } -} - -// WithKeysOnly makes the 'Get' request return only the keys and the corresponding -// values will be omitted. -func WithKeysOnly() OpOption { - return func(op *Op) { op.keysOnly = true } -} - -// WithCountOnly makes the 'Get' request return only the count of keys. -func WithCountOnly() OpOption { - return func(op *Op) { op.countOnly = true } -} - -// WithMinModRev filters out keys for Get with modification revisions less than the given revision. -func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } } - -// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision. -func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } } - -// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision. -func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } } - -// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision. 
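GetPrefixRangeEnd and getPrefix compute the range end by incrementing the last byte that is below 0xff (trailing 0xff bytes are dropped first), so Get(key, WithPrefix()) issues the same request as Get(key, WithRange(GetPrefixRangeEnd(key))). An illustrative sketch of that transform:

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// "foo" -> "fop": the last byte is incremented.
	fmt.Println(clientv3.GetPrefixRangeEnd("foo"))

	// A trailing 0xff byte is dropped before incrementing the previous one: "a\xff" -> "b".
	fmt.Printf("%q\n", clientv3.GetPrefixRangeEnd("a\xff"))
}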
-func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } } - -// WithFirstCreate gets the key with the oldest creation revision in the request range. -func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) } - -// WithLastCreate gets the key with the latest creation revision in the request range. -func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) } - -// WithFirstKey gets the lexically first key in the request range. -func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) } - -// WithLastKey gets the lexically last key in the request range. -func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) } - -// WithFirstRev gets the key with the oldest modification revision in the request range. -func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) } - -// WithLastRev gets the key with the latest modification revision in the request range. -func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) } - -// withTop gets the first key over the get's prefix given a sort order -func withTop(target SortTarget, order SortOrder) []OpOption { - return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)} -} - -// WithProgressNotify makes watch server send periodic progress updates -// every 10 minutes when there is no incoming events. -// Progress updates have zero events in WatchResponse. -func WithProgressNotify() OpOption { - return func(op *Op) { - op.progressNotify = true - } -} - -// WithCreatedNotify makes watch server sends the created event. -func WithCreatedNotify() OpOption { - return func(op *Op) { - op.createdNotify = true - } -} - -// WithFilterPut discards PUT events from the watcher. -func WithFilterPut() OpOption { - return func(op *Op) { op.filterPut = true } -} - -// WithFilterDelete discards DELETE events from the watcher. -func WithFilterDelete() OpOption { - return func(op *Op) { op.filterDelete = true } -} - -// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted, -// nothing will be returned. -func WithPrevKV() OpOption { - return func(op *Op) { - op.prevKV = true - } -} - -// WithFragment to receive raw watch response with fragmentation. -// Fragmentation is disabled by default. If fragmentation is enabled, -// etcd watch server will split watch response before sending to clients -// when the total size of watch events exceed server-side request limit. -// The default server-side request limit is 1.5 MiB, which can be configured -// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes. -// See "etcdserver/api/v3rpc/watch.go" for more details. -func WithFragment() OpOption { - return func(op *Op) { op.fragment = true } -} - -// WithIgnoreValue updates the key using its current value. -// This option can not be combined with non-empty values. -// Returns an error if the key does not exist. -func WithIgnoreValue() OpOption { - return func(op *Op) { - op.ignoreValue = true - } -} - -// WithIgnoreLease updates the key using its current lease. -// This option can not be combined with WithLease. -// Returns an error if the key does not exist. -func WithIgnoreLease() OpOption { - return func(op *Op) { - op.ignoreLease = true - } -} - -// LeaseOp represents an Operation that lease can execute. -type LeaseOp struct { - id LeaseID - - // for TimeToLive - attachedKeys bool -} - -// LeaseOption configures lease operations. 
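The WithFirst*/WithLast* helpers above return a []OpOption (prefix + sort + limit 1), so they are expanded with "..." at the call site. An illustrative sketch, assuming cli is an already connected *clientv3.Client:

package example

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

// latestUnderPrefix returns the key with the newest modification revision under
// prefix; WithLastRev is WithPrefix + sort by mod revision descending + limit 1.
func latestUnderPrefix(ctx context.Context, cli *clientv3.Client, prefix string) (string, error) {
	resp, err := cli.Get(ctx, prefix, clientv3.WithLastRev()...)
	if err != nil {
		return "", err
	}
	if len(resp.Kvs) == 0 {
		return "", nil
	}
	return string(resp.Kvs[0].Key), nil
}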
-type LeaseOption func(*LeaseOp) - -func (op *LeaseOp) applyOpts(opts []LeaseOption) { - for _, opt := range opts { - opt(op) - } -} - -// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID. -func WithAttachedKeys() LeaseOption { - return func(op *LeaseOp) { op.attachedKeys = true } -} - -func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest { - ret := &LeaseOp{id: id} - ret.applyOpts(opts) - return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys} -} diff --git a/vendor/github.com/coreos/etcd/clientv3/options.go b/vendor/github.com/coreos/etcd/clientv3/options.go deleted file mode 100644 index 4660acea0..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/options.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "math" - "time" - - "google.golang.org/grpc" -) - -var ( - // client-side handling retrying of request failures where data was not written to the wire or - // where server indicates it did not process the data. gRPC default is default is "FailFast(true)" - // but for etcd we default to "FailFast(false)" to minimize client request error responses due to - // transient failures. - defaultFailFast = grpc.FailFast(false) - - // client-side request send limit, gRPC default is math.MaxInt32 - // Make sure that "client-side send limit < server-side default send/recv limit" - // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes - defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024) - - // client-side response receive limit, gRPC default is 4MB - // Make sure that "client-side receive limit >= server-side default send/recv limit" - // because range response can easily exceed request send limits - // Default to math.MaxInt32; writes exceeding server-side send limit fails anyway - defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32) - - // client-side non-streaming retry limit, only applied to requests where server responds with - // a error code clearly indicating it was unable to process the request such as codes.Unavailable. - // If set to 0, retry is disabled. - defaultUnaryMaxRetries uint = 100 - - // client-side streaming retry limit, only applied to requests where server responds with - // a error code clearly indicating it was unable to process the request such as codes.Unavailable. - // If set to 0, retry is disabled. - defaultStreamMaxRetries = uint(^uint(0)) // max uint - - // client-side retry backoff wait between requests. - defaultBackoffWaitBetween = 25 * time.Millisecond - - // client-side retry backoff default jitter fraction. - defaultBackoffJitterFraction = 0.10 -) - -// defaultCallOpts defines a list of default "gRPC.CallOption". -// Some options are exposed to "clientv3.Config". -// Defaults will be overridden by the settings in "clientv3.Config". 
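These values are only defaults; in this etcd release they can be overridden per client through clientv3.Config (field names as in the vendored version and may differ in other releases). An illustrative sketch:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:          []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout:        5 * time.Second,
		MaxCallSendMsgSize: 2 * 1024 * 1024,  // matches defaultMaxCallSendMsgSize above
		MaxCallRecvMsgSize: 32 * 1024 * 1024, // raise when large range responses are expected
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}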
-var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize} - -// MaxLeaseTTL is the maximum lease TTL value -const MaxLeaseTTL = 9000000000 diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go deleted file mode 100644 index c6ef585b5..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import "context" - -// TODO: remove this when "FailFast=false" is fixed. -// See https://github.com/grpc/grpc-go/issues/1532. -func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error { - select { - case <-ready: - return nil - case <-rpcCtx.Done(): - return rpcCtx.Err() - case <-clientCtx.Done(): - return clientCtx.Err() - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go deleted file mode 100644 index 38ad00ac9..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - - "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type retryPolicy uint8 - -const ( - repeatable retryPolicy = iota - nonRepeatable -) - -func (rp retryPolicy) String() string { - switch rp { - case repeatable: - return "repeatable" - case nonRepeatable: - return "nonRepeatable" - default: - return "UNKNOWN" - } -} - -type rpcFunc func(ctx context.Context) error -type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error -type retryStopErrFunc func(error) bool - -// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry. -// -// immutable requests (e.g. Get) should be retried unless it's -// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge). -// -// Returning "false" means retry should stop, since client cannot -// handle itself even with retries. 
-func isSafeRetryImmutableRPC(err error) bool { - eErr := rpctypes.Error(err) - if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { - // interrupted by non-transient server-side or gRPC-side error - // client cannot handle itself (e.g. rpctypes.ErrCompacted) - return false - } - // only retry if unavailable - ev, ok := status.FromError(err) - if !ok { - // all errors from RPC is typed "grpc/status.(*statusError)" - // (ref. https://github.com/grpc/grpc-go/pull/1782) - // - // if the error type is not "grpc/status.(*statusError)", - // it could be from "Dial" - // TODO: do not retry for now - // ref. https://github.com/grpc/grpc-go/issues/1581 - return false - } - return ev.Code() == codes.Unavailable -} - -// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry. -// -// mutable requests (e.g. Put, Delete, Txn) should only be retried -// when the status code is codes.Unavailable when initial connection -// has not been established (no endpoint is up). -// -// Returning "false" means retry should stop, otherwise it violates -// write-at-most-once semantics. -func isSafeRetryMutableRPC(err error) bool { - if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable { - // not safe for mutable RPCs - // e.g. interrupted by non-transient error that client cannot handle itself, - // or transient error while the connection has already been established - return false - } - desc := rpctypes.ErrorDesc(err) - return desc == "there is no address available" || desc == "there is no connection available" -} - -type retryKVClient struct { - kc pb.KVClient -} - -// RetryKVClient implements a KVClient. -func RetryKVClient(c *Client) pb.KVClient { - return &retryKVClient{ - kc: pb.NewKVClient(c.conn), - } -} -func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { - return rkv.kc.Put(ctx, in, opts...) -} - -func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { - return rkv.kc.DeleteRange(ctx, in, opts...) -} - -func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { - return rkv.kc.Txn(ctx, in, opts...) -} - -func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { - return rkv.kc.Compact(ctx, in, opts...) -} - -type retryLeaseClient struct { - lc pb.LeaseClient -} - -// RetryLeaseClient implements a LeaseClient. -func RetryLeaseClient(c *Client) pb.LeaseClient { - return &retryLeaseClient{ - lc: pb.NewLeaseClient(c.conn), - } -} - -func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { - return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) { - return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...) 
-} - -func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { - return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...) -} - -type retryClusterClient struct { - cc pb.ClusterClient -} - -// RetryClusterClient implements a ClusterClient. -func RetryClusterClient(c *Client) pb.ClusterClient { - return &retryClusterClient{ - cc: pb.NewClusterClient(c.conn), - } -} - -func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { - return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { - return rcc.cc.MemberAdd(ctx, in, opts...) -} - -func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { - return rcc.cc.MemberRemove(ctx, in, opts...) -} - -func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { - return rcc.cc.MemberUpdate(ctx, in, opts...) -} - -type retryMaintenanceClient struct { - mc pb.MaintenanceClient -} - -// RetryMaintenanceClient implements a Maintenance. -func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { - return &retryMaintenanceClient{ - mc: pb.NewMaintenanceClient(conn), - } -} - -func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { - return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { - return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { - return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) { - return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { - return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) { - return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...) 
-} - -func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { - return rmc.mc.Defragment(ctx, in, opts...) -} - -type retryAuthClient struct { - ac pb.AuthClient -} - -// RetryAuthClient implements a AuthClient. -func RetryAuthClient(c *Client) pb.AuthClient { - return &retryAuthClient{ - ac: pb.NewAuthClient(c.conn), - } -} - -func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { - return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { - return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { - return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { - return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...) -} - -func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { - return rac.ac.AuthEnable(ctx, in, opts...) -} - -func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { - return rac.ac.AuthDisable(ctx, in, opts...) -} - -func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { - return rac.ac.UserAdd(ctx, in, opts...) -} - -func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { - return rac.ac.UserDelete(ctx, in, opts...) -} - -func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { - return rac.ac.UserChangePassword(ctx, in, opts...) -} - -func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { - return rac.ac.UserGrantRole(ctx, in, opts...) -} - -func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { - return rac.ac.UserRevokeRole(ctx, in, opts...) -} - -func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { - return rac.ac.RoleAdd(ctx, in, opts...) -} - -func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { - return rac.ac.RoleDelete(ctx, in, opts...) 
-} - -func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { - return rac.ac.RoleGrantPermission(ctx, in, opts...) -} - -func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { - return rac.ac.RoleRevokePermission(ctx, in, opts...) -} - -func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { - return rac.ac.Authenticate(ctx, in, opts...) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go b/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go deleted file mode 100644 index f8ebe44e4..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more -// fine grained error checking required by write-at-most-once retry semantics of etcd. - -package clientv3 - -import ( - "context" - "io" - "sync" - "time" - - "github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils" - "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -// unaryClientInterceptor returns a new retrying unary client interceptor. -// -// The default configuration of the interceptor is to not retry *at all*. This behaviour can be -// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). -func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor { - intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - grpcOpts, retryOpts := filterCallOptions(opts) - callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) - // short circuit for simplicity, and avoiding allocations. - if callOpts.max == 0 { - return invoker(ctx, method, req, reply, cc, grpcOpts...) - } - var lastErr error - for attempt := uint(0); attempt < callOpts.max; attempt++ { - if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil { - return err - } - lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...) - logger.Info("retry unary intercept", zap.Uint("attempt", attempt), zap.Error(lastErr)) - if lastErr == nil { - return nil - } - if isContextError(lastErr) { - if ctx.Err() != nil { - // its the context deadline or cancellation. - return lastErr - } - // its the callCtx deadline or cancellation, in which case try again. 
- continue - } - if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken { - gterr := c.getToken(ctx) - if gterr != nil { - logger.Info("retry failed to fetch new auth token", zap.Error(gterr)) - return lastErr // return the original error for simplicity - } - continue - } - if !isSafeRetry(c.lg, lastErr, callOpts) { - return lastErr - } - } - return lastErr - } -} - -// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls. -// -// The default configuration of the interceptor is to not retry *at all*. This behaviour can be -// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). -// -// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs -// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams, -// BidiStreams), the retry interceptor will fail the call. -func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor { - intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - grpcOpts, retryOpts := filterCallOptions(opts) - callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) - // short circuit for simplicity, and avoiding allocations. - if callOpts.max == 0 { - return streamer(ctx, desc, cc, method, grpcOpts...) - } - if desc.ClientStreams { - return nil, grpc.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()") - } - newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...) - logger.Info("retry stream intercept", zap.Error(err)) - if err != nil { - // TODO(mwitkow): Maybe dial and transport errors should be retriable? - return nil, err - } - retryingStreamer := &serverStreamingRetryingStream{ - client: c, - ClientStream: newStreamer, - callOpts: callOpts, - ctx: ctx, - streamerCall: func(ctx context.Context) (grpc.ClientStream, error) { - return streamer(ctx, desc, cc, method, grpcOpts...) - }, - } - return retryingStreamer, nil - } -} - -// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a -// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish -// a new ClientStream according to the retry policy. 
-type serverStreamingRetryingStream struct { - grpc.ClientStream - client *Client - bufferedSends []interface{} // single message that the client can sen - receivedGood bool // indicates whether any prior receives were successful - wasClosedSend bool // indicates that CloseSend was closed - ctx context.Context - callOpts *options - streamerCall func(ctx context.Context) (grpc.ClientStream, error) - mu sync.RWMutex -} - -func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) { - s.mu.Lock() - s.ClientStream = clientStream - s.mu.Unlock() -} - -func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream { - s.mu.RLock() - defer s.mu.RUnlock() - return s.ClientStream -} - -func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error { - s.mu.Lock() - s.bufferedSends = append(s.bufferedSends, m) - s.mu.Unlock() - return s.getStream().SendMsg(m) -} - -func (s *serverStreamingRetryingStream) CloseSend() error { - s.mu.Lock() - s.wasClosedSend = true - s.mu.Unlock() - return s.getStream().CloseSend() -} - -func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) { - return s.getStream().Header() -} - -func (s *serverStreamingRetryingStream) Trailer() metadata.MD { - return s.getStream().Trailer() -} - -func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { - attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m) - if !attemptRetry { - return lastErr // success or hard failure - } - // We start off from attempt 1, because zeroth was already made on normal SendMsg(). - for attempt := uint(1); attempt < s.callOpts.max; attempt++ { - if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil { - return err - } - newStream, err := s.reestablishStreamAndResendBuffer(s.ctx) - if err != nil { - // TODO(mwitkow): Maybe dial and transport errors should be retriable? - return err - } - s.setStream(newStream) - attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) - //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr) - if !attemptRetry { - return lastErr - } - } - return lastErr -} - -func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { - s.mu.RLock() - wasGood := s.receivedGood - s.mu.RUnlock() - err := s.getStream().RecvMsg(m) - if err == nil || err == io.EOF { - s.mu.Lock() - s.receivedGood = true - s.mu.Unlock() - return false, err - } else if wasGood { - // previous RecvMsg in the stream succeeded, no retry logic should interfere - return false, err - } - if isContextError(err) { - if s.ctx.Err() != nil { - return false, err - } - // its the callCtx deadline or cancellation, in which case try again. 
- return true, err - } - if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { - gterr := s.client.getToken(s.ctx) - if gterr != nil { - s.client.lg.Info("retry failed to fetch new auth token", zap.Error(gterr)) - return false, err // return the original error for simplicity - } - return true, err - - } - return isSafeRetry(s.client.lg, err, s.callOpts), err -} - -func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { - s.mu.RLock() - bufferedSends := s.bufferedSends - s.mu.RUnlock() - newStream, err := s.streamerCall(callCtx) - if err != nil { - return nil, err - } - for _, msg := range bufferedSends { - if err := newStream.SendMsg(msg); err != nil { - return nil, err - } - } - if err := newStream.CloseSend(); err != nil { - return nil, err - } - return newStream, nil -} - -func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error { - waitTime := time.Duration(0) - if attempt > 0 { - waitTime = callOpts.backoffFunc(attempt) - } - if waitTime > 0 { - timer := time.NewTimer(waitTime) - select { - case <-ctx.Done(): - timer.Stop() - return contextErrToGrpcErr(ctx.Err()) - case <-timer.C: - } - } - return nil -} - -// isSafeRetry returns "true", if request is safe for retry with the given error. -func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool { - if isContextError(err) { - return false - } - switch callOpts.retryPolicy { - case repeatable: - return isSafeRetryImmutableRPC(err) - case nonRepeatable: - return isSafeRetryMutableRPC(err) - default: - lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String())) - return false - } -} - -func isContextError(err error) bool { - return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled -} - -func contextErrToGrpcErr(err error) error { - switch err { - case context.DeadlineExceeded: - return grpc.Errorf(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return grpc.Errorf(codes.Canceled, err.Error()) - default: - return grpc.Errorf(codes.Unknown, err.Error()) - } -} - -var ( - defaultOptions = &options{ - retryPolicy: nonRepeatable, - max: 0, // disable - backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10), - retryAuth: true, - } -) - -// backoffFunc denotes a family of functions that control the backoff duration between call retries. -// -// They are called with an identifier of the attempt, and should return a time the system client should -// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request -// the deadline of the request takes precedence and the wait will be interrupted before proceeding -// with the next iteration. -type backoffFunc func(attempt uint) time.Duration - -// withRetryPolicy sets the retry policy of this call. -func withRetryPolicy(rp retryPolicy) retryOption { - return retryOption{applyFunc: func(o *options) { - o.retryPolicy = rp - }} -} - -// withAuthRetry sets enables authentication retries. -func withAuthRetry(retryAuth bool) retryOption { - return retryOption{applyFunc: func(o *options) { - o.retryAuth = retryAuth - }} -} - -// withMax sets the maximum number of retries on this call, or this interceptor. -func withMax(maxRetries uint) retryOption { - return retryOption{applyFunc: func(o *options) { - o.max = maxRetries - }} -} - -// WithBackoff sets the `BackoffFunc `used to control time between retries. 
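The backoffFunc machinery here is unexported; purely for illustration, a standalone sketch of the same linear-with-jitter policy used by the defaults (25ms wait, 0.10 jitter fraction), without the backoffutils dependency:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitterUp mirrors backoffutils.JitterUp: scale the wait by a random factor in
// [1-jitter, 1+jitter].
func jitterUp(wait time.Duration, jitter float64) time.Duration {
	multiplier := jitter * (rand.Float64()*2 - 1) // in [-jitter, +jitter]
	return time.Duration(float64(wait) * (1 + multiplier))
}

// backoffLinearWithJitter ignores the attempt number: every retry waits roughly
// the same amount of time, randomized to avoid thundering herds.
func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) func(attempt uint) time.Duration {
	return func(attempt uint) time.Duration {
		return jitterUp(waitBetween, jitterFraction)
	}
}

func main() {
	bf := backoffLinearWithJitter(25*time.Millisecond, 0.10)
	for attempt := uint(1); attempt <= 3; attempt++ {
		fmt.Println(bf(attempt)) // each wait lands in roughly [22.5ms, 27.5ms]
	}
}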
-func withBackoff(bf backoffFunc) retryOption { - return retryOption{applyFunc: func(o *options) { - o.backoffFunc = bf - }} -} - -type options struct { - retryPolicy retryPolicy - max uint - backoffFunc backoffFunc - retryAuth bool -} - -// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor. -type retryOption struct { - grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic. - applyFunc func(opt *options) -} - -func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options { - if len(retryOptions) == 0 { - return opt - } - optCopy := &options{} - *optCopy = *opt - for _, f := range retryOptions { - f.applyFunc(optCopy) - } - return optCopy -} - -func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) { - for _, opt := range callOptions { - if co, ok := opt.(retryOption); ok { - retryOptions = append(retryOptions, co) - } else { - grpcOptions = append(grpcOptions, opt) - } - } - return grpcOptions, retryOptions -} - -// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). -// -// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. -func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc { - return func(attempt uint) time.Duration { - return backoffutils.JitterUp(waitBetween, jitterFraction) - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/sort.go b/vendor/github.com/coreos/etcd/clientv3/sort.go deleted file mode 100644 index 2bb9d9a13..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/sort.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -type SortTarget int -type SortOrder int - -const ( - SortNone SortOrder = iota - SortAscend - SortDescend -) - -const ( - SortByKey SortTarget = iota - SortByVersion - SortByCreateRevision - SortByModRevision - SortByValue -) - -type SortOption struct { - Target SortTarget - Order SortOrder -} diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go deleted file mode 100644 index c19715da4..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "context" - "sync" - - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" -) - -// Txn is the interface that wraps mini-transactions. -// -// Txn(context.TODO()).If( -// Compare(Value(k1), ">", v1), -// Compare(Version(k1), "=", 2) -// ).Then( -// OpPut(k2,v2), OpPut(k3,v3) -// ).Else( -// OpPut(k4,v4), OpPut(k5,v5) -// ).Commit() -// -type Txn interface { - // If takes a list of comparison. If all comparisons passed in succeed, - // the operations passed into Then() will be executed. Or the operations - // passed into Else() will be executed. - If(cs ...Cmp) Txn - - // Then takes a list of operations. The Ops list will be executed, if the - // comparisons passed in If() succeed. - Then(ops ...Op) Txn - - // Else takes a list of operations. The Ops list will be executed, if the - // comparisons passed in If() fail. - Else(ops ...Op) Txn - - // Commit tries to commit the transaction. - Commit() (*TxnResponse, error) -} - -type txn struct { - kv *kv - ctx context.Context - - mu sync.Mutex - cif bool - cthen bool - celse bool - - isWrite bool - - cmps []*pb.Compare - - sus []*pb.RequestOp - fas []*pb.RequestOp - - callOpts []grpc.CallOption -} - -func (txn *txn) If(cs ...Cmp) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.cif { - panic("cannot call If twice!") - } - - if txn.cthen { - panic("cannot call If after Then!") - } - - if txn.celse { - panic("cannot call If after Else!") - } - - txn.cif = true - - for i := range cs { - txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i])) - } - - return txn -} - -func (txn *txn) Then(ops ...Op) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.cthen { - panic("cannot call Then twice!") - } - if txn.celse { - panic("cannot call Then after Else!") - } - - txn.cthen = true - - for _, op := range ops { - txn.isWrite = txn.isWrite || op.isWrite() - txn.sus = append(txn.sus, op.toRequestOp()) - } - - return txn -} - -func (txn *txn) Else(ops ...Op) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.celse { - panic("cannot call Else twice!") - } - - txn.celse = true - - for _, op := range ops { - txn.isWrite = txn.isWrite || op.isWrite() - txn.fas = append(txn.fas, op.toRequestOp()) - } - - return txn -} - -func (txn *txn) Commit() (*TxnResponse, error) { - txn.mu.Lock() - defer txn.mu.Unlock() - - r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - - var resp *pb.TxnResponse - var err error - resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) - if err != nil { - return nil, toErr(txn.ctx, err) - } - return (*TxnResponse)(resp), nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go deleted file mode 100644 index 9c677ac7b..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ /dev/null @@ -1,982 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
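A compile-ready variant of the Txn example sketched in the interface comment above (illustrative only; cli is assumed to be an already connected *clientv3.Client, and Compare/Value come from the package's compare helpers):

package example

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

// compareAndSwap writes newVal only if key currently holds oldVal, following the
// If/Then/Commit flow of the Txn interface above; Succeeded reports whether the
// comparison (and therefore the put) took effect.
func compareAndSwap(ctx context.Context, cli *clientv3.Client, key, oldVal, newVal string) (bool, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Value(key), "=", oldVal)).
		Then(clientv3.OpPut(key, newVal)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}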
- -package clientv3 - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - v3rpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" - pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - mvccpb "go.etcd.io/etcd/mvcc/mvccpb" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -const ( - EventTypeDelete = mvccpb.DELETE - EventTypePut = mvccpb.PUT - - closeSendErrTimeout = 250 * time.Millisecond -) - -type Event mvccpb.Event - -type WatchChan <-chan WatchResponse - -type Watcher interface { - // Watch watches on a key or prefix. The watched events will be returned - // through the returned channel. If revisions waiting to be sent over the - // watch are compacted, then the watch will be canceled by the server, the - // client will post a compacted error watch response, and the channel will close. - // If the context "ctx" is canceled or timed out, returned "WatchChan" is closed, - // and "WatchResponse" from this closed channel has zero events and nil "Err()". - // The context "ctx" MUST be canceled, as soon as watcher is no longer being used, - // to release the associated resources. - // - // If the context is "context.Background/TODO", returned "WatchChan" will - // not be closed and block until event is triggered, except when server - // returns a non-recoverable error (e.g. ErrCompacted). - // For example, when context passed with "WithRequireLeader" and the - // connected server has no leader (e.g. due to network partition), - // error "etcdserver: no leader" (ErrNoLeader) will be returned, - // and then "WatchChan" is closed with non-nil "Err()". - // In order to prevent a watch stream being stuck in a partitioned node, - // make sure to wrap context with "WithRequireLeader". - // - // Otherwise, as long as the context has not been canceled or timed out, - // watch will retry on other recoverable errors forever until reconnected. - // - // TODO: explicitly set context error in the last "WatchResponse" message and close channel? - // Currently, client contexts are overwritten with "valCtx" that never closes. - // TODO(v3.4): configure watch retry policy, limit maximum retry number - // (see https://go.etcd.io/etcd/issues/8980) - Watch(ctx context.Context, key string, opts ...OpOption) WatchChan - - // RequestProgress requests a progress notify response be sent in all watch channels. - RequestProgress(ctx context.Context) error - - // Close closes the watcher and cancels all watch requests. - Close() error -} - -type WatchResponse struct { - Header pb.ResponseHeader - Events []*Event - - // CompactRevision is the minimum revision the watcher may receive. - CompactRevision int64 - - // Canceled is used to indicate watch failure. - // If the watch failed and the stream was about to close, before the channel is closed, - // the channel sends a final response that has Canceled set to true with a non-nil Err(). - Canceled bool - - // Created is used to indicate the creation of the watcher. - Created bool - - closeErr error - - // cancelReason is a reason of canceling watch - cancelReason string -} - -// IsCreate returns true if the event tells that the key is newly created. -func (e *Event) IsCreate() bool { - return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision -} - -// IsModify returns true if the event tells that a new value is put on existing key. 
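A minimal consumer of the Watcher interface documented above (illustrative only; the endpoint and key are placeholders). WithRequireLeader protects against watching through a partitioned member, and the returned channel closes once ctx is canceled:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.Background()))
	defer cancel()

	for wresp := range cli.Watch(ctx, "foo/", clientv3.WithPrefix(), clientv3.WithPrevKV()) {
		if err := wresp.Err(); err != nil {
			log.Fatal(err) // e.g. ErrCompacted or ErrNoLeader
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}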
-func (e *Event) IsModify() bool { - return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision -} - -// Err is the error value if this WatchResponse holds an error. -func (wr *WatchResponse) Err() error { - switch { - case wr.closeErr != nil: - return v3rpc.Error(wr.closeErr) - case wr.CompactRevision != 0: - return v3rpc.ErrCompacted - case wr.Canceled: - if len(wr.cancelReason) != 0 { - return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) - } - return v3rpc.ErrFutureRev - } - return nil -} - -// IsProgressNotify returns true if the WatchResponse is progress notification. -func (wr *WatchResponse) IsProgressNotify() bool { - return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 -} - -// watcher implements the Watcher interface -type watcher struct { - remote pb.WatchClient - callOpts []grpc.CallOption - - // mu protects the grpc streams map - mu sync.RWMutex - - // streams holds all the active grpc streams keyed by ctx value. - streams map[string]*watchGrpcStream -} - -// watchGrpcStream tracks all watch resources attached to a single grpc stream. -type watchGrpcStream struct { - owner *watcher - remote pb.WatchClient - callOpts []grpc.CallOption - - // ctx controls internal remote.Watch requests - ctx context.Context - // ctxKey is the key used when looking up this stream's context - ctxKey string - cancel context.CancelFunc - - // substreams holds all active watchers on this grpc stream - substreams map[int64]*watcherStream - // resuming holds all resuming watchers on this grpc stream - resuming []*watcherStream - - // reqc sends a watch request from Watch() to the main goroutine - reqc chan watchStreamRequest - // respc receives data from the watch client - respc chan *pb.WatchResponse - // donec closes to broadcast shutdown - donec chan struct{} - // errc transmits errors from grpc Recv to the watch stream reconnect logic - errc chan error - // closingc gets the watcherStream of closing watchers - closingc chan *watcherStream - // wg is Done when all substream goroutines have exited - wg sync.WaitGroup - - // resumec closes to signal that all substreams should begin resuming - resumec chan struct{} - // closeErr is the error that closed the watch stream - closeErr error -} - -// watchStreamRequest is a union of the supported watch request operation types -type watchStreamRequest interface { - toPB() *pb.WatchRequest -} - -// watchRequest is issued by the subscriber to start a new watcher -type watchRequest struct { - ctx context.Context - key string - end string - rev int64 - - // send created notification event if this field is true - createdNotify bool - // progressNotify is for progress updates - progressNotify bool - // fragmentation should be disabled by default - // if true, split watch events when total exceeds - // "--max-request-bytes" flag value + 512-byte - fragment bool - - // filters is the list of events to filter out - filters []pb.WatchCreateRequest_FilterType - // get the previous key-value pair before the event happens - prevKV bool - // retc receives a chan WatchResponse once the watcher is established - retc chan chan WatchResponse -} - -// progressRequest is issued by the subscriber to request watch progress -type progressRequest struct { -} - -// watcherStream represents a registered watcher -type watcherStream struct { - // initReq is the request that initiated this request - initReq watchRequest - - // outc publishes watch responses to subscriber - outc chan 
WatchResponse - // recvc buffers watch responses before publishing - recvc chan *WatchResponse - // donec closes when the watcherStream goroutine stops. - donec chan struct{} - // closing is set to true when stream should be scheduled to shutdown. - closing bool - // id is the registered watch id on the grpc stream - id int64 - - // buf holds all events received from etcd but not yet consumed by the client - buf []*WatchResponse -} - -func NewWatcher(c *Client) Watcher { - return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c) -} - -func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { - w := &watcher{ - remote: wc, - streams: make(map[string]*watchGrpcStream), - } - if c != nil { - w.callOpts = c.callOpts - } - return w -} - -// never closes -var valCtxCh = make(chan struct{}) -var zeroTime = time.Unix(0, 0) - -// ctx with only the values; never Done -type valCtx struct{ context.Context } - -func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } -func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } -func (vc *valCtx) Err() error { return nil } - -func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { - ctx, cancel := context.WithCancel(&valCtx{inctx}) - wgs := &watchGrpcStream{ - owner: w, - remote: w.remote, - callOpts: w.callOpts, - ctx: ctx, - ctxKey: streamKeyFromCtx(inctx), - cancel: cancel, - substreams: make(map[int64]*watcherStream), - respc: make(chan *pb.WatchResponse), - reqc: make(chan watchStreamRequest), - donec: make(chan struct{}), - errc: make(chan error, 1), - closingc: make(chan *watcherStream), - resumec: make(chan struct{}), - } - go wgs.run() - return wgs -} - -// Watch posts a watch request to run() and waits for a new watcher channel -func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { - ow := opWatch(key, opts...) - - var filters []pb.WatchCreateRequest_FilterType - if ow.filterPut { - filters = append(filters, pb.WatchCreateRequest_NOPUT) - } - if ow.filterDelete { - filters = append(filters, pb.WatchCreateRequest_NODELETE) - } - - wr := &watchRequest{ - ctx: ctx, - createdNotify: ow.createdNotify, - key: string(ow.key), - end: string(ow.end), - rev: ow.rev, - progressNotify: ow.progressNotify, - fragment: ow.fragment, - filters: filters, - prevKV: ow.prevKV, - retc: make(chan chan WatchResponse, 1), - } - - ok := false - ctxKey := streamKeyFromCtx(ctx) - - // find or allocate appropriate grpc watch stream - w.mu.Lock() - if w.streams == nil { - // closed - w.mu.Unlock() - ch := make(chan WatchResponse) - close(ch) - return ch - } - wgs := w.streams[ctxKey] - if wgs == nil { - wgs = w.newWatcherGrpcStream(ctx) - w.streams[ctxKey] = wgs - } - donec := wgs.donec - reqc := wgs.reqc - w.mu.Unlock() - - // couldn't create channel; return closed channel - closeCh := make(chan WatchResponse, 1) - - // submit request - select { - case reqc <- wr: - ok = true - case <-wr.ctx.Done(): - case <-donec: - if wgs.closeErr != nil { - closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr} - break - } - // retry; may have dropped stream from no ctxs - return w.Watch(ctx, key, opts...) - } - - // receive channel - if ok { - select { - case ret := <-wr.retc: - return ret - case <-ctx.Done(): - case <-donec: - if wgs.closeErr != nil { - closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr} - break - } - // retry; may have dropped stream from no ctxs - return w.Watch(ctx, key, opts...) 
- } - } - - close(closeCh) - return closeCh -} - -func (w *watcher) Close() (err error) { - w.mu.Lock() - streams := w.streams - w.streams = nil - w.mu.Unlock() - for _, wgs := range streams { - if werr := wgs.close(); werr != nil { - err = werr - } - } - return err -} - -// RequestProgress requests a progress notify response be sent in all watch channels. -func (w *watcher) RequestProgress(ctx context.Context) (err error) { - ctxKey := streamKeyFromCtx(ctx) - - w.mu.Lock() - if w.streams == nil { - return fmt.Errorf("no stream found for context") - } - wgs := w.streams[ctxKey] - if wgs == nil { - wgs = w.newWatcherGrpcStream(ctx) - w.streams[ctxKey] = wgs - } - donec := wgs.donec - reqc := wgs.reqc - w.mu.Unlock() - - pr := &progressRequest{} - - select { - case reqc <- pr: - return nil - case <-ctx.Done(): - if err == nil { - return ctx.Err() - } - return err - case <-donec: - if wgs.closeErr != nil { - return wgs.closeErr - } - // retry; may have dropped stream from no ctxs - return w.RequestProgress(ctx) - } -} - -func (w *watchGrpcStream) close() (err error) { - w.cancel() - <-w.donec - select { - case err = <-w.errc: - default: - } - return toErr(w.ctx, err) -} - -func (w *watcher) closeStream(wgs *watchGrpcStream) { - w.mu.Lock() - close(wgs.donec) - wgs.cancel() - if w.streams != nil { - delete(w.streams, wgs.ctxKey) - } - w.mu.Unlock() -} - -func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { - // check watch ID for backward compatibility (<= v3.3) - if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") { - w.closeErr = v3rpc.Error(errors.New(resp.CancelReason)) - // failed; no channel - close(ws.recvc) - return - } - ws.id = resp.WatchId - w.substreams[ws.id] = ws -} - -func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { - select { - case ws.outc <- *resp: - case <-ws.initReq.ctx.Done(): - case <-time.After(closeSendErrTimeout): - } - close(ws.outc) -} - -func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { - // send channel response in case stream was never established - select { - case ws.initReq.retc <- ws.outc: - default: - } - // close subscriber's channel - if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { - go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr}) - } else if ws.outc != nil { - close(ws.outc) - } - if ws.id != -1 { - delete(w.substreams, ws.id) - return - } - for i := range w.resuming { - if w.resuming[i] == ws { - w.resuming[i] = nil - return - } - } -} - -// run is the root of the goroutines for managing a watcher client -func (w *watchGrpcStream) run() { - var wc pb.Watch_WatchClient - var closeErr error - - // substreams marked to close but goroutine still running; needed for - // avoiding double-closing recvc on grpc stream teardown - closing := make(map[*watcherStream]struct{}) - - defer func() { - w.closeErr = closeErr - // shutdown substreams and resuming substreams - for _, ws := range w.substreams { - if _, ok := closing[ws]; !ok { - close(ws.recvc) - closing[ws] = struct{}{} - } - } - for _, ws := range w.resuming { - if _, ok := closing[ws]; ws != nil && !ok { - close(ws.recvc) - closing[ws] = struct{}{} - } - } - w.joinSubstreams() - for range closing { - w.closeSubstream(<-w.closingc) - } - w.wg.Wait() - w.owner.closeStream(w) - }() - - // start a stream with the etcd grpc server - if wc, closeErr = w.newWatchClient(); closeErr != nil { - return - } - - cancelSet := make(map[int64]struct{}) - - var cur 
*pb.WatchResponse - for { - select { - // Watch() requested - case req := <-w.reqc: - switch wreq := req.(type) { - case *watchRequest: - outc := make(chan WatchResponse, 1) - // TODO: pass custom watch ID? - ws := &watcherStream{ - initReq: *wreq, - id: -1, - outc: outc, - // unbuffered so resumes won't cause repeat events - recvc: make(chan *WatchResponse), - } - - ws.donec = make(chan struct{}) - w.wg.Add(1) - go w.serveSubstream(ws, w.resumec) - - // queue up for watcher creation/resume - w.resuming = append(w.resuming, ws) - if len(w.resuming) == 1 { - // head of resume queue, can register a new watcher - wc.Send(ws.initReq.toPB()) - } - case *progressRequest: - wc.Send(wreq.toPB()) - } - - // new events from the watch client - case pbresp := <-w.respc: - if cur == nil || pbresp.Created || pbresp.Canceled { - cur = pbresp - } else if cur != nil && cur.WatchId == pbresp.WatchId { - // merge new events - cur.Events = append(cur.Events, pbresp.Events...) - // update "Fragment" field; last response with "Fragment" == false - cur.Fragment = pbresp.Fragment - } - - switch { - case pbresp.Created: - // response to head of queue creation - if ws := w.resuming[0]; ws != nil { - w.addSubstream(pbresp, ws) - w.dispatchEvent(pbresp) - w.resuming[0] = nil - } - - if ws := w.nextResume(); ws != nil { - wc.Send(ws.initReq.toPB()) - } - - // reset for next iteration - cur = nil - - case pbresp.Canceled && pbresp.CompactRevision == 0: - delete(cancelSet, pbresp.WatchId) - if ws, ok := w.substreams[pbresp.WatchId]; ok { - // signal to stream goroutine to update closingc - close(ws.recvc) - closing[ws] = struct{}{} - } - - // reset for next iteration - cur = nil - - case cur.Fragment: - // watch response events are still fragmented - // continue to fetch next fragmented event arrival - continue - - default: - // dispatch to appropriate watch stream - ok := w.dispatchEvent(cur) - - // reset for next iteration - cur = nil - - if ok { - break - } - - // watch response on unexpected watch id; cancel id - if _, ok := cancelSet[pbresp.WatchId]; ok { - break - } - - cancelSet[pbresp.WatchId] = struct{}{} - cr := &pb.WatchRequest_CancelRequest{ - CancelRequest: &pb.WatchCancelRequest{ - WatchId: pbresp.WatchId, - }, - } - req := &pb.WatchRequest{RequestUnion: cr} - wc.Send(req) - } - - // watch client failed on Recv; spawn another if possible - case err := <-w.errc: - if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { - closeErr = err - return - } - if wc, closeErr = w.newWatchClient(); closeErr != nil { - return - } - if ws := w.nextResume(); ws != nil { - wc.Send(ws.initReq.toPB()) - } - cancelSet = make(map[int64]struct{}) - - case <-w.ctx.Done(): - return - - case ws := <-w.closingc: - w.closeSubstream(ws) - delete(closing, ws) - // no more watchers on this stream, shutdown - if len(w.substreams)+len(w.resuming) == 0 { - return - } - } - } -} - -// nextResume chooses the next resuming to register with the grpc stream. Abandoned -// streams are marked as nil in the queue since the head must wait for its inflight registration. 
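The run() loop above reassembles fragmented watch responses by appending Events to the in-progress response until a frame arrives with Fragment == false; only then is the merged response dispatched. Below is a minimal, self-contained sketch of that accumulation pattern, using a simplified stand-in type rather than the real pb.WatchResponse (all names here are illustrative, not part of clientv3):

```go
package main

import "fmt"

// frame is a simplified stand-in for pb.WatchResponse.
type frame struct {
	WatchID  int64
	Events   []string
	Fragment bool // true while more pieces of the same response follow
}

// mergeFragments folds possibly-fragmented frames into complete responses,
// mirroring how run() accumulates cur.Events until Fragment is false.
func mergeFragments(in <-chan frame) <-chan frame {
	out := make(chan frame)
	go func() {
		defer close(out)
		var cur *frame
		for f := range in {
			if cur == nil || cur.WatchID != f.WatchID {
				c := f
				cur = &c // start accumulating a new response
			} else {
				cur.Events = append(cur.Events, f.Events...)
				cur.Fragment = f.Fragment // the final piece clears Fragment
			}
			if !cur.Fragment {
				out <- *cur // complete response; hand it off
				cur = nil
			}
		}
	}()
	return out
}

func main() {
	in := make(chan frame, 2)
	in <- frame{WatchID: 1, Events: []string{"PUT a"}, Fragment: true}
	in <- frame{WatchID: 1, Events: []string{"PUT b"}}
	close(in)
	for r := range mergeFragments(in) {
		fmt.Println(r.Events) // [PUT a PUT b]
	}
}
```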
-func (w *watchGrpcStream) nextResume() *watcherStream { - for len(w.resuming) != 0 { - if w.resuming[0] != nil { - return w.resuming[0] - } - w.resuming = w.resuming[1:len(w.resuming)] - } - return nil -} - -// dispatchEvent sends a WatchResponse to the appropriate watcher stream -func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { - events := make([]*Event, len(pbresp.Events)) - for i, ev := range pbresp.Events { - events[i] = (*Event)(ev) - } - // TODO: return watch ID? - wr := &WatchResponse{ - Header: *pbresp.Header, - Events: events, - CompactRevision: pbresp.CompactRevision, - Created: pbresp.Created, - Canceled: pbresp.Canceled, - cancelReason: pbresp.CancelReason, - } - - // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to - // indicate they should be broadcast. - if wr.IsProgressNotify() && pbresp.WatchId == -1 { - return w.broadcastResponse(wr) - } - - return w.unicastResponse(wr, pbresp.WatchId) - -} - -// broadcastResponse send a watch response to all watch substreams. -func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool { - for _, ws := range w.substreams { - select { - case ws.recvc <- wr: - case <-ws.donec: - } - } - return true -} - -// unicastResponse sends a watch response to a specific watch substream. -func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool { - ws, ok := w.substreams[watchId] - if !ok { - return false - } - select { - case ws.recvc <- wr: - case <-ws.donec: - return false - } - return true -} - -// serveWatchClient forwards messages from the grpc stream to run() -func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { - for { - resp, err := wc.Recv() - if err != nil { - select { - case w.errc <- err: - case <-w.donec: - } - return - } - select { - case w.respc <- resp: - case <-w.donec: - return - } - } -} - -// serveSubstream forwards watch responses from run() to the subscriber -func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { - if ws.closing { - panic("created substream goroutine but substream is closing") - } - - // nextRev is the minimum expected next revision - nextRev := ws.initReq.rev - resuming := false - defer func() { - if !resuming { - ws.closing = true - } - close(ws.donec) - if !resuming { - w.closingc <- ws - } - w.wg.Done() - }() - - emptyWr := &WatchResponse{} - for { - curWr := emptyWr - outc := ws.outc - - if len(ws.buf) > 0 { - curWr = ws.buf[0] - } else { - outc = nil - } - select { - case outc <- *curWr: - if ws.buf[0].Err() != nil { - return - } - ws.buf[0] = nil - ws.buf = ws.buf[1:] - case wr, ok := <-ws.recvc: - if !ok { - // shutdown from closeSubstream - return - } - - if wr.Created { - if ws.initReq.retc != nil { - ws.initReq.retc <- ws.outc - // to prevent next write from taking the slot in buffered channel - // and posting duplicate create events - ws.initReq.retc = nil - - // send first creation event only if requested - if ws.initReq.createdNotify { - ws.outc <- *wr - } - // once the watch channel is returned, a current revision - // watch must resume at the store revision. This is necessary - // for the following case to work as expected: - // wch := m1.Watch("a") - // m2.Put("a", "b") - // <-wch - // If the revision is only bound on the first observed event, - // if wch is disconnected before the Put is issued, then reconnects - // after it is committed, it'll miss the Put. 
- if ws.initReq.rev == 0 { - nextRev = wr.Header.Revision - } - } - } else { - // current progress of watch; <= store revision - nextRev = wr.Header.Revision - } - - if len(wr.Events) > 0 { - nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 - } - ws.initReq.rev = nextRev - - // created event is already sent above, - // watcher should not post duplicate events - if wr.Created { - continue - } - - // TODO pause channel if buffer gets too large - ws.buf = append(ws.buf, wr) - case <-w.ctx.Done(): - return - case <-ws.initReq.ctx.Done(): - return - case <-resumec: - resuming = true - return - } - } - // lazily send cancel message if events on missing id -} - -func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { - // mark all substreams as resuming - close(w.resumec) - w.resumec = make(chan struct{}) - w.joinSubstreams() - for _, ws := range w.substreams { - ws.id = -1 - w.resuming = append(w.resuming, ws) - } - // strip out nils, if any - var resuming []*watcherStream - for _, ws := range w.resuming { - if ws != nil { - resuming = append(resuming, ws) - } - } - w.resuming = resuming - w.substreams = make(map[int64]*watcherStream) - - // connect to grpc stream while accepting watcher cancelation - stopc := make(chan struct{}) - donec := w.waitCancelSubstreams(stopc) - wc, err := w.openWatchClient() - close(stopc) - <-donec - - // serve all non-closing streams, even if there's a client error - // so that the teardown path can shutdown the streams as expected. - for _, ws := range w.resuming { - if ws.closing { - continue - } - ws.donec = make(chan struct{}) - w.wg.Add(1) - go w.serveSubstream(ws, w.resumec) - } - - if err != nil { - return nil, v3rpc.Error(err) - } - - // receive data from new grpc stream - go w.serveWatchClient(wc) - return wc, nil -} - -func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { - var wg sync.WaitGroup - wg.Add(len(w.resuming)) - donec := make(chan struct{}) - for i := range w.resuming { - go func(ws *watcherStream) { - defer wg.Done() - if ws.closing { - if ws.initReq.ctx.Err() != nil && ws.outc != nil { - close(ws.outc) - ws.outc = nil - } - return - } - select { - case <-ws.initReq.ctx.Done(): - // closed ws will be removed from resuming - ws.closing = true - close(ws.outc) - ws.outc = nil - w.wg.Add(1) - go func() { - defer w.wg.Done() - w.closingc <- ws - }() - case <-stopc: - } - }(w.resuming[i]) - } - go func() { - defer close(donec) - wg.Wait() - }() - return donec -} - -// joinSubstreams waits for all substream goroutines to complete. -func (w *watchGrpcStream) joinSubstreams() { - for _, ws := range w.substreams { - <-ws.donec - } - for _, ws := range w.resuming { - if ws != nil { - <-ws.donec - } - } -} - -var maxBackoff = 100 * time.Millisecond - -// openWatchClient retries opening a watch client until success or halt. 
-// manually retry in case "ws==nil && err==nil" -// TODO: remove FailFast=false -func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { - backoff := time.Millisecond - for { - select { - case <-w.ctx.Done(): - if err == nil { - return nil, w.ctx.Err() - } - return nil, err - default: - } - if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil { - break - } - if isHaltErr(w.ctx, err) { - return nil, v3rpc.Error(err) - } - if isUnavailableErr(w.ctx, err) { - // retry, but backoff - if backoff < maxBackoff { - // 25% backoff factor - backoff = backoff + backoff/4 - if backoff > maxBackoff { - backoff = maxBackoff - } - } - time.Sleep(backoff) - } - } - return ws, nil -} - -// toPB converts an internal watch request structure to its protobuf WatchRequest structure. -func (wr *watchRequest) toPB() *pb.WatchRequest { - req := &pb.WatchCreateRequest{ - StartRevision: wr.rev, - Key: []byte(wr.key), - RangeEnd: []byte(wr.end), - ProgressNotify: wr.progressNotify, - Filters: wr.filters, - PrevKv: wr.prevKV, - Fragment: wr.fragment, - } - cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} - return &pb.WatchRequest{RequestUnion: cr} -} - -// toPB converts an internal progress request structure to its protobuf WatchRequest structure. -func (pr *progressRequest) toPB() *pb.WatchRequest { - req := &pb.WatchProgressRequest{} - cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req} - return &pb.WatchRequest{RequestUnion: cr} -} - -func streamKeyFromCtx(ctx context.Context) string { - if md, ok := metadata.FromOutgoingContext(ctx); ok { - return fmt.Sprintf("%+v", md) - } - return "" -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go b/vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go index 4824df9ad..709505b2a 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go +++ b/vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go @@ -66,7 +66,7 @@ func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { if ci.closed { - return nil, errors.New("errCopyInClosed") + return nil, errors.New("copyin query is closed") } if len(v) == 0 { diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md index 3fc954460..affe322c1 100644 --- a/vendor/github.com/fatih/color/README.md +++ b/vendor/github.com/fatih/color/README.md @@ -1,7 +1,13 @@ +# Archived project. No maintenance. + +This project is not maintained anymore and is archived. Feel free to fork and +make your own changes if needed. For more detail read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/) + +Thanks to everyone for their valuable feedback and contributions. + + # Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) [![Build Status](https://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color) - - Color lets you use colorized outputs in terms of [ANSI Escape Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It has support for Windows too! 
The API can be used in several ways, pick one that diff --git a/vendor/github.com/fatih/structs/README.md b/vendor/github.com/fatih/structs/README.md index a75eabf37..1a4a2fda3 100644 --- a/vendor/github.com/fatih/structs/README.md +++ b/vendor/github.com/fatih/structs/README.md @@ -1,3 +1,10 @@ +# Archived project. No maintenance. + +This project is not maintained anymore and is archived. Feel free to fork and +make your own changes if needed. For more detail read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/) + +Thanks to everyone for their valuable feedback and contributions. + # Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs) Structs contains various utilities to work with Go (Golang) structs. It was diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go index 762e52551..61ef96362 100644 --- a/vendor/github.com/go-ini/ini/file.go +++ b/vendor/github.com/go-ini/ini/file.go @@ -45,6 +45,9 @@ type File struct { // newFile initializes File object with given data sources. func newFile(dataSources []dataSource, opts LoadOptions) *File { + if len(opts.KeyValueDelimiters) == 0 { + opts.KeyValueDelimiters = "=:" + } return &File{ BlockMode: true, dataSources: dataSources, @@ -286,7 +289,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { for _, kname := range sec.keyList { keyLength := len(kname) // First case will surround key by ` and second by """ - if strings.ContainsAny(kname, "\"=:") { + if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { keyLength += 2 } else if strings.Contains(kname, "`") { keyLength += 6 @@ -329,7 +332,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { switch { case key.isAutoIncrement: kname = "-" - case strings.ContainsAny(kname, "\"=:"): + case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): kname = "`" + kname + "`" case strings.Contains(kname, "`"): kname = `"""` + kname + `"""` diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 2920e0118..b6505a94b 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -34,7 +34,7 @@ const ( // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - _VERSION = "1.38.3" + _VERSION = "1.39.0" ) // Version returns current package version literal. @@ -168,6 +168,8 @@ type LoadOptions struct { // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise // conform to key/value pairs. Specify the names of those blocks here. UnparseableSections []string + // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". 
+ KeyValueDelimiters string } func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go index 3daf54c38..36cb3dad8 100644 --- a/vendor/github.com/go-ini/ini/parser.go +++ b/vendor/github.com/go-ini/ini/parser.go @@ -100,7 +100,7 @@ func cleanComment(in []byte) ([]byte, bool) { return in[i:], true } -func readKeyName(in []byte) (string, int, error) { +func readKeyName(delimiters string, in []byte) (string, int, error) { line := string(in) // Check if key name surrounded by quotes. @@ -127,7 +127,7 @@ func readKeyName(in []byte) (string, int, error) { pos += startIdx // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], "=:") + i := strings.IndexAny(line[pos+startIdx:], delimiters) if i < 0 { return "", -1, ErrDelimiterNotFound{line} } @@ -135,7 +135,7 @@ func readKeyName(in []byte) (string, int, error) { return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil } - endIdx = strings.IndexAny(line, "=:") + endIdx = strings.IndexAny(line, delimiters) if endIdx < 0 { return "", -1, ErrDelimiterNotFound{line} } @@ -428,7 +428,7 @@ func (f *File) parse(reader io.Reader) (err error) { continue } - kname, offset, err := readKeyName(line) + kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) if err != nil { // Treat as boolean key when desired, and whole line is key name. if IsErrDelimiterNotFound(err) { diff --git a/vendor/github.com/go-ldap/ldap/Makefile b/vendor/github.com/go-ldap/ldap/Makefile index a9d351c76..0994d55f3 100644 --- a/vendor/github.com/go-ldap/ldap/Makefile +++ b/vendor/github.com/go-ldap/ldap/Makefile @@ -36,7 +36,23 @@ fmt: # Only run on go1.5+ vet: - go tool vet -atomic -bool -copylocks -nilfunc -printf -shadow -rangeloops -unreachable -unsafeptr -unusedresult . + @go tool -n vet >/dev/null 2>&1; \ + if [ $$? -eq 0 ]; then \ + echo "go vet" ; \ + go tool vet \ + -atomic \ + -bool \ + -copylocks \ + -nilfunc \ + -printf \ + -shadow \ + -rangeloops \ + -unreachable \ + -unsafeptr \ + -unusedresult \ + . ; \ + fi ; + # https://github.com/golang/lint # go get github.com/golang/lint/golint @@ -44,7 +60,7 @@ vet: # Only run on go1.5+ lint: @echo golint ./... - @OUTPUT=`golint ./... 2>&1`; \ + @OUTPUT=`command -v golint >/dev/null 2>&1 && golint ./... 
2>&1`; \ if [ "$$OUTPUT" ]; then \ echo "golint errors:"; \ echo "$$OUTPUT"; \ diff --git a/vendor/github.com/gocql/gocql/README.md b/vendor/github.com/gocql/gocql/README.md index 929fe4657..1b3fd03ba 100644 --- a/vendor/github.com/gocql/gocql/README.md +++ b/vendor/github.com/gocql/gocql/README.md @@ -17,7 +17,7 @@ Supported Versions The following matrix shows the versions of Go and Cassandra that are tested with the integration test suite as part of the CI build: -Go/Cassandra | 2.1.x | 2.2.x | 3.0.x +Go/Cassandra | 2.1.x | 2.2.x | 3.x.x -------------| -------| ------| --------- 1.10 | yes | yes | yes 1.11 | yes | yes | yes diff --git a/vendor/github.com/gocql/gocql/conn.go b/vendor/github.com/gocql/gocql/conn.go index fb0bd1a20..0bec55ccf 100644 --- a/vendor/github.com/gocql/gocql/conn.go +++ b/vendor/github.com/gocql/gocql/conn.go @@ -227,51 +227,19 @@ func (s *Session) dial(host *HostInfo, cfg *ConnConfig, errorHandler ConnErrorHa } defer cancel() - frameTicker := make(chan struct{}, 1) - startupErr := make(chan error) - go func() { - for range frameTicker { - err := c.recv() - if err != nil { - select { - case startupErr <- err: - case <-ctx.Done(): - } + startup := &startupCoordinator{ + frameTicker: make(chan struct{}), + conn: c, + } - return - } - } - }() - - go func() { - defer close(frameTicker) - err := c.startup(ctx, frameTicker) - select { - case startupErr <- err: - case <-ctx.Done(): - } - }() - - select { - case err := <-startupErr: - if err != nil { - c.Close() - return nil, err - } - case <-ctx.Done(): - c.Close() - return nil, errors.New("gocql: no response to connection startup within timeout") + if err := startup.setupConn(ctx); err != nil { + c.close() + return nil, err } // dont coalesce startup frames if s.cfg.WriteCoalesceWaitTime > 0 { - w := &writeCoalescer{ - fcond: sync.NewCond(&sync.Mutex{}), - cond: sync.NewCond(&sync.Mutex{}), - w: c.w, - } - go w.writeFlusher(s.cfg.WriteCoalesceWaitTime, c.quit) - c.w = w + c.w = newWriteCoalescer(c.w, s.cfg.WriteCoalesceWaitTime, c.quit) } go c.serve() @@ -306,27 +274,98 @@ func (c *Conn) Read(p []byte) (n int, err error) { return } -func (c *Conn) startup(ctx context.Context, frameTicker chan struct{}) error { - m := map[string]string{ - "CQL_VERSION": c.cfg.CQLVersion, - } +type startupCoordinator struct { + conn *Conn + frameTicker chan struct{} +} - if c.compressor != nil { - m["COMPRESSION"] = c.compressor.Name() - } +func (s *startupCoordinator) setupConn(ctx context.Context) error { + startupErr := make(chan error) + go func() { + for range s.frameTicker { + err := s.conn.recv() + if err != nil { + select { + case startupErr <- err: + case <-ctx.Done(): + } + + return + } + } + }() + + go func() { + defer close(s.frameTicker) + err := s.options(ctx) + select { + case startupErr <- err: + case <-ctx.Done(): + } + }() select { - case frameTicker <- struct{}{}: + case err := <-startupErr: + if err != nil { + return err + } case <-ctx.Done(): - return ctx.Err() + return errors.New("gocql: no response to connection startup within timeout") } - framer, err := c.exec(ctx, &writeStartupFrame{opts: m}, nil) + return nil +} + +func (s *startupCoordinator) write(ctx context.Context, frame frameWriter) (frame, error) { + select { + case s.frameTicker <- struct{}{}: + case <-ctx.Done(): + return nil, ctx.Err() + } + + framer, err := s.conn.exec(ctx, frame, nil) + if err != nil { + return nil, err + } + + return framer.parseFrame() +} + +func (s *startupCoordinator) options(ctx context.Context) error { + frame, err := 
s.write(ctx, &writeOptionsFrame{}) if err != nil { return err } - frame, err := framer.parseFrame() + supported, ok := frame.(*supportedFrame) + if !ok { + return NewErrProtocol("Unknown type of response to startup frame: %T", frame) + } + + return s.startup(ctx, supported.supported) +} + +func (s *startupCoordinator) startup(ctx context.Context, supported map[string][]string) error { + m := map[string]string{ + "CQL_VERSION": s.conn.cfg.CQLVersion, + } + + if s.conn.compressor != nil { + comp := supported["COMPRESSION"] + name := s.conn.compressor.Name() + for _, compressor := range comp { + if compressor == name { + m["COMPRESSION"] = compressor + break + } + } + + if _, ok := m["COMPRESSION"]; !ok { + s.conn.compressor = nil + } + } + + frame, err := s.write(ctx, &writeStartupFrame{opts: m}) if err != nil { return err } @@ -337,37 +376,25 @@ func (c *Conn) startup(ctx context.Context, frameTicker chan struct{}) error { case *readyFrame: return nil case *authenticateFrame: - return c.authenticateHandshake(ctx, v, frameTicker) + return s.authenticateHandshake(ctx, v) default: return NewErrProtocol("Unknown type of response to startup frame: %s", v) } } -func (c *Conn) authenticateHandshake(ctx context.Context, authFrame *authenticateFrame, frameTicker chan struct{}) error { - if c.auth == nil { +func (s *startupCoordinator) authenticateHandshake(ctx context.Context, authFrame *authenticateFrame) error { + if s.conn.auth == nil { return fmt.Errorf("authentication required (using %q)", authFrame.class) } - resp, challenger, err := c.auth.Challenge([]byte(authFrame.class)) + resp, challenger, err := s.conn.auth.Challenge([]byte(authFrame.class)) if err != nil { return err } req := &writeAuthResponseFrame{data: resp} - for { - select { - case frameTicker <- struct{}{}: - case <-ctx.Done(): - return ctx.Err() - } - - framer, err := c.exec(ctx, req, nil) - if err != nil { - return err - } - - frame, err := framer.parseFrame() + frame, err := s.write(ctx, req) if err != nil { return err } @@ -612,11 +639,22 @@ func (c *deadlineWriter) Write(p []byte) (int, error) { return c.w.Write(p) } +func newWriteCoalescer(w io.Writer, d time.Duration, quit <-chan struct{}) *writeCoalescer { + wc := &writeCoalescer{ + writeCh: make(chan struct{}), // TODO: could this be sync? + cond: sync.NewCond(&sync.Mutex{}), + w: w, + quit: quit, + } + go wc.writeFlusher(d) + return wc +} + type writeCoalescer struct { w io.Writer - // fcond waits for a new write to start the flush loop - fcond *sync.Cond + quit <-chan struct{} + writeCh chan struct{} running bool // cond waits for the buffer to be flushed @@ -627,10 +665,8 @@ type writeCoalescer struct { err error } -func (w *writeCoalescer) flush() { - w.cond.L.Lock() - defer w.cond.L.Unlock() - +func (w *writeCoalescer) flushLocked() { + w.running = false if len(w.buffers) == 0 { return } @@ -645,16 +681,36 @@ func (w *writeCoalescer) flush() { w.cond.Broadcast() } -func (w *writeCoalescer) Write(p []byte) (int, error) { - // TODO: use atomics for this? - w.fcond.L.Lock() - if !w.running { - w.running = true - w.fcond.Broadcast() - } - w.fcond.L.Unlock() - +func (w *writeCoalescer) flush() { w.cond.L.Lock() + w.flushLocked() + w.cond.L.Unlock() +} + +func (w *writeCoalescer) stop() { + w.cond.L.Lock() + defer w.cond.L.Unlock() + + w.flushLocked() + // nil the channel out sends block forever on it + // instead of closing which causes a send on closed channel + // panic. 
+ w.writeCh = nil +} + +func (w *writeCoalescer) Write(p []byte) (int, error) { + w.cond.L.Lock() + + if !w.running { + select { + case w.writeCh <- struct{}{}: + w.running = true + case <-w.quit: + w.cond.L.Unlock() + return 0, io.EOF // TODO: better error here? + } + } + w.buffers = append(w.buffers, p) for len(w.buffers) != 0 { w.cond.Wait() @@ -669,10 +725,10 @@ func (w *writeCoalescer) Write(p []byte) (int, error) { return len(p), nil } -func (w *writeCoalescer) writeFlusher(interval time.Duration, quit chan struct{}) { +func (w *writeCoalescer) writeFlusher(interval time.Duration) { timer := time.NewTimer(interval) defer timer.Stop() - defer w.flush() + defer w.stop() if !timer.Stop() { <-timer.C @@ -680,24 +736,21 @@ func (w *writeCoalescer) writeFlusher(interval time.Duration, quit chan struct{} for { // wait for a write to start the flush loop - w.fcond.L.Lock() - for !w.running { - w.fcond.Wait() + select { + case <-w.writeCh: + case <-w.quit: + return } - w.fcond.L.Unlock() + timer.Reset(interval) select { - case <-quit: + case <-w.quit: return case <-timer.C: } - w.fcond.L.Lock() w.flush() - - w.running = false - w.fcond.L.Unlock() } } @@ -1220,6 +1273,7 @@ func (c *Conn) executeBatch(batch *Batch) *Iter { func (c *Conn) query(statement string, values ...interface{}) (iter *Iter) { q := c.session.Query(statement, values...).Consistency(One) + q.trace = nil return c.executeQuery(q) } diff --git a/vendor/github.com/gocql/gocql/control.go b/vendor/github.com/gocql/gocql/control.go index a45226323..d26a09f8d 100644 --- a/vendor/github.com/gocql/gocql/control.go +++ b/vendor/github.com/gocql/gocql/control.go @@ -453,8 +453,7 @@ func (c *controlConn) query(statement string, values ...interface{}) (iter *Iter Logger.Printf("control: error executing %q: %v\n", statement, iter.err) } - metric := q.getHostMetrics(c.getConn().host) - metric.Attempts++ + q.AddAttempts(1, c.getConn().host) if iter.err == nil || !c.retry.Attempt(q) { break } diff --git a/vendor/github.com/gocql/gocql/policies.go b/vendor/github.com/gocql/gocql/policies.go index 4db4e40c6..16beef280 100644 --- a/vendor/github.com/gocql/gocql/policies.go +++ b/vendor/github.com/gocql/gocql/policies.go @@ -5,6 +5,8 @@ package gocql import ( + "context" + "errors" "fmt" "math" "math/rand" @@ -130,6 +132,7 @@ type RetryableQuery interface { Attempts() int SetConsistency(c Consistency) GetConsistency() Consistency + GetContext() context.Context } type RetryType uint16 @@ -141,6 +144,10 @@ const ( Rethrow RetryType = 0x03 // raise error and stop retrying ) +// ErrUnknownRetryType is returned if the retry policy returns a retry type +// unknown to the query executor. +var ErrUnknownRetryType = errors.New("unknown retry type returned by retry policy") + // RetryPolicy interface is used by gocql to determine if a query can be attempted // again after a retryable error has been received. The interface allows gocql // users to implement their own logic to determine if a query can be attempted @@ -852,3 +859,21 @@ func (e *ExponentialReconnectionPolicy) GetInterval(currentRetry int) time.Durat func (e *ExponentialReconnectionPolicy) GetMaxRetries() int { return e.MaxRetries } + +type SpeculativeExecutionPolicy interface { + Attempts() int + Delay() time.Duration +} + +type NonSpeculativeExecution struct{} + +func (sp NonSpeculativeExecution) Attempts() int { return 0 } // No additional attempts +func (sp NonSpeculativeExecution) Delay() time.Duration { return 1 } // The delay. Must be positive to be used in a ticker. 
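NonSpeculativeExecution above reports zero extra attempts and returns a Delay of 1ns only so the executor's ticker stays valid; SimpleSpeculativeExecution, defined next, is the configurable variant. Any type with Attempts() and Delay() satisfies the policy interface, so callers can supply their own pacing. A hypothetical custom policy might look like the following sketch (the type name and values are illustrative only):

```go
package policies

import (
	"time"

	"github.com/gocql/gocql"
)

// aggressiveSpeculation is a hypothetical policy: up to three extra attempts,
// launched 50ms apart, in addition to the main execution.
type aggressiveSpeculation struct{}

func (aggressiveSpeculation) Attempts() int        { return 3 }
func (aggressiveSpeculation) Delay() time.Duration { return 50 * time.Millisecond }

// Compile-time check that the sketch satisfies gocql's SpeculativeExecutionPolicy.
var _ gocql.SpeculativeExecutionPolicy = aggressiveSpeculation{}
```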
+ +type SimpleSpeculativeExecution struct { + NumAttempts int + TimeoutDelay time.Duration +} + +func (sp *SimpleSpeculativeExecution) Attempts() int { return sp.NumAttempts } +func (sp *SimpleSpeculativeExecution) Delay() time.Duration { return sp.TimeoutDelay } diff --git a/vendor/github.com/gocql/gocql/query_executor.go b/vendor/github.com/gocql/gocql/query_executor.go index bd8c55688..4a5d875c1 100644 --- a/vendor/github.com/gocql/gocql/query_executor.go +++ b/vendor/github.com/gocql/gocql/query_executor.go @@ -1,6 +1,7 @@ package gocql import ( + "sync" "time" ) @@ -8,9 +9,11 @@ type ExecutableQuery interface { execute(conn *Conn) *Iter attempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo) retryPolicy() RetryPolicy + speculativeExecutionPolicy() SpeculativeExecutionPolicy GetRoutingKey() ([]byte, error) Keyspace() string Cancel() + IsIdempotent() bool RetryableQuery } @@ -19,6 +22,11 @@ type queryExecutor struct { policy HostSelectionPolicy } +type queryResponse struct { + iter *Iter + err error +} + func (q *queryExecutor) attemptQuery(qry ExecutableQuery, conn *Conn) *Iter { start := time.Now() iter := qry.execute(conn) @@ -30,12 +38,74 @@ func (q *queryExecutor) attemptQuery(qry ExecutableQuery, conn *Conn) *Iter { } func (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) { - rt := qry.retryPolicy() + + // check if the query is not marked as idempotent, if + // it is, we force the policy to NonSpeculative + sp := qry.speculativeExecutionPolicy() + if !qry.IsIdempotent() { + sp = NonSpeculativeExecution{} + } + + results := make(chan queryResponse, 1) + stop := make(chan struct{}) + defer close(stop) + var specWG sync.WaitGroup + + // Launch the main execution + specWG.Add(1) + go q.run(qry, &specWG, results, stop) + + // The speculative executions are launched _in addition_ to the main + // execution, on a timer. So Speculation{2} would make 3 executions running + // in total. + go func() { + // Handle the closing of the resources. We do it here because it's + // right after we finish launching executions. Otherwise clearing the + // wait group is complicated. 
+ defer func() { + specWG.Wait() + close(results) + }() + + // setup a ticker + ticker := time.NewTicker(sp.Delay()) + defer ticker.Stop() + + for i := 0; i < sp.Attempts(); i++ { + select { + case <-ticker.C: + // Launch the additional execution + specWG.Add(1) + go q.run(qry, &specWG, results, stop) + case <-qry.GetContext().Done(): + // not starting additional executions + return + case <-stop: + // not starting additional executions + return + } + } + }() + + res := <-results + if res.iter == nil && res.err == nil { + // if we're here, the results channel was closed, so no more hosts + return nil, ErrNoConnections + } + return res.iter, res.err +} + +func (q *queryExecutor) run(qry ExecutableQuery, specWG *sync.WaitGroup, results chan queryResponse, stop chan struct{}) { + // Handle the wait group + defer specWG.Done() + hostIter := q.policy.Pick(qry) + selectedHost := hostIter() + rt := qry.retryPolicy() var iter *Iter - for hostResponse := hostIter(); hostResponse != nil; hostResponse = hostIter() { - host := hostResponse.Info() + for selectedHost != nil { + host := selectedHost.Info() if host == nil || !host.IsUp() { continue } @@ -50,51 +120,50 @@ func (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) { continue } - iter = q.attemptQuery(qry, conn) - // Update host - hostResponse.Mark(iter.err) - - if rt == nil { - iter.host = host - break - } - - switch rt.GetRetryType(iter.err) { - case Retry: - for rt.Attempt(qry) { - iter = q.attemptQuery(qry, conn) - hostResponse.Mark(iter.err) - if iter.err == nil { - iter.host = host - return iter, nil - } - if rt.GetRetryType(iter.err) != Retry { - break - } - } - case Rethrow: - return nil, iter.err - case Ignore: - return iter, nil - case RetryNextHost: + select { + case <-stop: + // stop this execution and return + return default: + // Run the query + iter = q.attemptQuery(qry, conn) + iter.host = selectedHost.Info() + // Update host + selectedHost.Mark(iter.err) + + // Exit if the query was successful + // or no retry policy defined or retry attempts were reached + if iter.err == nil || rt == nil || !rt.Attempt(qry) { + results <- queryResponse{iter: iter} + return + } + + // If query is unsuccessful, check the error with RetryPolicy to retry + switch rt.GetRetryType(iter.err) { + case Retry: + // retry on the same host + continue + case Rethrow: + results <- queryResponse{err: iter.err} + return + case Ignore: + results <- queryResponse{iter: iter} + return + case RetryNextHost: + // retry on the next host + selectedHost = hostIter() + if selectedHost == nil { + results <- queryResponse{iter: iter} + return + } + continue + default: + // Undefined? Return nil and error, this will panic in the requester + results <- queryResponse{iter: nil, err: ErrUnknownRetryType} + return + } } - // Exit for loop if the query was successful - if iter.err == nil { - iter.host = host - return iter, nil - } - - if !rt.Attempt(qry) { - // What do here? Should we just return an error here? 
- break - } } - - if iter == nil { - return nil, ErrNoConnections - } - - return iter, nil + // All hosts are exhausted, return nothing } diff --git a/vendor/github.com/gocql/gocql/session.go b/vendor/github.com/gocql/gocql/session.go index 043534a21..e69d2bd61 100644 --- a/vendor/github.com/gocql/gocql/session.go +++ b/vendor/github.com/gocql/gocql/session.go @@ -658,11 +658,16 @@ func (s *Session) connect(host *HostInfo, errorHandler ConnErrorHandler) (*Conn, return s.dial(host, s.connCfg, errorHandler) } -type queryMetrics struct { +type hostMetrics struct { Attempts int TotalLatency int64 } +type queryMetrics struct { + l sync.RWMutex + m map[string]*hostMetrics +} + // Query represents a CQL statement that can be executed. type Query struct { stmt string @@ -677,6 +682,7 @@ type Query struct { observer QueryObserver session *Session rt RetryPolicy + spec SpeculativeExecutionPolicy binding func(q *QueryInfo) ([]interface{}, error) serialCons SerialConsistency defaultTimestamp bool @@ -685,8 +691,8 @@ type Query struct { context context.Context cancelQuery func() idempotent bool - metrics map[string]*queryMetrics customPayload map[string][]byte + metrics *queryMetrics disableAutoPage bool } @@ -704,23 +710,26 @@ func (q *Query) defaultsFromSession() { q.serialCons = s.cfg.SerialConsistency q.defaultTimestamp = s.cfg.DefaultTimestamp q.idempotent = s.cfg.DefaultIdempotence - q.metrics = make(map[string]*queryMetrics) + q.metrics = &queryMetrics{m: make(map[string]*hostMetrics)} // Initiate an empty context with a cancel call q.WithContext(context.Background()) + q.spec = &NonSpeculativeExecution{} s.mu.RUnlock() } -func (q *Query) getHostMetrics(host *HostInfo) *queryMetrics { - hostMetrics, exists := q.metrics[host.ConnectAddress().String()] +func (q *Query) getHostMetrics(host *HostInfo) *hostMetrics { + q.metrics.l.Lock() + metrics, exists := q.metrics.m[host.ConnectAddress().String()] if !exists { // if the host is not in the map, it means it's been accessed for the first time - hostMetrics = &queryMetrics{Attempts: 0, TotalLatency: 0} - q.metrics[host.ConnectAddress().String()] = hostMetrics + metrics = &hostMetrics{} + q.metrics.m[host.ConnectAddress().String()] = metrics } + q.metrics.l.Unlock() - return hostMetrics + return metrics } // Statement returns the statement that was used to generate this query. @@ -735,27 +744,45 @@ func (q Query) String() string { //Attempts returns the number of times the query was executed. func (q *Query) Attempts() int { - attempts := 0 - for _, metric := range q.metrics { + q.metrics.l.Lock() + var attempts int + for _, metric := range q.metrics.m { attempts += metric.Attempts } + q.metrics.l.Unlock() return attempts } +func (q *Query) AddAttempts(i int, host *HostInfo) { + hostMetric := q.getHostMetrics(host) + q.metrics.l.Lock() + hostMetric.Attempts += i + q.metrics.l.Unlock() +} + //Latency returns the average amount of nanoseconds per attempt of the query. func (q *Query) Latency() int64 { + q.metrics.l.Lock() var attempts int var latency int64 - for _, metric := range q.metrics { + for _, metric := range q.metrics.m { attempts += metric.Attempts latency += metric.TotalLatency } + q.metrics.l.Unlock() if attempts > 0 { return latency / int64(attempts) } return 0 } +func (q *Query) AddLatency(l int64, host *HostInfo) { + hostMetric := q.getHostMetrics(host) + q.metrics.l.Lock() + hostMetric.TotalLatency += l + q.metrics.l.Unlock() +} + // Consistency sets the consistency level for this query. 
If no consistency // level have been set, the default consistency level of the cluster // is used. @@ -781,6 +808,10 @@ func (q *Query) CustomPayload(customPayload map[string][]byte) *Query { return q } +func (q *Query) GetContext() context.Context { + return q.context +} + // Trace enables tracing of this query. Look at the documentation of the // Tracer interface to learn more about tracing. func (q *Query) Trace(trace Tracer) *Query { @@ -851,9 +882,8 @@ func (q *Query) execute(conn *Conn) *Iter { } func (q *Query) attempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo) { - hostMetrics := q.getHostMetrics(host) - hostMetrics.Attempts++ - hostMetrics.TotalLatency += end.Sub(start).Nanoseconds() + q.AddAttempts(1, host) + q.AddLatency(end.Sub(start).Nanoseconds(), host) if q.observer != nil { q.observer.ObserveQuery(q.context, ObservedQuery{ @@ -863,7 +893,7 @@ func (q *Query) attempt(keyspace string, end, start time.Time, iter *Iter, host End: end, Rows: iter.numRows, Host: host, - Metrics: hostMetrics, + Metrics: q.getHostMetrics(host), Err: iter.err, }) } @@ -983,6 +1013,17 @@ func (q *Query) RetryPolicy(r RetryPolicy) *Query { return q } +// SetSpeculativeExecutionPolicy sets the execution policy +func (q *Query) SetSpeculativeExecutionPolicy(sp SpeculativeExecutionPolicy) *Query { + q.spec = sp + return q +} + +// speculativeExecutionPolicy fetches the policy +func (q *Query) speculativeExecutionPolicy() SpeculativeExecutionPolicy { + return q.spec +} + func (q *Query) IsIdempotent() bool { return q.idempotent } @@ -1431,6 +1472,7 @@ type Batch struct { Cons Consistency CustomPayload map[string][]byte rt RetryPolicy + spec SpeculativeExecutionPolicy observer BatchObserver serialCons SerialConsistency defaultTimestamp bool @@ -1438,14 +1480,14 @@ type Batch struct { context context.Context cancelBatch func() keyspace string - metrics map[string]*queryMetrics + metrics *queryMetrics } // NewBatch creates a new batch operation without defaults from the cluster // // Deprecated: use session.NewBatch instead func NewBatch(typ BatchType) *Batch { - return &Batch{Type: typ, metrics: make(map[string]*queryMetrics)} + return &Batch{Type: typ, metrics: &queryMetrics{m: make(map[string]*hostMetrics)}} } // NewBatch creates a new batch operation using defaults defined in the cluster @@ -1459,7 +1501,8 @@ func (s *Session) NewBatch(typ BatchType) *Batch { Cons: s.cons, defaultTimestamp: s.cfg.DefaultTimestamp, keyspace: s.cfg.Keyspace, - metrics: make(map[string]*queryMetrics), + metrics: &queryMetrics{m: make(map[string]*hostMetrics)}, + spec: &NonSpeculativeExecution{}, } // Initiate an empty context with a cancel call @@ -1469,15 +1512,17 @@ func (s *Session) NewBatch(typ BatchType) *Batch { return batch } -func (b *Batch) getHostMetrics(host *HostInfo) *queryMetrics { - hostMetrics, exists := b.metrics[host.ConnectAddress().String()] +func (b *Batch) getHostMetrics(host *HostInfo) *hostMetrics { + b.metrics.l.Lock() + metrics, exists := b.metrics.m[host.ConnectAddress().String()] if !exists { // if the host is not in the map, it means it's been accessed for the first time - hostMetrics = &queryMetrics{Attempts: 0, TotalLatency: 0} - b.metrics[host.ConnectAddress().String()] = hostMetrics + metrics = &hostMetrics{} + b.metrics.m[host.ConnectAddress().String()] = metrics } + b.metrics.l.Unlock() - return hostMetrics + return metrics } // Observer enables batch-level observer on this batch. 
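The metrics refactor above replaces the plain map with a mutex-guarded map of per-host hostMetrics, and routes all updates through AddAttempts and AddLatency, so concurrent speculative executions can record attempts and latency safely. A condensed sketch of the same aggregation pattern, with names of my own choosing rather than gocql's:

```go
package metrics

import "sync"

// hostStats mirrors the per-host metrics idea: attempts and total latency,
// keyed by host address and guarded by a mutex so concurrent executions
// can update them without races.
type hostStats struct {
	Attempts     int
	TotalLatency int64 // nanoseconds
}

type perHostMetrics struct {
	mu sync.Mutex
	m  map[string]*hostStats
}

func newPerHostMetrics() *perHostMetrics {
	return &perHostMetrics{m: make(map[string]*hostStats)}
}

// add records one or more attempts and their latency against a host,
// creating the per-host entry on first use.
func (p *perHostMetrics) add(host string, attempts int, latency int64) {
	p.mu.Lock()
	defer p.mu.Unlock()
	s, ok := p.m[host]
	if !ok {
		s = &hostStats{}
		p.m[host] = s
	}
	s.Attempts += attempts
	s.TotalLatency += latency
}

// averageLatency reports nanoseconds per attempt across all hosts,
// the same calculation Query.Latency performs.
func (p *perHostMetrics) averageLatency() int64 {
	p.mu.Lock()
	defer p.mu.Unlock()
	var attempts int
	var latency int64
	for _, s := range p.m {
		attempts += s.Attempts
		latency += s.TotalLatency
	}
	if attempts == 0 {
		return 0
	}
	return latency / int64(attempts)
}
```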
@@ -1493,18 +1538,33 @@ func (b *Batch) Keyspace() string { // Attempts returns the number of attempts made to execute the batch. func (b *Batch) Attempts() int { - attempts := 0 - for _, metric := range b.metrics { + b.metrics.l.Lock() + defer b.metrics.l.Unlock() + + var attempts int + for _, metric := range b.metrics.m { attempts += metric.Attempts } return attempts } +func (b *Batch) AddAttempts(i int, host *HostInfo) { + hostMetric := b.getHostMetrics(host) + b.metrics.l.Lock() + hostMetric.Attempts += i + b.metrics.l.Unlock() +} + //Latency returns the average number of nanoseconds to execute a single attempt of the batch. func (b *Batch) Latency() int64 { - attempts := 0 - var latency int64 = 0 - for _, metric := range b.metrics { + b.metrics.l.Lock() + defer b.metrics.l.Unlock() + + var ( + attempts int + latency int64 + ) + for _, metric := range b.metrics.m { attempts += metric.Attempts latency += metric.TotalLatency } @@ -1514,6 +1574,13 @@ func (b *Batch) Latency() int64 { return 0 } +func (b *Batch) AddLatency(l int64, host *HostInfo) { + hostMetric := b.getHostMetrics(host) + b.metrics.l.Lock() + hostMetric.TotalLatency += l + b.metrics.l.Unlock() +} + // GetConsistency returns the currently configured consistency level for the batch // operation. func (b *Batch) GetConsistency() Consistency { @@ -1526,6 +1593,28 @@ func (b *Batch) SetConsistency(c Consistency) { b.Cons = c } +func (b *Batch) GetContext() context.Context { + return b.context +} + +func (b *Batch) IsIdempotent() bool { + for _, entry := range b.Entries { + if !entry.Idempotent { + return false + } + } + return true +} + +func (b *Batch) speculativeExecutionPolicy() SpeculativeExecutionPolicy { + return b.spec +} + +func (b *Batch) SpeculativeExecutionPolicy(sp SpeculativeExecutionPolicy) *Batch { + b.spec = sp + return b +} + // Query adds the query to the batch operation func (b *Batch) Query(stmt string, args ...interface{}) { b.Entries = append(b.Entries, BatchEntry{Stmt: stmt, Args: args}) @@ -1601,9 +1690,8 @@ func (b *Batch) WithTimestamp(timestamp int64) *Batch { } func (b *Batch) attempt(keyspace string, end, start time.Time, iter *Iter, host *HostInfo) { - hostMetrics := b.getHostMetrics(host) - hostMetrics.Attempts++ - hostMetrics.TotalLatency += end.Sub(start).Nanoseconds() + b.AddAttempts(1, host) + b.AddLatency(end.Sub(start).Nanoseconds(), host) if b.observer == nil { return @@ -1621,7 +1709,7 @@ func (b *Batch) attempt(keyspace string, end, start time.Time, iter *Iter, host End: end, // Rows not used in batch observations // TODO - might be able to support it when using BatchCAS Host: host, - Metrics: hostMetrics, + Metrics: b.getHostMetrics(host), Err: iter.err, }) } @@ -1640,9 +1728,10 @@ const ( ) type BatchEntry struct { - Stmt string - Args []interface{} - binding func(q *QueryInfo) ([]interface{}, error) + Stmt string + Args []interface{} + Idempotent bool + binding func(q *QueryInfo) ([]interface{}, error) } type ColumnInfo struct { @@ -1775,7 +1864,7 @@ type ObservedQuery struct { Host *HostInfo // The metrics per this host - Metrics *queryMetrics + Metrics *hostMetrics // Err is the error in the query. // It only tracks network errors or errors of bad cassandra syntax, in particular selects with no match return nil error @@ -1807,7 +1896,7 @@ type ObservedBatch struct { Err error // The metrics per this host - Metrics *queryMetrics + Metrics *hostMetrics } // BatchObserver is the interface implemented by batch observers / stat collectors. 
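Putting the gocql pieces together: a query opts in to hedged execution by being idempotent and carrying a SpeculativeExecutionPolicy, and the executor then races the extra attempts and returns the first result. A usage sketch under the usual gocql setup (the contact point, keyspace, and statement are placeholders):

```go
package main

import (
	"log"
	"time"

	"github.com/gocql/gocql"
)

func main() {
	cluster := gocql.NewCluster("127.0.0.1") // placeholder contact point
	cluster.Keyspace = "example"             // placeholder keyspace
	cluster.DefaultIdempotence = true        // only idempotent queries may speculate

	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Launch up to two extra executions, 100ms apart, in addition to the main one.
	spec := &gocql.SimpleSpeculativeExecution{NumAttempts: 2, TimeoutDelay: 100 * time.Millisecond}

	var name string
	err = session.Query(`SELECT name FROM users WHERE id = ?`, 42).
		SetSpeculativeExecutionPolicy(spec).
		Consistency(gocql.One).
		Scan(&name)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("name:", name)
}
```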
diff --git a/vendor/github.com/google/go-github/github/activity_events.go b/vendor/github.com/google/go-github/github/activity_events.go index a919b11c5..47f0c735b 100644 --- a/vendor/github.com/google/go-github/github/activity_events.go +++ b/vendor/github.com/google/go-github/github/activity_events.go @@ -96,6 +96,8 @@ func (e *Event) ParsePayload() (payload interface{}, err error) { payload = &ReleaseEvent{} case "RepositoryEvent": payload = &RepositoryEvent{} + case "RepositoryVulnerabilityAlertEvent": + payload = &RepositoryVulnerabilityAlertEvent{} case "StatusEvent": payload = &StatusEvent{} case "TeamEvent": diff --git a/vendor/github.com/google/go-github/github/apps.go b/vendor/github.com/google/go-github/github/apps.go index 32d4f2f45..ae3aabda3 100644 --- a/vendor/github.com/google/go-github/github/apps.go +++ b/vendor/github.com/google/go-github/github/apps.go @@ -164,7 +164,7 @@ func (s *AppsService) ListUserInstallations(ctx context.Context, opt *ListOption // // GitHub API docs: https://developer.github.com/v3/apps/#create-a-new-installation-token func (s *AppsService) CreateInstallationToken(ctx context.Context, id int64) (*InstallationToken, *Response, error) { - u := fmt.Sprintf("installations/%v/access_tokens", id) + u := fmt.Sprintf("app/installations/%v/access_tokens", id) req, err := s.client.NewRequest("POST", u, nil) if err != nil { diff --git a/vendor/github.com/google/go-github/github/event_types.go b/vendor/github.com/google/go-github/github/event_types.go index 9f49b34f9..1792f43c7 100644 --- a/vendor/github.com/google/go-github/github/event_types.go +++ b/vendor/github.com/google/go-github/github/event_types.go @@ -710,6 +710,27 @@ type RepositoryEvent struct { Installation *Installation `json:"installation,omitempty"` } +// RepositoryVulnerabilityAlertEvent is triggered when a security alert is created, dismissed, or resolved. +// +// GitHub API docs: https://developer.github.com/v3/activity/events/types/#repositoryvulnerabilityalertevent +type RepositoryVulnerabilityAlertEvent struct { + // Action is the action that was performed. This can be: "create", "dismiss", "resolve". + Action *string `json:"action,omitempty"` + + //The security alert of the vulnerable dependency. + Alert *struct { + ID *int64 `json:"id,omitempty"` + AffectedRange *string `json:"affected_range,omitempty"` + AffectedPackageName *string `json:"affected_package_name,omitempty"` + ExternalReference *string `json:"external_reference,omitempty"` + ExternalIdentifier *string `json:"external_identifier,omitempty"` + FixedIn *string `json:"fixed_in,omitempty"` + Dismisser *User `json:"dismisser,omitempty"` + DismissReason *string `json:"dismiss_reason,omitempty"` + DismissedAt *Timestamp `json:"dismissed_at,omitempty"` + } `json:"alert,omitempty"` +} + // StatusEvent is triggered when the status of a Git commit changes. // The Webhook event name is "status". // diff --git a/vendor/github.com/google/go-github/github/git_commits.go b/vendor/github.com/google/go-github/github/git_commits.go index 1eb48a8e2..a2b17fcc3 100644 --- a/vendor/github.com/google/go-github/github/git_commits.go +++ b/vendor/github.com/google/go-github/github/git_commits.go @@ -58,7 +58,7 @@ func (c CommitAuthor) String() string { return Stringify(c) } -// GetCommit fetchs the Commit object for a given SHA. +// GetCommit fetches the Commit object for a given SHA. 
// // GitHub API docs: https://developer.github.com/v3/git/commits/#get-a-commit func (s *GitService) GetCommit(ctx context.Context, owner string, repo string, sha string) (*Commit, *Response, error) { diff --git a/vendor/github.com/google/go-github/github/git_tags.go b/vendor/github.com/google/go-github/github/git_tags.go index 90398b380..f66e4028f 100644 --- a/vendor/github.com/google/go-github/github/git_tags.go +++ b/vendor/github.com/google/go-github/github/git_tags.go @@ -33,7 +33,7 @@ type createTagRequest struct { Tagger *CommitAuthor `json:"tagger,omitempty"` } -// GetTag fetchs a tag from a repo given a SHA. +// GetTag fetches a tag from a repo given a SHA. // // GitHub API docs: https://developer.github.com/v3/git/tags/#get-a-tag func (s *GitService) GetTag(ctx context.Context, owner string, repo string, sha string) (*Tag, *Response, error) { diff --git a/vendor/github.com/google/go-github/github/github-accessors.go b/vendor/github.com/google/go-github/github/github-accessors.go index 22b5c0954..3ec79c2ca 100644 --- a/vendor/github.com/google/go-github/github/github-accessors.go +++ b/vendor/github.com/google/go-github/github/github-accessors.go @@ -3140,14 +3140,6 @@ func (h *Hook) GetID() int64 { return *h.ID } -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (h *Hook) GetName() string { - if h == nil || h.Name == nil { - return "" - } - return *h.Name -} - // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. func (h *Hook) GetUpdatedAt() time.Time { if h == nil || h.UpdatedAt == nil { @@ -9964,6 +9956,14 @@ func (r *RepositoryTag) GetZipballURL() string { return *r.ZipballURL } +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (r *RepositoryVulnerabilityAlertEvent) GetAction() string { + if r == nil || r.Action == nil { + return "" + } + return *r.Action +} + // GetForkRepos returns the ForkRepos field if it's non-nil, zero value otherwise. func (r *RepoStats) GetForkRepos() int { if r == nil || r.ForkRepos == nil { diff --git a/vendor/github.com/google/go-github/github/messages.go b/vendor/github.com/google/go-github/github/messages.go index 519c1c03d..75bc77051 100644 --- a/vendor/github.com/google/go-github/github/messages.go +++ b/vendor/github.com/google/go-github/github/messages.go @@ -41,42 +41,43 @@ const ( var ( // eventTypeMapping maps webhooks types to their corresponding go-github struct types. 
eventTypeMapping = map[string]string{ - "check_run": "CheckRunEvent", - "check_suite": "CheckSuiteEvent", - "commit_comment": "CommitCommentEvent", - "create": "CreateEvent", - "delete": "DeleteEvent", - "deployment": "DeploymentEvent", - "deployment_status": "DeploymentStatusEvent", - "fork": "ForkEvent", - "gollum": "GollumEvent", - "installation": "InstallationEvent", - "installation_repositories": "InstallationRepositoriesEvent", - "issue_comment": "IssueCommentEvent", - "issues": "IssuesEvent", - "label": "LabelEvent", - "marketplace_purchase": "MarketplacePurchaseEvent", - "member": "MemberEvent", - "membership": "MembershipEvent", - "milestone": "MilestoneEvent", - "organization": "OrganizationEvent", - "org_block": "OrgBlockEvent", - "page_build": "PageBuildEvent", - "ping": "PingEvent", - "project": "ProjectEvent", - "project_card": "ProjectCardEvent", - "project_column": "ProjectColumnEvent", - "public": "PublicEvent", - "pull_request_review": "PullRequestReviewEvent", - "pull_request_review_comment": "PullRequestReviewCommentEvent", - "pull_request": "PullRequestEvent", - "push": "PushEvent", - "repository": "RepositoryEvent", - "release": "ReleaseEvent", - "status": "StatusEvent", - "team": "TeamEvent", - "team_add": "TeamAddEvent", - "watch": "WatchEvent", + "check_run": "CheckRunEvent", + "check_suite": "CheckSuiteEvent", + "commit_comment": "CommitCommentEvent", + "create": "CreateEvent", + "delete": "DeleteEvent", + "deployment": "DeploymentEvent", + "deployment_status": "DeploymentStatusEvent", + "fork": "ForkEvent", + "gollum": "GollumEvent", + "installation": "InstallationEvent", + "installation_repositories": "InstallationRepositoriesEvent", + "issue_comment": "IssueCommentEvent", + "issues": "IssuesEvent", + "label": "LabelEvent", + "marketplace_purchase": "MarketplacePurchaseEvent", + "member": "MemberEvent", + "membership": "MembershipEvent", + "milestone": "MilestoneEvent", + "organization": "OrganizationEvent", + "org_block": "OrgBlockEvent", + "page_build": "PageBuildEvent", + "ping": "PingEvent", + "project": "ProjectEvent", + "project_card": "ProjectCardEvent", + "project_column": "ProjectColumnEvent", + "public": "PublicEvent", + "pull_request_review": "PullRequestReviewEvent", + "pull_request_review_comment": "PullRequestReviewCommentEvent", + "pull_request": "PullRequestEvent", + "push": "PushEvent", + "repository": "RepositoryEvent", + "repository_vulnerability_alert": "RepositoryVulnerabilityAlertEvent", + "release": "ReleaseEvent", + "status": "StatusEvent", + "team": "TeamEvent", + "team_add": "TeamAddEvent", + "watch": "WatchEvent", } ) diff --git a/vendor/github.com/google/go-github/github/orgs_hooks.go b/vendor/github.com/google/go-github/github/orgs_hooks.go index c4dc134c4..b710ea402 100644 --- a/vendor/github.com/google/go-github/github/orgs_hooks.go +++ b/vendor/github.com/google/go-github/github/orgs_hooks.go @@ -49,7 +49,7 @@ func (s *OrganizationsService) GetHook(ctx context.Context, org string, id int64 } // CreateHook creates a Hook for the specified org. -// Name and Config are required fields. +// Config is a required field. // // Note that only a subset of the hook fields are used and hook must // not be nil. 
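With "repository_vulnerability_alert" added to eventTypeMapping, webhook deliveries of that type can now be decoded into the new RepositoryVulnerabilityAlertEvent. A handler sketch using go-github's standard webhook helpers (the secret, route, and log lines are placeholders):

```go
package main

import (
	"log"
	"net/http"

	"github.com/google/go-github/github"
)

func handleWebhook(w http.ResponseWriter, r *http.Request) {
	// "secret" is a placeholder for the webhook secret configured on GitHub.
	payload, err := github.ValidatePayload(r, []byte("secret"))
	if err != nil {
		http.Error(w, "invalid signature", http.StatusBadRequest)
		return
	}
	event, err := github.ParseWebHook(github.WebHookType(r), payload)
	if err != nil {
		http.Error(w, "unparseable payload", http.StatusBadRequest)
		return
	}
	switch e := event.(type) {
	case *github.RepositoryVulnerabilityAlertEvent:
		// Action is "create", "dismiss", or "resolve".
		log.Printf("vulnerability alert: action=%s", e.GetAction())
	default:
		log.Printf("ignoring event type %T", e)
	}
}

func main() {
	http.HandleFunc("/webhook", handleWebhook)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```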
@@ -59,7 +59,6 @@ func (s *OrganizationsService) CreateHook(ctx context.Context, org string, hook u := fmt.Sprintf("orgs/%v/hooks", org) hookReq := &createHookRequest{ - Name: hook.Name, Events: hook.Events, Active: hook.Active, Config: hook.Config, diff --git a/vendor/github.com/google/go-github/github/pulls.go b/vendor/github.com/google/go-github/github/pulls.go index 32655651e..a0c3c041c 100644 --- a/vendor/github.com/google/go-github/github/pulls.go +++ b/vendor/github.com/google/go-github/github/pulls.go @@ -60,6 +60,10 @@ type PullRequest struct { NodeID *string `json:"node_id,omitempty"` RequestedReviewers []*User `json:"requested_reviewers,omitempty"` + // RequestedTeams is populated as part of the PullRequestEvent. + // See, https://developer.github.com/v3/activity/events/types/#pullrequestevent for an example. + RequestedTeams []*Team `json:"requested_teams,omitempty"` + Links *PRLinks `json:"_links,omitempty"` Head *PullRequestBranch `json:"head,omitempty"` Base *PullRequestBranch `json:"base,omitempty"` diff --git a/vendor/github.com/google/go-github/github/repos_hooks.go b/vendor/github.com/google/go-github/github/repos_hooks.go index 564f5e576..56374b3ec 100644 --- a/vendor/github.com/google/go-github/github/repos_hooks.go +++ b/vendor/github.com/google/go-github/github/repos_hooks.go @@ -75,8 +75,7 @@ type Hook struct { ID *int64 `json:"id,omitempty"` // Only the following fields are used when creating a hook. - // Name and Config are required. - Name *string `json:"name,omitempty"` + // Config is required. Config map[string]interface{} `json:"config,omitempty"` Events []string `json:"events,omitempty"` Active *bool `json:"active,omitempty"` @@ -92,16 +91,14 @@ func (h Hook) String() string { // See https://github.com/google/go-github/issues/1015 for more // information. type createHookRequest struct { - // Name and Config are required. - // Name must be passed as "web". - Name *string `json:"name,omitempty"` + // Config is required. Config map[string]interface{} `json:"config,omitempty"` Events []string `json:"events,omitempty"` Active *bool `json:"active,omitempty"` } // CreateHook creates a Hook for the specified repository. -// Name and Config are required fields. +// Config is a required field. // // Note that only a subset of the hook fields are used and hook must // not be nil. @@ -111,7 +108,6 @@ func (s *RepositoriesService) CreateHook(ctx context.Context, owner, repo string u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo) hookReq := &createHookRequest{ - Name: hook.Name, Events: hook.Events, Active: hook.Active, Config: hook.Config, diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index 41505e769..d6002f583 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -38,6 +38,18 @@ const ( ProxyExecModeScript ProxyExecMode = "script" ) +// UpstreamDestType is the type of upstream discovery mechanism. +type UpstreamDestType string + +const ( + // UpstreamDestTypeService discovers instances via healthy service lookup. + UpstreamDestTypeService UpstreamDestType = "service" + + // UpstreamDestTypePreparedQuery discovers instances via prepared query + // execution. 
+ UpstreamDestTypePreparedQuery UpstreamDestType = "prepared_query" +) + // AgentCheck represents a check known to the agent type AgentCheck struct { Node string @@ -59,7 +71,7 @@ type AgentWeights struct { // AgentService represents a service known to the agent type AgentService struct { - Kind ServiceKind + Kind ServiceKind `json:",omitempty"` ID string Service string Tags []string @@ -68,24 +80,40 @@ type AgentService struct { Address string Weights AgentWeights EnableTagOverride bool - CreateIndex uint64 - ModifyIndex uint64 - ProxyDestination string - Connect *AgentServiceConnect + CreateIndex uint64 `json:",omitempty"` + ModifyIndex uint64 `json:",omitempty"` + ContentHash string `json:",omitempty"` + // DEPRECATED (ProxyDestination) - remove this field + ProxyDestination string `json:",omitempty"` + Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` + Connect *AgentServiceConnect `json:",omitempty"` } // AgentServiceConnect represents the Connect configuration of a service. type AgentServiceConnect struct { - Native bool - Proxy *AgentServiceConnectProxy + Native bool `json:",omitempty"` + Proxy *AgentServiceConnectProxy `json:",omitempty"` + SidecarService *AgentServiceRegistration `json:",omitempty"` } // AgentServiceConnectProxy represents the Connect Proxy configuration of a // service. type AgentServiceConnectProxy struct { - ExecMode ProxyExecMode - Command []string - Config map[string]interface{} + ExecMode ProxyExecMode `json:",omitempty"` + Command []string `json:",omitempty"` + Config map[string]interface{} `json:",omitempty"` + Upstreams []Upstream `json:",omitempty"` +} + +// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy +// ServiceDefinition or response. +type AgentServiceConnectProxyConfig struct { + DestinationServiceName string + DestinationServiceID string `json:",omitempty"` + LocalServiceAddress string `json:",omitempty"` + LocalServicePort int `json:",omitempty"` + Config map[string]interface{} `json:",omitempty"` + Upstreams []Upstream } // AgentMember represents a cluster member known to the agent @@ -129,8 +157,10 @@ type AgentServiceRegistration struct { Weights *AgentWeights `json:",omitempty"` Check *AgentServiceCheck Checks AgentServiceChecks - ProxyDestination string `json:",omitempty"` - Connect *AgentServiceConnect `json:",omitempty"` + // DEPRECATED (ProxyDestination) - remove this field + ProxyDestination string `json:",omitempty"` + Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` + Connect *AgentServiceConnect `json:",omitempty"` } // AgentCheckRegistration is used to register a new check @@ -161,6 +191,8 @@ type AgentServiceCheck struct { TLSSkipVerify bool `json:",omitempty"` GRPC string `json:",omitempty"` GRPCUseTLS bool `json:",omitempty"` + AliasNode string `json:",omitempty"` + AliasService string `json:",omitempty"` // In Consul 0.7 and later, checks that are associated with a service // may also contain this optional DeregisterCriticalServiceAfter field, @@ -233,9 +265,23 @@ type ConnectProxyConfig struct { TargetServiceID string TargetServiceName string ContentHash string - ExecMode ProxyExecMode - Command []string - Config map[string]interface{} + // DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs + // but they don't need ExecMode or Command + ExecMode ProxyExecMode `json:",omitempty"` + Command []string `json:",omitempty"` + Config map[string]interface{} + Upstreams []Upstream +} + +// Upstream is the response structure for a proxy upstream configuration. 
+type Upstream struct { + DestinationType UpstreamDestType `json:",omitempty"` + DestinationNamespace string `json:",omitempty"` + DestinationName string + Datacenter string `json:",omitempty"` + LocalBindAddress string `json:",omitempty"` + LocalBindPort int `json:",omitempty"` + Config map[string]interface{} `json:",omitempty"` } // Agent can be used to query the Agent endpoints @@ -343,6 +389,33 @@ func (a *Agent) Services() (map[string]*AgentService, error) { return out, nil } +// Service returns a locally registered service instance and allows for +// hash-based blocking. +// +// Note that this uses an unconventional blocking mechanism since it's +// agent-local state. That means there is no persistent raft index so we block +// based on object hash instead. +func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/agent/service/"+serviceID) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return out, qm, nil +} + // Members returns the known gossip members. The WAN // flag can be used to query a server for WAN members. func (a *Agent) Members(wan bool) ([]*AgentMember, error) { diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 649238302..6efd9d4b0 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -61,6 +61,12 @@ const ( // HTTPSSLVerifyEnvName defines an environment variable name which sets // whether or not to disable certificate checking. HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" + + // GRPCAddrEnvName defines an environment variable name which sets the gRPC + // address for consul connect envoy. Note this isn't actually used by the api + // client in this package but is defined here for consistency with all the + // other ENV names we use. + GRPCAddrEnvName = "CONSUL_GRPC_ADDR" ) // QueryOptions are used to parameterize a query @@ -78,6 +84,27 @@ type QueryOptions struct { // read. RequireConsistent bool + // UseCache requests that the agent cache results locally. See + // https://www.consul.io/api/index.html#agent-caching for more details on the + // semantics. + UseCache bool + + // MaxAge limits how old a cached value will be returned if UseCache is true. + // If there is a cached response that is older than the MaxAge, it is treated + // as a cache miss and a new fetch invoked. If the fetch fails, the error is + // returned. Clients that wish to allow for stale results on error can set + // StaleIfError to a longer duration to change this behaviour. It is ignored + // if the endpoint supports background refresh caching. See + // https://www.consul.io/api/index.html#agent-caching for more details. + MaxAge time.Duration + + // StaleIfError specifies how stale the client will accept a cached response + // if the servers are unavailable to fetch a fresh one. Only makes sense when + // UseCache is true and MaxAge is set to a lower, non-zero value. It is + // ignored if the endpoint supports background refresh caching. See + // https://www.consul.io/api/index.html#agent-caching for more details. + StaleIfError time.Duration + // WaitIndex is used to enable a blocking query. 
Waits // until the timeout or the next index is reached WaitIndex uint64 @@ -196,6 +223,13 @@ type QueryMeta struct { // Is address translation enabled for HTTP responses on this agent AddressTranslationEnabled bool + + // CacheHit is true if the result was served from agent-local cache. + CacheHit bool + + // CacheAge is set if request was ?cached and indicates how stale the cached + // response is. + CacheAge time.Duration } // WriteMeta is used to return meta data about a write @@ -591,6 +625,20 @@ func (r *request) setQueryOptions(q *QueryOptions) { if q.Connect { r.params.Set("connect", "true") } + if q.UseCache && !q.RequireConsistent { + r.params.Set("cached", "") + + cc := []string{} + if q.MaxAge > 0 { + cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds())) + } + if q.StaleIfError > 0 { + cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds())) + } + if len(cc) > 0 { + r.header.Set("Cache-Control", strings.Join(cc, ", ")) + } + } r.ctx = q.ctx } @@ -802,6 +850,18 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error { q.AddressTranslationEnabled = false } + // Parse Cache info + if cacheStr := header.Get("X-Cache"); cacheStr != "" { + q.CacheHit = strings.EqualFold(cacheStr, "HIT") + } + if ageStr := header.Get("Age"); ageStr != "" { + age, err := strconv.ParseUint(ageStr, 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse Age Header: %v", err) + } + q.CacheAge = time.Duration(age) * time.Second + } + return nil } diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go index 6cb745c36..3ca89a472 100644 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -31,8 +31,11 @@ type CatalogService struct { ServicePort int ServiceWeights Weights ServiceEnableTagOverride bool - CreateIndex uint64 - ModifyIndex uint64 + // DEPRECATED (ProxyDestination) - remove the next comment! + // We forgot to ever add ServiceProxyDestination here so no need to deprecate! 
+ ServiceProxy *AgentServiceConnectProxyConfig + CreateIndex uint64 + ModifyIndex uint64 } type CatalogNode struct { @@ -162,23 +165,43 @@ func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, er // Service is used to query catalog entries for a given service func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - return c.service(service, tag, q, false) + var tags []string + if tag != "" { + tags = []string{tag} + } + return c.service(service, tags, q, false) +} + +// Supports multiple tags for filtering +func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + return c.service(service, tags, q, false) } // Connect is used to query catalog entries for a given Connect-enabled service func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - return c.service(service, tag, q, true) + var tags []string + if tag != "" { + tags = []string{tag} + } + return c.service(service, tags, q, true) } -func (c *Catalog) service(service, tag string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) { +// Supports multiple tags for filtering +func (c *Catalog) ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + return c.service(service, tags, q, true) +} + +func (c *Catalog) service(service string, tags []string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) { path := "/v1/catalog/service/" + service if connect { path = "/v1/catalog/connect/" + service } r := c.c.newRequest("GET", path) r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) + if len(tags) > 0 { + for _, tag := range tags { + r.params.Add("tag", tag) + } } rtt, resp, err := requireOK(c.c.doRequest(r)) if err != nil { diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go index 1835da559..eae6a01a8 100644 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -159,7 +159,15 @@ func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMe // for a given service. It can optionally do server-side filtering on a tag // or nodes with passing health checks only. func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - return h.service(service, tag, passingOnly, q, false) + var tags []string + if tag != "" { + tags = []string{tag} + } + return h.service(service, tags, passingOnly, q, false) +} + +func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + return h.service(service, tags, passingOnly, q, false) } // Connect is equivalent to Service except that it will only return services @@ -168,18 +176,28 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) // passingOnly is true only instances where both the service and any proxy are // healthy will be returned. 
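// Illustrative usage sketch (not from the vendored change itself): querying
// healthy instances of a service by several tags with the ServiceMultipleTags
// helper added above, combined with the new agent-cache QueryOptions. The
// service name "web" and the tags are assumptions for the example.
package main

import (
	"fmt"
	"log"
	"time"

	consul "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consul.NewClient(consul.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	q := &consul.QueryOptions{
		UseCache:     true,             // serve from the local agent cache when possible
		MaxAge:       30 * time.Second, // entries older than this count as a cache miss
		StaleIfError: 5 * time.Minute,  // accept data this stale if the servers are unavailable
	}

	entries, meta, err := client.Health().ServiceMultipleTags("web", []string{"v1", "primary"}, true, q)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d healthy instances (cache hit: %v, age: %s)\n", len(entries), meta.CacheHit, meta.CacheAge)
}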
func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - return h.service(service, tag, passingOnly, q, true) + var tags []string + if tag != "" { + tags = []string{tag} + } + return h.service(service, tags, passingOnly, q, true) } -func (h *Health) service(service, tag string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) { +func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + return h.service(service, tags, passingOnly, q, true) +} + +func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) { path := "/v1/health/service/" + service if connect { path = "/v1/health/connect/" + service } r := h.c.newRequest("GET", path) r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) + if len(tags) > 0 { + for _, tag := range tags { + r.params.Add("tag", tag) + } } if passingOnly { r.params.Set(HealthPassing, "1") diff --git a/vendor/github.com/hashicorp/consul/version/version.go b/vendor/github.com/hashicorp/consul/version/version.go index 5800c96e5..ca3907593 100644 --- a/vendor/github.com/hashicorp/consul/version/version.go +++ b/vendor/github.com/hashicorp/consul/version/version.go @@ -15,7 +15,7 @@ var ( // // Version must conform to the format expected by github.com/hashicorp/go-version // for tests to work. - Version = "1.2.3" + Version = "1.3.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod new file mode 100644 index 000000000..20112852c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -0,0 +1,13 @@ +module github.com/hashicorp/go-plugin + +require ( + github.com/golang/protobuf v1.2.0 + github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb + github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 + github.com/oklog/run v1.0.0 + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d + golang.org/x/text v0.3.0 // indirect + google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect + google.golang.org/grpc v1.14.0 +) diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum new file mode 100644 index 000000000..9ae0bec8e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -0,0 +1,18 @@ +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod 
h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= diff --git a/vendor/github.com/hashicorp/nomad/api/allocations.go b/vendor/github.com/hashicorp/nomad/api/allocations.go index a3a830481..371e4feeb 100644 --- a/vendor/github.com/hashicorp/nomad/api/allocations.go +++ b/vendor/github.com/hashicorp/nomad/api/allocations.go @@ -77,6 +77,7 @@ type Allocation struct { TaskGroup string Resources *Resources TaskResources map[string]*Resources + AllocatedResources *AllocatedResources Services map[string]string Metrics *AllocationMetric DesiredStatus string @@ -158,6 +159,29 @@ type AllocDeploymentStatus struct { ModifyIndex uint64 } +type AllocatedResources struct { + Tasks map[string]*AllocatedTaskResources + Shared AllocatedSharedResources +} + +type AllocatedTaskResources struct { + Cpu AllocatedCpuResources + Memory AllocatedMemoryResources + Networks []*NetworkResource +} + +type AllocatedSharedResources struct { + DiskMB uint64 +} + +type AllocatedCpuResources struct { + CpuShares uint64 +} + +type AllocatedMemoryResources struct { + MemoryMB uint64 +} + // AllocIndexSort reverse sorts allocs by CreateIndex. type AllocIndexSort []*AllocationListStub diff --git a/vendor/github.com/hashicorp/nomad/api/nodes.go b/vendor/github.com/hashicorp/nomad/api/nodes.go index 6184f6cd7..beb109c2e 100644 --- a/vendor/github.com/hashicorp/nomad/api/nodes.go +++ b/vendor/github.com/hashicorp/nomad/api/nodes.go @@ -446,6 +446,8 @@ type Node struct { Attributes map[string]string Resources *Resources Reserved *Resources + NodeResources *NodeResources + ReservedResources *NodeReservedResources Links map[string]string Meta map[string]string NodeClass string @@ -461,6 +463,48 @@ type Node struct { ModifyIndex uint64 } +type NodeResources struct { + Cpu NodeCpuResources + Memory NodeMemoryResources + Disk NodeDiskResources + Networks []*NetworkResource +} + +type NodeCpuResources struct { + TotalShares uint64 +} + +type NodeMemoryResources struct { + MemoryMB uint64 +} + +type NodeDiskResources struct { + DiskMB uint64 +} + +type NodeReservedResources struct { + Cpu NodeReservedCpuResources + Memory NodeReservedMemoryResources + Disk NodeReservedDiskResources + Networks NodeReservedNetworkResources +} + +type NodeReservedCpuResources struct { + TotalShares uint64 +} + +type NodeReservedMemoryResources struct { + MemoryMB uint64 +} + +type NodeReservedDiskResources struct { + DiskMB uint64 +} + +type NodeReservedNetworkResources struct { + ReservedHostPorts string +} + // DrainStrategy describes a Node's drain behavior. 
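// Illustrative usage sketch (not from the vendored change itself): reading the
// NodeResources field added to the Nomad API Node struct above. The node ID is
// a placeholder.
package main

import (
	"fmt"
	"log"

	nomad "github.com/hashicorp/nomad/api"
)

func main() {
	client, err := nomad.NewClient(nomad.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	node, _, err := client.Nodes().Info("<node-id>", nil)
	if err != nil {
		log.Fatal(err)
	}

	// NodeResources may be nil on older nodes, which still report only the
	// legacy Resources field (see the COMPAT notes in this change).
	if node.NodeResources != nil {
		fmt.Printf("cpu shares: %d, memory: %d MB, disk: %d MB\n",
			node.NodeResources.Cpu.TotalShares,
			node.NodeResources.Memory.MemoryMB,
			node.NodeResources.Disk.DiskMB)
	}
}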
type DrainStrategy struct { // DrainSpec is the user declared drain specification diff --git a/vendor/github.com/hashicorp/nomad/api/resources.go b/vendor/github.com/hashicorp/nomad/api/resources.go index 1abcf209d..ddcd949e3 100644 --- a/vendor/github.com/hashicorp/nomad/api/resources.go +++ b/vendor/github.com/hashicorp/nomad/api/resources.go @@ -10,6 +10,7 @@ type Resources struct { DiskMB *int `mapstructure:"disk"` IOPS *int Networks []*NetworkResource + Devices []*RequestedDevice } // Canonicalize will supply missing values in the cases @@ -28,6 +29,9 @@ func (r *Resources) Canonicalize() { for _, n := range r.Networks { n.Canonicalize() } + for _, d := range r.Devices { + d.Canonicalize() + } } // DefaultResources is a small resources object that contains the @@ -75,6 +79,9 @@ func (r *Resources) Merge(other *Resources) { if len(other.Networks) != 0 { r.Networks = other.Networks } + if len(other.Devices) != 0 { + r.Devices = other.Devices + } } type Port struct { @@ -98,3 +105,34 @@ func (n *NetworkResource) Canonicalize() { n.MBits = helper.IntToPtr(10) } } + +// RequestedDevice is used to request a device for a task. +type RequestedDevice struct { + // Name is the request name. The possible values are as follows: + // * : A single value only specifies the type of request. + // * /: A single slash delimiter assumes the vendor and type of device is specified. + // * //: Two slash delimiters assume vendor, type and specific model are specified. + // + // Examples are as follows: + // * "gpu" + // * "nvidia/gpu" + // * "nvidia/gpu/GTX2080Ti" + Name string + + // Count is the number of requested devices + Count *uint64 + + // Constraints are a set of constraints to apply when selecting the device + // to use. + Constraints []*Constraint + + // Affinities are a set of affinites to apply when selecting the device + // to use. + Affinities []*Affinity +} + +func (d *RequestedDevice) Canonicalize() { + if d.Count == nil { + d.Count = helper.Uint64ToPtr(1) + } +} diff --git a/vendor/github.com/hashicorp/nomad/helper/funcs.go b/vendor/github.com/hashicorp/nomad/helper/funcs.go index 49b300c24..083ab865b 100644 --- a/vendor/github.com/hashicorp/nomad/helper/funcs.go +++ b/vendor/github.com/hashicorp/nomad/helper/funcs.go @@ -57,11 +57,16 @@ func Int64ToPtr(i int64) *int64 { return &i } -// UintToPtr returns the pointer to an uint +// Uint64ToPtr returns the pointer to an uint64 func Uint64ToPtr(u uint64) *uint64 { return &u } +// UintToPtr returns the pointer to an uint +func UintToPtr(u uint) *uint { + return &u +} + // StringToPtr returns the pointer to a string func StringToPtr(str string) *string { return &str @@ -72,6 +77,11 @@ func TimeToPtr(t time.Duration) *time.Duration { return &t } +// Float64ToPtr returns the pointer to an float64 +func Float64ToPtr(f float64) *float64 { + return &f +} + func IntMin(a, b int) int { if a < b { return a diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go index 2334af661..a5bf9a1bd 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go @@ -840,6 +840,11 @@ func (r *Resources) Diff(other *Resources, contextual bool) *ObjectDiff { diff.Objects = append(diff.Objects, nDiffs...) } + // Requested Devices diff + if nDiffs := requestedDevicesDiffs(r.Devices, other.Devices, contextual); nDiffs != nil { + diff.Objects = append(diff.Objects, nDiffs...) 
+ } + return diff } @@ -975,6 +980,67 @@ func portDiffs(old, new []Port, dynamic bool, contextual bool) []*ObjectDiff { } +// Diff returns a diff of two requested devices. If contextual diff is enabled, +// non-changed fields will still be returned. +func (r *RequestedDevice) Diff(other *RequestedDevice, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Device"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(r, other) { + return nil + } else if r == nil { + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(other, nil, true) + } else if other == nil { + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + newPrimitiveFlat = flatmap.Flatten(other, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + return diff +} + +// requestedDevicesDiffs diffs a set of RequestedDevices. If contextual diff is enabled, +// non-changed fields will still be returned. +func requestedDevicesDiffs(old, new []*RequestedDevice, contextual bool) []*ObjectDiff { + makeSet := func(devices []*RequestedDevice) map[string]*RequestedDevice { + deviceMap := make(map[string]*RequestedDevice, len(devices)) + for _, d := range devices { + deviceMap[d.Name] = d + } + + return deviceMap + } + + oldSet := makeSet(old) + newSet := makeSet(new) + + var diffs []*ObjectDiff + for k, oldV := range oldSet { + newV := newSet[k] + if diff := oldV.Diff(newV, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + for k, newV := range newSet { + if oldV, ok := oldSet[k]; !ok { + if diff := oldV.Diff(newV, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs + +} + // configDiff returns the diff of two Task Config objects. If contextual diff is // enabled, all fields will be returned, even if no diff occurred. func configDiff(old, new map[string]interface{}, contextual bool) *ObjectDiff { diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go index 5ace4bc8c..7493596f8 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go @@ -7,6 +7,7 @@ import ( "fmt" "math" "sort" + "strconv" "strings" "golang.org/x/crypto/blake2b" @@ -98,16 +99,12 @@ func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allo // The netIdx can optionally be provided if its already been computed. // If the netIdx is provided, it is assumed that the client has already // ensured there are no collisions. 
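// Illustrative usage sketch (not from the vendored change itself): requesting a
// device on a task's resources via the RequestedDevice type added in
// api/resources.go above. The vendor/type name and the count are assumptions.
package example

import (
	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
)

func gpuResources() *api.Resources {
	r := &api.Resources{
		CPU:      helper.IntToPtr(500),
		MemoryMB: helper.IntToPtr(256),
		Devices: []*api.RequestedDevice{
			{
				// Accepted name forms: "<type>", "<vendor>/<type>" or
				// "<vendor>/<type>/<model>", e.g. "gpu", "nvidia/gpu",
				// "nvidia/gpu/GTX2080Ti".
				Name:  "nvidia/gpu",
				Count: helper.Uint64ToPtr(1),
			},
		},
	}
	// Canonicalize fills defaults, including a device Count of 1 when unset.
	r.Canonicalize()
	return r
}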
-func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) { +func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *ComparableResources, error) { // Compute the utilization from zero - used := new(Resources) + used := new(ComparableResources) // Add the reserved resources of the node - if node.Reserved != nil { - if err := used.Add(node.Reserved); err != nil { - return false, "", nil, err - } - } + used.Add(node.ComparableReservedResources()) // For each alloc, add the resources for _, alloc := range allocs { @@ -116,32 +113,12 @@ func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, st continue } - if alloc.Resources != nil { - if err := used.Add(alloc.Resources); err != nil { - return false, "", nil, err - } - } else if alloc.TaskResources != nil { - - // Adding the shared resource asks for the allocation to the used - // resources - if err := used.Add(alloc.SharedResources); err != nil { - return false, "", nil, err - } - // Allocations within the plan have the combined resources stripped - // to save space, so sum up the individual task resources. - for _, taskResource := range alloc.TaskResources { - if err := used.Add(taskResource); err != nil { - return false, "", nil, err - } - } - } else { - return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID) - } + used.Add(alloc.ComparableResources()) } // Check that the node resources are a super set of those // that are being allocated - if superset, dimension := node.Resources.Superset(used); !superset { + if superset, dimension := node.ComparableResources().Superset(used); !superset { return false, dimension, used, nil } @@ -166,20 +143,22 @@ func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, st // ScoreFit is used to score the fit based on the Google work published here: // http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt // This is equivalent to their BestFit v3 -func ScoreFit(node *Node, util *Resources) float64 { +func ScoreFit(node *Node, util *ComparableResources) float64 { + // COMPAT(0.11): Remove in 0.11 + reserved := node.ComparableReservedResources() + res := node.ComparableResources() + // Determine the node availability - nodeCpu := float64(node.Resources.CPU) - if node.Reserved != nil { - nodeCpu -= float64(node.Reserved.CPU) - } - nodeMem := float64(node.Resources.MemoryMB) - if node.Reserved != nil { - nodeMem -= float64(node.Reserved.MemoryMB) + nodeCpu := float64(res.Flattened.Cpu.CpuShares) + nodeMem := float64(res.Flattened.Memory.MemoryMB) + if reserved != nil { + nodeCpu -= float64(reserved.Flattened.Cpu.CpuShares) + nodeMem -= float64(reserved.Flattened.Memory.MemoryMB) } // Compute the free percentage - freePctCpu := 1 - (float64(util.CPU) / nodeCpu) - freePctRam := 1 - (float64(util.MemoryMB) / nodeMem) + freePctCpu := 1 - (float64(util.Flattened.Cpu.CpuShares) / nodeCpu) + freePctRam := 1 - (float64(util.Flattened.Memory.MemoryMB) / nodeMem) // Total will be "maximized" the smaller the value is. // At 100% utilization, the total is 2, while at 0% util it is 20. @@ -378,3 +357,67 @@ func CompareMigrateToken(allocID, nodeSecretID, otherMigrateToken string) bool { } return subtle.ConstantTimeCompare(h.Sum(nil), otherBytes) == 1 } + +// ParsePortRanges parses the passed port range string and returns a list of the +// ports. The specification is a comma separated list of either port numbers or +// port ranges. 
A port number is a single integer and a port range is two +// integers separated by a hyphen. As an example the following spec would +// convert to: ParsePortRanges("10,12-14,16") -> []uint64{10, 12, 13, 14, 16} +func ParsePortRanges(spec string) ([]uint64, error) { + parts := strings.Split(spec, ",") + + // Hot path the empty case + if len(parts) == 1 && parts[0] == "" { + return nil, nil + } + + ports := make(map[uint64]struct{}) + for _, part := range parts { + part = strings.TrimSpace(part) + rangeParts := strings.Split(part, "-") + l := len(rangeParts) + switch l { + case 1: + if val := rangeParts[0]; val == "" { + return nil, fmt.Errorf("can't specify empty port") + } else { + port, err := strconv.ParseUint(val, 10, 0) + if err != nil { + return nil, err + } + ports[port] = struct{}{} + } + case 2: + // We are parsing a range + start, err := strconv.ParseUint(rangeParts[0], 10, 0) + if err != nil { + return nil, err + } + + end, err := strconv.ParseUint(rangeParts[1], 10, 0) + if err != nil { + return nil, err + } + + if end < start { + return nil, fmt.Errorf("invalid range: starting value (%v) less than ending (%v) value", end, start) + } + + for i := start; i <= end; i++ { + ports[i] = struct{}{} + } + default: + return nil, fmt.Errorf("can only parse single port numbers or port ranges (ex. 80,100-120,150)") + } + } + + var results []uint64 + for port := range ports { + results = append(results, port) + } + + sort.Slice(results, func(i, j int) bool { + return results[i] < results[j] + }) + return results, nil +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go index 3f0ebff4f..aaa81b64e 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go @@ -70,22 +70,36 @@ func (idx *NetworkIndex) Overcommitted() bool { // SetNode is used to setup the available network resources. 
Returns // true if there is a collision func (idx *NetworkIndex) SetNode(node *Node) (collide bool) { + + // COMPAT(0.11): Remove in 0.11 + // Grab the network resources, handling both new and old + var networks []*NetworkResource + if node.NodeResources != nil && len(node.NodeResources.Networks) != 0 { + networks = node.NodeResources.Networks + } else if node.Resources != nil { + networks = node.Resources.Networks + } + // Add the available CIDR blocks - for _, n := range node.Resources.Networks { + for _, n := range networks { if n.Device != "" { idx.AvailNetworks = append(idx.AvailNetworks, n) idx.AvailBandwidth[n.Device] = n.MBits } } - // Add the reserved resources - if r := node.Reserved; r != nil { - for _, n := range r.Networks { + // COMPAT(0.11): Remove in 0.11 + // Handle reserving ports, handling both new and old + if node.ReservedResources != nil && node.ReservedResources.Networks.ReservedHostPorts != "" { + collide = idx.AddReservedPortRange(node.ReservedResources.Networks.ReservedHostPorts) + } else if node.Reserved != nil { + for _, n := range node.Reserved.Networks { if idx.AddReserved(n) { collide = true } } } + return } @@ -93,13 +107,31 @@ func (idx *NetworkIndex) SetNode(node *Node) (collide bool) { // true if there is a collision func (idx *NetworkIndex) AddAllocs(allocs []*Allocation) (collide bool) { for _, alloc := range allocs { - for _, task := range alloc.TaskResources { - if len(task.Networks) == 0 { - continue + // Do not consider the resource impact of terminal allocations + if alloc.TerminalStatus() { + continue + } + + if alloc.AllocatedResources != nil { + for _, task := range alloc.AllocatedResources.Tasks { + if len(task.Networks) == 0 { + continue + } + n := task.Networks[0] + if idx.AddReserved(n) { + collide = true + } } - n := task.Networks[0] - if idx.AddReserved(n) { - collide = true + } else { + // COMPAT(0.11): Remove in 0.11 + for _, task := range alloc.TaskResources { + if len(task.Networks) == 0 { + continue + } + n := task.Networks[0] + if idx.AddReserved(n) { + collide = true + } } } } @@ -142,6 +174,49 @@ func (idx *NetworkIndex) AddReserved(n *NetworkResource) (collide bool) { return } +// AddReservedPortRange marks the ports given as reserved on all network +// interfaces. 
The port format is comma delimited, with spans given as n1-n2 +// (80,100-200,205) +func (idx *NetworkIndex) AddReservedPortRange(ports string) (collide bool) { + // Convert the ports into a slice of ints + resPorts, err := ParsePortRanges(ports) + if err != nil { + return + } + + // Ensure we create a bitmap for each available network + for _, n := range idx.AvailNetworks { + used := idx.UsedPorts[n.IP] + if used == nil { + // Try to get a bitmap from the pool, else create + raw := bitmapPool.Get() + if raw != nil { + used = raw.(Bitmap) + used.Clear() + } else { + used, _ = NewBitmap(maxValidPort) + } + idx.UsedPorts[n.IP] = used + } + } + + for _, used := range idx.UsedPorts { + for _, port := range resPorts { + // Guard against invalid port + if port < 0 || port >= maxValidPort { + return true + } + if used.Check(uint(port)) { + collide = true + } else { + used.Set(uint(port)) + } + } + } + + return +} + // yieldIP is used to iteratively invoke the callback with // an available IP func (idx *NetworkIndex) yieldIP(cb func(net *NetworkResource, ip net.IP) bool) { diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go index e4980edd2..0f9fb0ee3 100644 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go @@ -1425,6 +1425,13 @@ type Node struct { // "docker.runtime=1.8.3" Attributes map[string]string + // NodeResources captures the available resources on the client. + NodeResources *NodeResources + + // ReservedResources captures the set resources on the client that are + // reserved from scheduling. + ReservedResources *NodeReservedResources + // Resources is the available resources on the client. // For example 'cpu=2' 'memory=2048' Resources *Resources @@ -1522,6 +1529,8 @@ func (n *Node) Copy() *Node { nn.Attributes = helper.CopyMapStringString(nn.Attributes) nn.Resources = nn.Resources.Copy() nn.Reserved = nn.Reserved.Copy() + nn.NodeResources = nn.NodeResources.Copy() + nn.ReservedResources = nn.ReservedResources.Copy() nn.Links = helper.CopyMapStringString(nn.Links) nn.Meta = helper.CopyMapStringString(nn.Meta) nn.Events = copyNodeEvents(n.Events) @@ -1569,6 +1578,64 @@ func (n *Node) TerminalStatus() bool { } } +// COMPAT(0.11): Remove in 0.11 +// ComparableReservedResources returns the reserved resouces on the node +// handling upgrade paths. Reserved networks must be handled separately. After +// 0.11 calls to this should be replaced with: +// node.ReservedResources.Comparable() +func (n *Node) ComparableReservedResources() *ComparableResources { + // See if we can no-op + if n.Reserved == nil && n.ReservedResources == nil { + return nil + } + + // Node already has 0.9+ behavior + if n.ReservedResources != nil { + return n.ReservedResources.Comparable() + } + + // Upgrade path + return &ComparableResources{ + Flattened: AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: uint64(n.Reserved.CPU), + }, + Memory: AllocatedMemoryResources{ + MemoryMB: uint64(n.Reserved.MemoryMB), + }, + }, + Shared: AllocatedSharedResources{ + DiskMB: uint64(n.Reserved.DiskMB), + }, + } +} + +// COMPAT(0.11): Remove in 0.11 +// ComparableResources returns the resouces on the node +// handling upgrade paths. Networking must be handled separately. 
After 0.11 +// calls to this should be replaced with: node.NodeResources.Comparable() +func (n *Node) ComparableResources() *ComparableResources { + // Node already has 0.9+ behavior + if n.NodeResources != nil { + return n.NodeResources.Comparable() + } + + // Upgrade path + return &ComparableResources{ + Flattened: AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: uint64(n.Resources.CPU), + }, + Memory: AllocatedMemoryResources{ + MemoryMB: uint64(n.Resources.MemoryMB), + }, + }, + Shared: AllocatedSharedResources{ + DiskMB: uint64(n.Resources.DiskMB), + }, + } +} + // Stub returns a summarized version of the node func (n *Node) Stub() *NodeListStub { @@ -1609,26 +1676,6 @@ type NodeListStub struct { ModifyIndex uint64 } -// Networks defined for a task on the Resources struct. -type Networks []*NetworkResource - -// Port assignment and IP for the given label or empty values. -func (ns Networks) Port(label string) (string, int) { - for _, n := range ns { - for _, p := range n.ReservedPorts { - if p.Label == label { - return n.IP, p.Value - } - } - for _, p := range n.DynamicPorts { - if p.Label == label { - return n.IP, p.Value - } - } - } - return "", 0 -} - // Resources is used to define the resources available // on a client type Resources struct { @@ -1637,6 +1684,7 @@ type Resources struct { DiskMB int IOPS int Networks Networks + Devices []*RequestedDevice } const ( @@ -1690,6 +1738,9 @@ func (r *Resources) Merge(other *Resources) { if len(other.Networks) != 0 { r.Networks = other.Networks } + if len(other.Devices) != 0 { + r.Devices = other.Devices + } } func (r *Resources) Canonicalize() { @@ -1698,6 +1749,9 @@ func (r *Resources) Canonicalize() { if len(r.Networks) == 0 { r.Networks = nil } + if len(r.Devices) == 0 { + r.Devices = nil + } for _, n := range r.Networks { n.Canonicalize() @@ -1735,6 +1789,8 @@ func (r *Resources) Copy() *Resources { } newR := new(Resources) *newR = *r + + // Copy the network objects if r.Networks != nil { n := len(r.Networks) newR.Networks = make([]*NetworkResource, n) @@ -1742,17 +1798,22 @@ func (r *Resources) Copy() *Resources { newR.Networks[i] = r.Networks[i].Copy() } } + + // Copy the devices + if r.Devices != nil { + n := len(r.Devices) + newR.Devices = make([]*RequestedDevice, n) + for i := 0; i < n; i++ { + newR.Devices[i] = r.Devices[i].Copy() + } + } + return newR } // NetIndex finds the matching net index using device name func (r *Resources) NetIndex(n *NetworkResource) int { - for idx, net := range r.Networks { - if net.Device == n.Device { - return idx - } - } - return -1 + return r.Networks.NetIndex(n) } // Superset checks if one set of resources is a superset @@ -1927,6 +1988,511 @@ func (n *NetworkResource) PortLabels() map[string]int { return labelValues } +// Networks defined for a task on the Resources struct. +type Networks []*NetworkResource + +// Port assignment and IP for the given label or empty values. +func (ns Networks) Port(label string) (string, int) { + for _, n := range ns { + for _, p := range n.ReservedPorts { + if p.Label == label { + return n.IP, p.Value + } + } + for _, p := range n.DynamicPorts { + if p.Label == label { + return n.IP, p.Value + } + } + } + return "", 0 +} + +func (ns Networks) NetIndex(n *NetworkResource) int { + for idx, net := range ns { + if net.Device == n.Device { + return idx + } + } + return -1 +} + +// RequestedDevice is used to request a device for a task. +type RequestedDevice struct { + // Name is the request name. 
The possible values are as follows: + // * : A single value only specifies the type of request. + // * /: A single slash delimiter assumes the vendor and type of device is specified. + // * //: Two slash delimiters assume vendor, type and specific model are specified. + // + // Examples are as follows: + // * "gpu" + // * "nvidia/gpu" + // * "nvidia/gpu/GTX2080Ti" + Name string + + // Count is the number of requested devices + Count uint64 + + // TODO validate + // Constraints are a set of constraints to apply when selecting the device + // to use. + Constraints []*Constraint + + // Affinities are a set of affinites to apply when selecting the device + // to use. + Affinities []*Affinity +} + +func (r *RequestedDevice) Copy() *RequestedDevice { + if r == nil { + return nil + } + + nr := *r + nr.Constraints = CopySliceConstraints(nr.Constraints) + nr.Affinities = CopySliceAffinities(nr.Affinities) + + return &nr +} + +// NodeResources is used to define the resources available on a client node. +type NodeResources struct { + Cpu NodeCpuResources + Memory NodeMemoryResources + Disk NodeDiskResources + Networks Networks +} + +func (n *NodeResources) Copy() *NodeResources { + if n == nil { + return nil + } + newN := new(NodeResources) + *newN = *n + if n.Networks != nil { + networks := len(n.Networks) + newN.Networks = make([]*NetworkResource, networks) + for i := 0; i < networks; i++ { + newN.Networks[i] = n.Networks[i].Copy() + } + } + return newN +} + +// Comparable returns a comparable version of the nodes resources. This +// conversion can be lossy so care must be taken when using it. +func (n *NodeResources) Comparable() *ComparableResources { + if n == nil { + return nil + } + + c := &ComparableResources{ + Flattened: AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: n.Cpu.CpuShares, + }, + Memory: AllocatedMemoryResources{ + MemoryMB: n.Memory.MemoryMB, + }, + Networks: n.Networks, + }, + Shared: AllocatedSharedResources{ + DiskMB: n.Disk.DiskMB, + }, + } + return c +} + +func (n *NodeResources) Merge(o *NodeResources) { + if o == nil { + return + } + + n.Cpu.Merge(&o.Cpu) + n.Memory.Merge(&o.Memory) + n.Disk.Merge(&o.Disk) + + if len(o.Networks) != 0 { + n.Networks = o.Networks + } +} + +func (n *NodeResources) Equals(o *NodeResources) bool { + if o == nil && n == nil { + return true + } else if o == nil { + return false + } else if n == nil { + return false + } + + if !n.Cpu.Equals(&o.Cpu) { + return false + } + if !n.Memory.Equals(&o.Memory) { + return false + } + if !n.Disk.Equals(&o.Disk) { + return false + } + + if len(n.Networks) != len(o.Networks) { + return false + } + for i, n := range n.Networks { + if !n.Equals(o.Networks[i]) { + return false + } + } + + return true +} + +// NodeCpuResources captures the CPU resources of the node. +type NodeCpuResources struct { + // CpuShares is the CPU shares available. This is calculated by number of + // cores multiplied by the core frequency. 
+ CpuShares uint64 +} + +func (n *NodeCpuResources) Merge(o *NodeCpuResources) { + if o == nil { + return + } + + if o.CpuShares != 0 { + n.CpuShares = o.CpuShares + } +} + +func (n *NodeCpuResources) Equals(o *NodeCpuResources) bool { + if o == nil && n == nil { + return true + } else if o == nil { + return false + } else if n == nil { + return false + } + + if n.CpuShares != o.CpuShares { + return false + } + + return true +} + +// NodeMemoryResources captures the memory resources of the node +type NodeMemoryResources struct { + // MemoryMB is the total available memory on the node + MemoryMB uint64 +} + +func (n *NodeMemoryResources) Merge(o *NodeMemoryResources) { + if o == nil { + return + } + + if o.MemoryMB != 0 { + n.MemoryMB = o.MemoryMB + } +} + +func (n *NodeMemoryResources) Equals(o *NodeMemoryResources) bool { + if o == nil && n == nil { + return true + } else if o == nil { + return false + } else if n == nil { + return false + } + + if n.MemoryMB != o.MemoryMB { + return false + } + + return true +} + +// NodeDiskResources captures the disk resources of the node +type NodeDiskResources struct { + // DiskMB is the total available disk space on the node + DiskMB uint64 +} + +func (n *NodeDiskResources) Merge(o *NodeDiskResources) { + if o == nil { + return + } + if o.DiskMB != 0 { + n.DiskMB = o.DiskMB + } +} + +func (n *NodeDiskResources) Equals(o *NodeDiskResources) bool { + if o == nil && n == nil { + return true + } else if o == nil { + return false + } else if n == nil { + return false + } + + if n.DiskMB != o.DiskMB { + return false + } + + return true +} + +// NodeReservedResources is used to capture the resources on a client node that +// should be reserved and not made available to jobs. +type NodeReservedResources struct { + Cpu NodeReservedCpuResources + Memory NodeReservedMemoryResources + Disk NodeReservedDiskResources + Networks NodeReservedNetworkResources +} + +func (n *NodeReservedResources) Copy() *NodeReservedResources { + if n == nil { + return nil + } + newN := new(NodeReservedResources) + *newN = *n + return newN +} + +// Comparable returns a comparable version of the node's reserved resources. The +// returned resources doesn't contain any network information. This conversion +// can be lossy so care must be taken when using it. +func (n *NodeReservedResources) Comparable() *ComparableResources { + if n == nil { + return nil + } + + c := &ComparableResources{ + Flattened: AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: n.Cpu.CpuShares, + }, + Memory: AllocatedMemoryResources{ + MemoryMB: n.Memory.MemoryMB, + }, + }, + Shared: AllocatedSharedResources{ + DiskMB: n.Disk.DiskMB, + }, + } + return c +} + +// NodeReservedCpuResources captures the reserved CPU resources of the node. +type NodeReservedCpuResources struct { + CpuShares uint64 +} + +// NodeReservedMemoryResources captures the reserved memory resources of the node. +type NodeReservedMemoryResources struct { + MemoryMB uint64 +} + +// NodeReservedDiskResources captures the reserved disk resources of the node. +type NodeReservedDiskResources struct { + DiskMB uint64 +} + +// NodeReservedNetworkResources captures the reserved network resources of the node. +type NodeReservedNetworkResources struct { + // ReservedHostPorts is the set of ports reserved on all host network + // interfaces. Its format is a comma separate list of integers or integer + // ranges. (80,443,1000-2000,2005) + ReservedHostPorts string +} + +// ParsePortHostPorts returns the reserved host ports. 
+func (n *NodeReservedNetworkResources) ParseReservedHostPorts() ([]uint64, error) { + return ParsePortRanges(n.ReservedHostPorts) +} + +// AllocatedResources is the set of resources to be used by an allocation. +type AllocatedResources struct { + // Tasks is a mapping of task name to the resources for the task. + Tasks map[string]*AllocatedTaskResources + + // Shared is the set of resource that are shared by all tasks in the group. + Shared AllocatedSharedResources +} + +func (a *AllocatedResources) Copy() *AllocatedResources { + if a == nil { + return nil + } + newA := new(AllocatedResources) + *newA = *a + + if a.Tasks != nil { + tr := make(map[string]*AllocatedTaskResources, len(newA.Tasks)) + for task, resource := range newA.Tasks { + tr[task] = resource.Copy() + } + newA.Tasks = tr + } + + return newA +} + +// Comparable returns a comparable version of the allocations allocated +// resources. This conversion can be lossy so care must be taken when using it. +func (a *AllocatedResources) Comparable() *ComparableResources { + if a == nil { + return nil + } + + c := &ComparableResources{ + Shared: a.Shared, + } + for _, r := range a.Tasks { + c.Flattened.Add(r) + } + return c +} + +// OldTaskResources returns the pre-0.9.0 map of task resources +func (a *AllocatedResources) OldTaskResources() map[string]*Resources { + m := make(map[string]*Resources, len(a.Tasks)) + for name, res := range a.Tasks { + m[name] = &Resources{ + CPU: int(res.Cpu.CpuShares), + MemoryMB: int(res.Memory.MemoryMB), + Networks: res.Networks, + } + } + + return m +} + +// AllocatedTaskResources are the set of resources allocated to a task. +type AllocatedTaskResources struct { + Cpu AllocatedCpuResources + Memory AllocatedMemoryResources + Networks Networks +} + +func (a *AllocatedTaskResources) Copy() *AllocatedTaskResources { + if a == nil { + return nil + } + newA := new(AllocatedTaskResources) + *newA = *a + if a.Networks != nil { + n := len(a.Networks) + newA.Networks = make([]*NetworkResource, n) + for i := 0; i < n; i++ { + newA.Networks[i] = a.Networks[i].Copy() + } + } + return newA +} + +// NetIndex finds the matching net index using device name +func (a *AllocatedTaskResources) NetIndex(n *NetworkResource) int { + return a.Networks.NetIndex(n) +} + +func (a *AllocatedTaskResources) Add(delta *AllocatedTaskResources) { + if delta == nil { + return + } + + a.Cpu.Add(&delta.Cpu) + a.Memory.Add(&delta.Memory) + + for _, n := range delta.Networks { + // Find the matching interface by IP or CIDR + idx := a.NetIndex(n) + if idx == -1 { + a.Networks = append(a.Networks, n.Copy()) + } else { + a.Networks[idx].Add(n) + } + } +} + +// AllocatedSharedResources are the set of resources allocated to a task group. +type AllocatedSharedResources struct { + DiskMB uint64 +} + +func (a *AllocatedSharedResources) Add(delta *AllocatedSharedResources) { + if delta == nil { + return + } + + a.DiskMB += delta.DiskMB +} + +// AllocatedCpuResources captures the allocated CPU resources. +type AllocatedCpuResources struct { + CpuShares uint64 +} + +func (a *AllocatedCpuResources) Add(delta *AllocatedCpuResources) { + if delta == nil { + return + } + + a.CpuShares += delta.CpuShares +} + +// AllocatedMemoryResources captures the allocated memory resources. 
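// Illustrative sketch (not from the vendored change itself): parsing a reserved
// host-port specification via ParseReservedHostPorts, defined just above and
// backed by ParsePortRanges from funcs.go. Assumes it runs inside the structs
// package.
func exampleReservedHostPorts() ([]uint64, error) {
	reserved := NodeReservedNetworkResources{
		ReservedHostPorts: "22,80,8000-8003",
	}
	// Returns the expanded, de-duplicated, ascending port list:
	// []uint64{22, 80, 8000, 8001, 8002, 8003}
	return reserved.ParseReservedHostPorts()
}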
+type AllocatedMemoryResources struct { + MemoryMB uint64 +} + +func (a *AllocatedMemoryResources) Add(delta *AllocatedMemoryResources) { + if delta == nil { + return + } + + a.MemoryMB += delta.MemoryMB +} + +// ComparableResources is the set of resources allocated to a task group but +// not keyed by Task, making it easier to compare. +type ComparableResources struct { + Flattened AllocatedTaskResources + Shared AllocatedSharedResources +} + +func (c *ComparableResources) Add(delta *ComparableResources) { + if delta == nil { + return + } + + c.Flattened.Add(&delta.Flattened) + c.Shared.Add(&delta.Shared) +} + +// Superset checks if one set of resources is a superset of another. This +// ignores network resources, and the NetworkIndex should be used for that. +func (c *ComparableResources) Superset(other *ComparableResources) (bool, string) { + if c.Flattened.Cpu.CpuShares < other.Flattened.Cpu.CpuShares { + return false, "cpu" + } + if c.Flattened.Memory.MemoryMB < other.Flattened.Memory.MemoryMB { + return false, "memory" + } + if c.Shared.DiskMB < other.Shared.DiskMB { + return false, "disk" + } + return true, "" +} + +// allocated finds the matching net index using device name +func (c *ComparableResources) NetIndex(n *NetworkResource) int { + return c.Flattened.Networks.NetIndex(n) +} + const ( // JobTypeNomad is reserved for internal system tasks and is // always handled by the CoreScheduler. @@ -3427,19 +3993,6 @@ func (tg *TaskGroup) Canonicalize(job *Job) { for _, task := range tg.Tasks { task.Canonicalize(job, tg) } - - // Add up the disk resources to EphemeralDisk. This is done so that users - // are not required to move their disk attribute from resources to - // EphemeralDisk section of the job spec in Nomad 0.5 - // COMPAT 0.4.1 -> 0.5 - // Remove in 0.6 - var diskMB int - for _, task := range tg.Tasks { - diskMB += task.Resources.DiskMB - } - if diskMB > 0 { - tg.EphemeralDisk.SizeMB = diskMB - } } // Validate is used to sanity check a task group @@ -3622,17 +4175,6 @@ func (tg *TaskGroup) GoString() string { return fmt.Sprintf("*%#v", *tg) } -// CombinedResources returns the combined resources for the task group -func (tg *TaskGroup) CombinedResources() *Resources { - r := &Resources{ - DiskMB: tg.EphemeralDisk.SizeMB, - } - for _, task := range tg.Tasks { - r.Add(task.Resources) - } - return r -} - // CheckRestart describes if and when a task should be restarted based on // failing health checks. type CheckRestart struct { @@ -6032,18 +6574,24 @@ type Allocation struct { // TaskGroup is the name of the task group that should be run TaskGroup string + // COMPAT(0.11): Remove in 0.11 // Resources is the total set of resources allocated as part // of this allocation of the task group. Resources *Resources + // COMPAT(0.11): Remove in 0.11 // SharedResources are the resources that are shared by all the tasks in an // allocation SharedResources *Resources + // COMPAT(0.11): Remove in 0.11 // TaskResources is the set of resources allocated to each // task. These should sum to the total Resources. TaskResources map[string]*Resources + // AllocatedResources is the total resources allocated for the task group. 
+ AllocatedResources *AllocatedResources + // Metrics associated with this allocation Metrics *AllocMetric @@ -6137,6 +6685,7 @@ func (a *Allocation) copyImpl(job bool) *Allocation { na.Job = na.Job.Copy() } + na.AllocatedResources = na.AllocatedResources.Copy() na.Resources = na.Resources.Copy() na.SharedResources = na.SharedResources.Copy() @@ -6401,6 +6950,43 @@ func (a *Allocation) SetEventDisplayMessages() { setDisplayMsg(a.TaskStates) } +// COMPAT(0.11): Remove in 0.11 +// ComparableResources returns the resouces on the allocation +// handling upgrade paths. After 0.11 calls to this should be replaced with: +// alloc.AllocatedResources.Comparable() +func (a *Allocation) ComparableResources() *ComparableResources { + // ALloc already has 0.9+ behavior + if a.AllocatedResources != nil { + return a.AllocatedResources.Comparable() + } + + var resources *Resources + if a.Resources != nil { + resources = a.Resources + } else if a.TaskResources != nil { + resources = new(Resources) + resources.Add(a.SharedResources) + for _, taskResource := range a.TaskResources { + resources.Add(taskResource) + } + } + + // Upgrade path + return &ComparableResources{ + Flattened: AllocatedTaskResources{ + Cpu: AllocatedCpuResources{ + CpuShares: uint64(resources.CPU), + }, + Memory: AllocatedMemoryResources{ + MemoryMB: uint64(resources.MemoryMB), + }, + }, + Shared: AllocatedSharedResources{ + DiskMB: uint64(resources.DiskMB), + }, + } +} + // Stub returns a list stub for the allocation func (a *Allocation) Stub() *AllocListStub { return &AllocListStub{ diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/testing.go b/vendor/github.com/hashicorp/nomad/nomad/structs/testing.go new file mode 100644 index 000000000..bbe2a2e58 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/testing.go @@ -0,0 +1,26 @@ +package structs + +// NodeResourcesToAllocatedResources converts a node resources to an allocated +// resources. The task name used is "web" and network is omitted. This is +// useful when trying to make an allocation fill an entire node. 
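// Illustrative sketch (not from the vendored change itself): using the
// NodeResourcesToAllocatedResources helper defined below to build an
// allocation that consumes a node's full resources, e.g. in a scheduler test.
// Assumes the structs package; the resource figures are arbitrary.
func exampleFullNodeAlloc() *Allocation {
	node := &NodeResources{
		Cpu:    NodeCpuResources{CpuShares: 4000},
		Memory: NodeMemoryResources{MemoryMB: 8192},
		Disk:   NodeDiskResources{DiskMB: 100 * 1024},
	}
	return &Allocation{
		// Other allocation fields (ID, Job, TaskGroup, ...) elided.
		AllocatedResources: NodeResourcesToAllocatedResources(node),
	}
}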
+func NodeResourcesToAllocatedResources(n *NodeResources) *AllocatedResources { + if n == nil { + return nil + } + + return &AllocatedResources{ + Tasks: map[string]*AllocatedTaskResources{ + "web": { + Cpu: AllocatedCpuResources{ + CpuShares: n.Cpu.CpuShares, + }, + Memory: AllocatedMemoryResources{ + MemoryMB: n.Memory.MemoryMB, + }, + }, + }, + Shared: AllocatedSharedResources{ + DiskMB: n.Disk.DiskMB, + }, + } +} diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-alicloud/Gopkg.lock b/vendor/github.com/hashicorp/vault-plugin-auth-alicloud/Gopkg.lock index 78c5a5f68..7849a487e 100644 --- a/vendor/github.com/hashicorp/vault-plugin-auth-alicloud/Gopkg.lock +++ b/vendor/github.com/hashicorp/vault-plugin-auth-alicloud/Gopkg.lock @@ -15,7 +15,7 @@ version = "1.1" [[projects]] - digest = "1:cde027e8bb29425770dd8ddc87789e0139f2dc53a80e248c8d6a0698c7e3f0bc" + digest = "1:fd5206897fecaccd4d4f247bfb399a8e183b54a06426ffb9b157ed1119a3910f" name = "github.com/aliyun/alibaba-cloud-sdk-go" packages = [ "sdk", @@ -31,19 +31,19 @@ "services/sts", ] pruneopts = "UT" - revision = "ef9535c490beb6b59620d93f6c7ba88e9b3b1ad0" - version = "1.26.2" + revision = "9669db6328e053fefc47bfe8ddf2e82625444fab" + version = "1.31.4" [[projects]] - branch = "master" - digest = "1:6bf6d532e503d9526d46e69aff04d11632c8c1e28b847dbd226babc1689aa723" + digest = "1:c47f4964978e211c6e566596ec6246c329912ea92e9bb99c00798bb4564c5b09" name = "github.com/armon/go-radix" packages = ["."] pruneopts = "UT" - revision = "7fddfc383310abc091d79a27f116d30cf0424032" + revision = "1a2de0c21c94309923825da3df33a4381872c795" + version = "v1.0.0" [[projects]] - digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260" + digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" name = "github.com/golang/protobuf" packages = [ "proto", @@ -53,8 +53,8 @@ "ptypes/timestamp", ] pruneopts = "UT" - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" [[projects]] branch = "master" @@ -66,51 +66,51 @@ [[projects]] branch = "master" - digest = "1:d1971637b21871ec2033a44ca87c99c5608a7340cb34ec75fab8d2ab503276c9" + digest = "1:0ade334594e69404d80d9d323445d2297ff8161637f9b2d347cc6973d2d6f05b" name = "github.com/hashicorp/errwrap" packages = ["."] pruneopts = "UT" - revision = "d6c0cd88035724dd42e0f335ae30161c20575ecc" + revision = "8a6fb523712970c966eefc6b39ed2c5e74880354" [[projects]] branch = "master" - digest = "1:77cb3be9b21ba7f1a4701e870c84ea8b66e7d74c7c8951c58155fdadae9414ec" + digest = "1:f47d6109c2034cb16bd62b220e18afd5aa9d5a1630fe5d937ad96a4fb7cbb277" name = "github.com/hashicorp/go-cleanhttp" packages = ["."] pruneopts = "UT" - revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" + revision = "e8ab9daed8d1ddd2d3c4efba338fe2eeae2e4f18" [[projects]] branch = "master" - digest = "1:e8d99882caa8c74d68f340ddb9bba3f7e433117ce57c3e52501edfa7e195d2c7" + digest = "1:0876aeb6edb07e20b6b0ce1d346655cb63dbe0a26ccfb47b68a9b7697709777b" name = "github.com/hashicorp/go-hclog" packages = ["."] pruneopts = "UT" - revision = "ff2cf002a8dd750586d91dddd4470c341f981fe1" + revision = "61d530d6c27f994fb6c83b80f99a69c54125ec8a" [[projects]] - branch = "master" - digest = "1:2394f5a25132b3868eff44599cc28d44bdd0330806e34c495d754dd052df612b" + digest = "1:2be5a35f0c5b35162c41bb24971e5dcf6ce825403296ee435429cdcc4e1e847e" name = "github.com/hashicorp/go-immutable-radix" packages = ["."] pruneopts = "UT" - revision = 
"7f3cd4390caab3250a57f30efdb2a65dd7649ecf" + revision = "27df80928bb34bb1b0d6d0e01b9e679902e7a6b5" + version = "v1.0.0" [[projects]] - branch = "master" - digest = "1:46fb6a9f1b9667f32ac93e08b1da118b2c666991424ea12e848b05d4fe5155ef" + digest = "1:f668349b83f7d779567c880550534addeca7ebadfdcf44b0b9c39be61864b4b7" name = "github.com/hashicorp/go-multierror" packages = ["."] pruneopts = "UT" - revision = "3d5d8f294aa03d8e98859feac328afbdf1ae0703" + revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1" + version = "v1.0.0" [[projects]] branch = "master" - digest = "1:20f78c1cf1b6fe6c55ba1407350d6fc7dc77d1591f8106ba693c28014a1a1b37" + digest = "1:ed6b6f1d3d949ad31aba00953f0fc58aaaa1df1a37102ff5646df82233329853" name = "github.com/hashicorp/go-plugin" packages = ["."] pruneopts = "UT" - revision = "a4620f9913d19f03a6bf19b2f304daaaf83ea130" + revision = "1faddcf740b61468a23dacc67369c28ec96d7fc7" [[projects]] branch = "master" @@ -137,35 +137,34 @@ revision = "6d291a969b86c4b633730bfc6b8b9d64c3aafed9" [[projects]] - branch = "master" - digest = "1:354978aad16c56c27f57e5b152224806d87902e4935da3b03e18263d82ae77aa" + digest = "1:12ed7dcca9531e58c65cdadb8af0052724bef7fa1581380523fb9cb1215faf0d" name = "github.com/hashicorp/go-uuid" packages = ["."] pruneopts = "UT" - revision = "27454136f0364f2d44b1276c552d69105cf8c498" + revision = "de160f5c59f693fed329e73e291bb751fe4ea4dc" + version = "v1.0.0" [[projects]] - branch = "master" - digest = "1:32c0e96a63bd093eccf37db757fb314be5996f34de93969321c2cbef893a7bd6" + digest = "1:77395dd3847dac9c45118c668f5dab85aedf0163dc3b38aea6578c5cf0d502f9" name = "github.com/hashicorp/go-version" packages = ["."] pruneopts = "UT" - revision = "270f2f71b1ee587f3b609f00f422b76a6b28f348" + revision = "b5a281d3160aa11950a6182bd9a9dc2cb1e02d50" + version = "v1.0.0" [[projects]] - branch = "master" - digest = "1:cf296baa185baae04a9a7004efee8511d08e2f5f51d4cbe5375da89722d681db" + digest = "1:8ec8d88c248041a6df5f6574b87bc00e7e0b493881dad2e7ef47b11dc69093b5" name = "github.com/hashicorp/golang-lru" packages = [ ".", "simplelru", ] pruneopts = "UT" - revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" + revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" + version = "v0.5.0" [[projects]] - branch = "master" - digest = "1:12247a2e99a060cc692f6680e5272c8adf0b8f572e6bce0d7095e624c958a240" + digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8" name = "github.com/hashicorp/hcl" packages = [ ".", @@ -179,11 +178,12 @@ "json/token", ] pruneopts = "UT" - revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" + revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" + version = "v1.0.0" [[projects]] branch = "master" - digest = "1:c9caf8fc607b9b8fa503965eca966ae6f1fb96573a0a1c04017b9cd0a98adad3" + digest = "1:9b851e29f662c4522e3c9a235bb23008b8fce207b071e50eee2a014fd50f1059" name = "github.com/hashicorp/vault" packages = [ "api", @@ -194,6 +194,7 @@ "helper/errutil", "helper/hclutil", "helper/jsonutil", + "helper/license", "helper/locksutil", "helper/logging", "helper/mlock", @@ -212,15 +213,15 @@ "version", ] pruneopts = "UT" - revision = "8655d167084028d627f687ddc25d0c71307eb5be" + revision = "e7a0452736177a4ecf6955cdf72a93c325943a18" [[projects]] branch = "master" - digest = "1:89658943622e6bc5e76b4da027ee9583fa0b321db0c797bd554edab96c1ca2b1" + digest = "1:a4826c308e84f5f161b90b54a814f0be7d112b80164b9b884698a6903ea47ab3" name = "github.com/hashicorp/yamux" packages = ["."] pruneopts = "UT" - revision = "3520598351bb3500a49ae9563f5539666ae0a27c" + revision = 
"7221087c3d281fda5f794e28c2ea4c6e4d5c4558" [[projects]] digest = "1:b87714e57a511d88f307aba7d5b63522da12bed0a050889c81272fc50f71100e" @@ -239,28 +240,28 @@ version = "1.1.5" [[projects]] - branch = "master" - digest = "1:c7354463195544b1ab3c1f1fadb41430947f5d28dfbf2cdbd38268c5717a5a03" + digest = "1:78bbb1ba5b7c3f2ed0ea1eab57bdd3859aec7e177811563edc41198a760b06af" name = "github.com/mitchellh/go-homedir" packages = ["."] pruneopts = "UT" - revision = "58046073cbffe2f25d425fe1331102f55cf719de" + revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4" + version = "v1.0.0" [[projects]] - branch = "master" - digest = "1:cae1afe858922bd10e9573b87130f730a6e4183a00eba79920d6656629468bfa" + digest = "1:42eb1f52b84a06820cedc9baec2e710bfbda3ee6dac6cdb97f8b9a5066134ec6" name = "github.com/mitchellh/go-testing-interface" packages = ["."] pruneopts = "UT" - revision = "a61a99592b77c9ba629d254a693acffaeb4b7e28" + revision = "6d0b8010fcc857872e42fc6c931227569016843c" + version = "v1.0.0" [[projects]] - branch = "master" - digest = "1:5ab79470a1d0fb19b041a624415612f8236b3c06070161a910562f2b2d064355" + digest = "1:e32dfc6abff6a3633ef4d9a1022fd707c8ef26f1e1e8f855dc58dc415ce7c8f3" name = "github.com/mitchellh/mapstructure" packages = ["."] pruneopts = "UT" - revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" + revision = "fe40af7a9c397fa3ddba203c38a5042c5d0475ad" + version = "v1.1.1" [[projects]] digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" @@ -286,6 +287,17 @@ revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39" version = "v1.0.0" +[[projects]] + digest = "1:4f0885b3f0dba96128a09a6f4b4231c42688fbd05f323224c6aa5adc9f4e87bf" + name = "github.com/pierrec/lz4" + packages = [ + ".", + "internal/xxh32", + ] + pruneopts = "UT" + revision = "bb6bfd13c6a262f1943c0446eb25b7f54c1fb9a2" + version = "v2.0.6" + [[projects]] digest = "1:0e792eea6c96ec55ff302ef33886acbaa5006e900fefe82689e88d96439dcd84" name = "github.com/ryanuber/go-glob" @@ -304,7 +316,7 @@ [[projects]] branch = "master" - digest = "1:b5c3834d33445efdc5a8dcb154bed9e4c211edadbf02f6f5cc20c5e9be26a499" + digest = "1:505dbee0833715a72a529bb57c354826ad42a4496fad787fa143699b4de1a6d0" name = "golang.org/x/net" packages = [ "context", @@ -316,15 +328,15 @@ "trace", ] pruneopts = "UT" - revision = "aaf60122140d3fcf75376d319f0554393160eb50" + revision = "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f" [[projects]] branch = "master" - digest = "1:05662433b3a13c921587a6e622b5722072edff83211efd1cd79eeaeedfd83f07" + digest = "1:746ccf620ef9726c42453032e8e039860851ab5914278d24202f343a479a3073" name = "golang.org/x/sys" packages = ["unix"] pruneopts = "UT" - revision = "1c9583448a9c3aa0f9a6a5241bf73c0bd8aafded" + revision = "af653ce8b74f808d092db8ca9741fbb63d2a469d" [[projects]] digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" @@ -359,14 +371,14 @@ [[projects]] branch = "master" - digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" + digest = "1:1e6b0176e8c5dd8ff551af65c76f8b73a99bcf4d812cedff1b91711b7df4804c" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] pruneopts = "UT" - revision = "d0a8f471bba2dbb160885b0000d814ee5d559bad" + revision = "c7e5094acea1ca1b899e2259d80a6b0f882f81f8" [[projects]] - digest = "1:047efbc3c9a51f3002b0002f92543857d372654a676fb6b01931982cd80467dd" + digest = "1:1b6d2676ea895d33cbd1999c75dfc8e25b103c754ccfc66dc06ae845ce3a47bc" name = "google.golang.org/grpc" packages = [ ".", @@ -399,8 +411,8 @@ "tap", ] pruneopts = "UT" - revision = 
"32fb0ac620c32ba40a4626ddf94d90d12cce3455" - version = "v1.14.0" + revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1" + version = "v1.15.0" [solve-meta] analyzer-name = "dep" @@ -408,6 +420,7 @@ input-imports = [ "github.com/aliyun/alibaba-cloud-sdk-go/sdk", "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth", + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials", "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers", "github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints", "github.com/aliyun/alibaba-cloud-sdk-go/services/sts", @@ -415,6 +428,7 @@ "github.com/hashicorp/go-cleanhttp", "github.com/hashicorp/go-hclog", "github.com/hashicorp/go-sockaddr", + "github.com/hashicorp/go-uuid", "github.com/hashicorp/vault/api", "github.com/hashicorp/vault/helper/cidrutil", "github.com/hashicorp/vault/helper/parseutil", diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_role.go b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_role.go index 212b1d20c..8dc1342e3 100644 --- a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_role.go +++ b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_role.go @@ -289,7 +289,7 @@ func (b *GcpAuthBackend) pathRoleRead(ctx context.Context, req *logical.Request, resp := make(map[string]interface{}) if role.RoleType != "" { - resp["role_type"] = role.RoleType + resp["role"] = role.RoleType } if role.ProjectId != "" { resp["project_id"] = role.ProjectId diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/README.md b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/README.md index b865a1eb9..442027430 100644 --- a/vendor/github.com/hashicorp/vault-plugin-auth-jwt/README.md +++ b/vendor/github.com/hashicorp/vault-plugin-auth-jwt/README.md @@ -5,10 +5,6 @@ This plugin allows for JWTs (including OIDC tokens) to authenticate with Vault. **Please note**: We take Vault's security and our users' trust very seriously. If you believe you have found a security issue in Vault, _please responsibly disclose_ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com). -## IMPORTANT - -This plugin is in pre-release state. It is not well tested (in fact, not tested at all) and there is no documentation currently available. 
- ## Quick Links - Vault Website: https://www.vaultproject.io - JWT Auth Docs: https://www.vaultproject.io/docs/auth/jwt.html diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/Gopkg.lock b/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/Gopkg.lock index fd5c45181..76fc321a4 100644 --- a/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/Gopkg.lock +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-alicloud/Gopkg.lock @@ -2,8 +2,7 @@ [[projects]] - branch = "master" - digest = "1:e26170d7ec7d444d7b74a5b1dbd6437fd8e552d27efce9327f733311737c4ae9" + digest = "1:a69ab3f1445ffd4815add4bd31ba05b65b3b9fec1ade5057d5d717f30e6efd6d" name = "github.com/SermoDigital/jose" packages = [ ".", @@ -12,10 +11,11 @@ "jwt", ] pruneopts = "UT" - revision = "803625baeddc3526d01d321b5066029f53eafc81" + revision = "f6df55f235c24f236d11dbcf665249a59ac2021f" + version = "1.1" [[projects]] - digest = "1:309704ec478c46e59bc84e0968ce7ab3707cef00c26c1f1bc058e3d1d26afadb" + digest = "1:ed934376091abfd27e465770d48f469af62f4ec5a61506e80af9b4f97b6defa7" name = "github.com/aliyun/alibaba-cloud-sdk-go" packages = [ "sdk", @@ -32,18 +32,19 @@ "services/sts", ] pruneopts = "UT" - revision = "ef9535c490beb6b59620d93f6c7ba88e9b3b1ad0" - version = "1.26.2" + revision = "9669db6328e053fefc47bfe8ddf2e82625444fab" + version = "1.31.4" [[projects]] - digest = "1:9fd3a6ab34bb103ba228eefd044d3f9aa476237ea95a46d12e8cccd3abf3fea2" + digest = "1:c47f4964978e211c6e566596ec6246c329912ea92e9bb99c00798bb4564c5b09" name = "github.com/armon/go-radix" packages = ["."] pruneopts = "UT" - revision = "1fca145dffbcaa8fe914309b1ec0cfc67500fe61" + revision = "1a2de0c21c94309923825da3df33a4381872c795" + version = "v1.0.0" [[projects]] - digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260" + digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" name = "github.com/golang/protobuf" packages = [ "proto", @@ -53,8 +54,8 @@ "ptypes/timestamp", ] pruneopts = "UT" - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" [[projects]] branch = "master" @@ -65,50 +66,52 @@ revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" [[projects]] - digest = "1:07671f8997086ed115824d1974507d2b147d1e0463675ea5dbf3be89b1c2c563" + digest = "1:0ade334594e69404d80d9d323445d2297ff8161637f9b2d347cc6973d2d6f05b" name = "github.com/hashicorp/errwrap" packages = ["."] pruneopts = "UT" - revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55" + revision = "8a6fb523712970c966eefc6b39ed2c5e74880354" + version = "v1.0.0" [[projects]] - branch = "master" - digest = "1:77cb3be9b21ba7f1a4701e870c84ea8b66e7d74c7c8951c58155fdadae9414ec" + digest = "1:f47d6109c2034cb16bd62b220e18afd5aa9d5a1630fe5d937ad96a4fb7cbb277" name = "github.com/hashicorp/go-cleanhttp" packages = ["."] pruneopts = "UT" - revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" + revision = "e8ab9daed8d1ddd2d3c4efba338fe2eeae2e4f18" + version = "v0.5.0" [[projects]] branch = "master" - digest = "1:e8d99882caa8c74d68f340ddb9bba3f7e433117ce57c3e52501edfa7e195d2c7" + digest = "1:0876aeb6edb07e20b6b0ce1d346655cb63dbe0a26ccfb47b68a9b7697709777b" name = "github.com/hashicorp/go-hclog" packages = ["."] pruneopts = "UT" - revision = "ff2cf002a8dd750586d91dddd4470c341f981fe1" + revision = "61d530d6c27f994fb6c83b80f99a69c54125ec8a" [[projects]] - branch = "master" - digest = "1:2394f5a25132b3868eff44599cc28d44bdd0330806e34c495d754dd052df612b" + digest 
= "1:2be5a35f0c5b35162c41bb24971e5dcf6ce825403296ee435429cdcc4e1e847e" name = "github.com/hashicorp/go-immutable-radix" packages = ["."] pruneopts = "UT" - revision = "7f3cd4390caab3250a57f30efdb2a65dd7649ecf" + revision = "27df80928bb34bb1b0d6d0e01b9e679902e7a6b5" + version = "v1.0.0" [[projects]] - digest = "1:e5048c5da80697be2fcdecc944e29d2999e01fd7f48b643168443209779f3463" + digest = "1:f668349b83f7d779567c880550534addeca7ebadfdcf44b0b9c39be61864b4b7" name = "github.com/hashicorp/go-multierror" packages = ["."] pruneopts = "UT" - revision = "b7773ae218740a7be65057fc60b366a49b538a44" + revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1" + version = "v1.0.0" [[projects]] branch = "master" - digest = "1:82320f8469d1524df337bc315a38c87644765cd89ec4cf3cbda249a3acdde671" + digest = "1:ed6b6f1d3d949ad31aba00953f0fc58aaaa1df1a37102ff5646df82233329853" name = "github.com/hashicorp/go-plugin" packages = ["."] pruneopts = "UT" - revision = "e8d22c780116115ae5624720c9af0c97afe4f551" + revision = "1faddcf740b61468a23dacc67369c28ec96d7fc7" [[projects]] branch = "master" @@ -136,33 +139,33 @@ [[projects]] branch = "master" - digest = "1:354978aad16c56c27f57e5b152224806d87902e4935da3b03e18263d82ae77aa" + digest = "1:12ed7dcca9531e58c65cdadb8af0052724bef7fa1581380523fb9cb1215faf0d" name = "github.com/hashicorp/go-uuid" packages = ["."] pruneopts = "UT" - revision = "27454136f0364f2d44b1276c552d69105cf8c498" + revision = "de160f5c59f693fed329e73e291bb751fe4ea4dc" [[projects]] - digest = "1:e12b92b8bb20af6e299e9829534cfe790857702a988d3f0443e772c9d82a4fd2" + digest = "1:77395dd3847dac9c45118c668f5dab85aedf0163dc3b38aea6578c5cf0d502f9" name = "github.com/hashicorp/go-version" packages = ["."] pruneopts = "UT" - revision = "23480c0665776210b5fbbac6eaaee40e3e6a96b7" + revision = "b5a281d3160aa11950a6182bd9a9dc2cb1e02d50" + version = "v1.0.0" [[projects]] - branch = "master" - digest = "1:cf296baa185baae04a9a7004efee8511d08e2f5f51d4cbe5375da89722d681db" + digest = "1:8ec8d88c248041a6df5f6574b87bc00e7e0b493881dad2e7ef47b11dc69093b5" name = "github.com/hashicorp/golang-lru" packages = [ ".", "simplelru", ] pruneopts = "UT" - revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" + revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" + version = "v0.5.0" [[projects]] - branch = "master" - digest = "1:12247a2e99a060cc692f6680e5272c8adf0b8f572e6bce0d7095e624c958a240" + digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8" name = "github.com/hashicorp/hcl" packages = [ ".", @@ -176,7 +179,8 @@ "json/token", ] pruneopts = "UT" - revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" + revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" + version = "v1.0.0" [[projects]] digest = "1:4be4315ec4768c829b2bc87c7e28dbb2420e831e242770d845833c13bb658d70" @@ -212,11 +216,11 @@ [[projects]] branch = "master" - digest = "1:89658943622e6bc5e76b4da027ee9583fa0b321db0c797bd554edab96c1ca2b1" + digest = "1:a4826c308e84f5f161b90b54a814f0be7d112b80164b9b884698a6903ea47ab3" name = "github.com/hashicorp/yamux" packages = ["."] pruneopts = "UT" - revision = "3520598351bb3500a49ae9563f5539666ae0a27c" + revision = "7221087c3d281fda5f794e28c2ea4c6e4d5c4558" [[projects]] digest = "1:b87714e57a511d88f307aba7d5b63522da12bed0a050889c81272fc50f71100e" @@ -235,27 +239,28 @@ version = "1.1.5" [[projects]] - branch = "master" - digest = "1:c7354463195544b1ab3c1f1fadb41430947f5d28dfbf2cdbd38268c5717a5a03" + digest = "1:78bbb1ba5b7c3f2ed0ea1eab57bdd3859aec7e177811563edc41198a760b06af" name = 
"github.com/mitchellh/go-homedir" packages = ["."] pruneopts = "UT" - revision = "58046073cbffe2f25d425fe1331102f55cf719de" + revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4" + version = "v1.0.0" [[projects]] - branch = "master" - digest = "1:cae1afe858922bd10e9573b87130f730a6e4183a00eba79920d6656629468bfa" + digest = "1:42eb1f52b84a06820cedc9baec2e710bfbda3ee6dac6cdb97f8b9a5066134ec6" name = "github.com/mitchellh/go-testing-interface" packages = ["."] pruneopts = "UT" - revision = "a61a99592b77c9ba629d254a693acffaeb4b7e28" + revision = "6d0b8010fcc857872e42fc6c931227569016843c" + version = "v1.0.0" [[projects]] - digest = "1:e730597b38a4d56e2361e0b6236cb800e52c73cace2ff91396f4ff35792ddfa7" + digest = "1:e32dfc6abff6a3633ef4d9a1022fd707c8ef26f1e1e8f855dc58dc415ce7c8f3" name = "github.com/mitchellh/mapstructure" packages = ["."] pruneopts = "UT" - revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b" + revision = "fe40af7a9c397fa3ddba203c38a5042c5d0475ad" + version = "v1.1.1" [[projects]] digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" @@ -282,12 +287,12 @@ version = "v1.0.0" [[projects]] - branch = "master" - digest = "1:5b92d232e81c3e8eec282c92dcaa2e0e1ad3c23157be19a01b3e33f7e6e8d137" + digest = "1:0e792eea6c96ec55ff302ef33886acbaa5006e900fefe82689e88d96439dcd84" name = "github.com/ryanuber/go-glob" packages = ["."] pruneopts = "UT" - revision = "256dc444b735e061061cf46c809487313d5b0065" + revision = "572520ed46dbddaed19ea3d9541bdd0494163693" + version = "v0.1" [[projects]] digest = "1:274f67cb6fed9588ea2521ecdac05a6d62a8c51c074c1fccc6a49a40ba80e925" @@ -298,7 +303,8 @@ version = "v1.2.0" [[projects]] - digest = "1:2131278c6fb9a84f2da205d35fdace38c530c2f9394342c52476cb4d23b50ffb" + branch = "master" + digest = "1:505dbee0833715a72a529bb57c354826ad42a4496fad787fa143699b4de1a6d0" name = "golang.org/x/net" packages = [ "context", @@ -310,14 +316,15 @@ "trace", ] pruneopts = "UT" - revision = "039a4258aec0ad3c79b905677cceeab13b296a77" + revision = "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f" [[projects]] - digest = "1:d773e525476aefa22ea944a5425a9bfb99819b2e67eeb9b1966454fd57522bbf" + branch = "master" + digest = "1:746ccf620ef9726c42453032e8e039860851ab5914278d24202f343a479a3073" name = "golang.org/x/sys" packages = ["unix"] pruneopts = "UT" - revision = "1b2967e3c290b7c545b3db0deeda16e9be4f98a2" + revision = "af653ce8b74f808d092db8ca9741fbb63d2a469d" [[projects]] digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" @@ -352,14 +359,14 @@ [[projects]] branch = "master" - digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" + digest = "1:1e6b0176e8c5dd8ff551af65c76f8b73a99bcf4d812cedff1b91711b7df4804c" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] pruneopts = "UT" - revision = "383e8b2c3b9e36c4076b235b32537292176bae20" + revision = "c7e5094acea1ca1b899e2259d80a6b0f882f81f8" [[projects]] - digest = "1:5c64b5dd01a6d9dbcf4c160b9a36996605c79f39b46ea7570b3d861dbc71f641" + digest = "1:1b6d2676ea895d33cbd1999c75dfc8e25b103c754ccfc66dc06ae845ce3a47bc" name = "google.golang.org/grpc" packages = [ ".", @@ -392,7 +399,8 @@ "tap", ] pruneopts = "UT" - revision = "11b582728a13ef54e14e4fa930d693de2e32420f" + revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1" + version = "v1.15.0" [solve-meta] analyzer-name = "dep" @@ -406,6 +414,7 @@ "github.com/hashicorp/go-hclog", "github.com/hashicorp/go-multierror", "github.com/hashicorp/go-uuid", + "github.com/hashicorp/vault/helper/jsonutil", 
"github.com/hashicorp/vault/helper/pluginutil", "github.com/hashicorp/vault/logical", "github.com/hashicorp/vault/logical/framework", diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-azure/README.md b/vendor/github.com/hashicorp/vault-plugin-secrets-azure/README.md index 8edfe9fb5..772aa19b7 100644 --- a/vendor/github.com/hashicorp/vault-plugin-secrets-azure/README.md +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-azure/README.md @@ -80,7 +80,7 @@ Once the server is started, register the plugin in the Vault server's [plugin ca ```sh $ vault write sys/plugins/catalog/azure \ - sha_256= \ + sha256= \ command="vault-plugin-secrets-azure" ... Success! Data written to: sys/plugins/catalog/azure diff --git a/vendor/github.com/jackc/pgx/copy_from.go b/vendor/github.com/jackc/pgx/copy_from.go index 314d441fa..27e2fc9a5 100644 --- a/vendor/github.com/jackc/pgx/copy_from.go +++ b/vendor/github.com/jackc/pgx/copy_from.go @@ -298,7 +298,7 @@ func (c *Conn) CopyFromReader(r io.Reader, sql string) error { sp := len(buf) for { n, err := r.Read(buf[5:cap(buf)]) - if err == io.EOF { + if err == io.EOF && n == 0 { break } buf = buf[0 : n+5] diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod new file mode 100644 index 000000000..716c61312 --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod @@ -0,0 +1 @@ +module github.com/konsorten/go-windows-terminal-sequences diff --git a/vendor/github.com/miekg/dns/Gopkg.lock b/vendor/github.com/miekg/dns/Gopkg.lock index f8d1c78ca..686632207 100644 --- a/vendor/github.com/miekg/dns/Gopkg.lock +++ b/vendor/github.com/miekg/dns/Gopkg.lock @@ -3,31 +3,55 @@ [[projects]] branch = "master" + digest = "1:6914c49eed986dfb8dffb33516fa129c49929d4d873f41e073c83c11c372b870" name = "golang.org/x/crypto" - packages = ["ed25519","ed25519/internal/edwards25519"] - revision = "b47b1587369238182299fe4dad77d05b8b461e06" + packages = [ + "ed25519", + "ed25519/internal/edwards25519", + ] + pruneopts = "" + revision = "e3636079e1a4c1f337f212cc5cd2aca108f6c900" [[projects]] branch = "master" + digest = "1:08e41d63f8dac84d83797368b56cf0b339e42d0224e5e56668963c28aec95685" name = "golang.org/x/net" - packages = ["bpf","context","internal/iana","internal/socket","ipv4","ipv6"] - revision = "1e491301e022f8f977054da4c2d852decd59571f" + packages = [ + "bpf", + "context", + "internal/iana", + "internal/socket", + "ipv4", + "ipv6", + ] + pruneopts = "" + revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de" [[projects]] branch = "master" + digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1" name = "golang.org/x/sync" packages = ["errgroup"] + pruneopts = "" revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" [[projects]] branch = "master" + digest = "1:149a432fabebb8221a80f77731b1cd63597197ded4f14af606ebe3a0959004ec" name = "golang.org/x/sys" packages = ["unix"] + pruneopts = "" revision = "e4b3c5e9061176387e7cea65e4dc5853801f3fb7" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "89261d224d04ffe1530fb9e91fcf649ef0e571531482d043fdad683898871768" + input-imports = [ + "golang.org/x/crypto/ed25519", + "golang.org/x/net/ipv4", + "golang.org/x/net/ipv6", + "golang.org/x/sync/errgroup", + "golang.org/x/sys/unix", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/miekg/dns/Gopkg.toml b/vendor/github.com/miekg/dns/Gopkg.toml index 2f655b2c7..85e6ff31b 100644 --- 
a/vendor/github.com/miekg/dns/Gopkg.toml +++ b/vendor/github.com/miekg/dns/Gopkg.toml @@ -24,3 +24,15 @@ [[constraint]] branch = "master" name = "golang.org/x/crypto" + +[[constraint]] + branch = "master" + name = "golang.org/x/net" + +[[constraint]] + branch = "master" + name = "golang.org/x/sys" + +[[constraint]] + branch = "master" + name = "golang.org/x/sync" diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 77874642b..7f1aaa5de 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -67,6 +67,7 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/xor-gate/sshfp * https://github.com/rs/dnstrace * https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss)) +* https://github.com/semihalev/sdns Send pull request if you want to be listed here. diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go index 719198659..24afab9f8 100644 --- a/vendor/github.com/miekg/dns/dnssec_keyscan.go +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -1,6 +1,7 @@ package dns import ( + "bufio" "crypto" "crypto/dsa" "crypto/ecdsa" @@ -194,23 +195,12 @@ func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) { // parseKey reads a private key from r. It returns a map[string]string, // with the key-value pairs, or an error when the file is not correct. func parseKey(r io.Reader, file string) (map[string]string, error) { - s, cancel := scanInit(r) m := make(map[string]string) - c := make(chan lex) - k := "" - defer func() { - cancel() - // zlexer can send up to two tokens, the next one and possibly 1 remainders. - // Do a non-blocking read. - _, ok := <-c - _, ok = <-c - if !ok { - // too bad - } - }() - // Start the lexer - go klexer(s, c) - for l := range c { + var k string + + c := newKLexer(r) + + for l, ok := c.Next(); ok; l, ok = c.Next() { // It should alternate switch l.value { case zKey: @@ -219,41 +209,111 @@ func parseKey(r io.Reader, file string) (map[string]string, error) { if k == "" { return nil, &ParseError{file, "no private key seen", l} } - //println("Setting", strings.ToLower(k), "to", l.token, "b") + m[strings.ToLower(k)] = l.token k = "" } } + + // Surface any read errors from r. + if err := c.Err(); err != nil { + return nil, &ParseError{file: file, err: err.Error()} + } + return m, nil } -// klexer scans the sourcefile and returns tokens on the channel c. -func klexer(s *scan, c chan lex) { - var l lex - str := "" // Hold the current read text - commt := false - key := true - x, err := s.tokenText() - defer close(c) - for err == nil { - l.column = s.position.Column - l.line = s.position.Line +type klexer struct { + br io.ByteReader + + readErr error + + line int + column int + + key bool + + eol bool // end-of-line +} + +func newKLexer(r io.Reader) *klexer { + br, ok := r.(io.ByteReader) + if !ok { + br = bufio.NewReaderSize(r, 1024) + } + + return &klexer{ + br: br, + + line: 1, + + key: true, + } +} + +func (kl *klexer) Err() error { + if kl.readErr == io.EOF { + return nil + } + + return kl.readErr +} + +// readByte returns the next byte from the input +func (kl *klexer) readByte() (byte, bool) { + if kl.readErr != nil { + return 0, false + } + + c, err := kl.br.ReadByte() + if err != nil { + kl.readErr = err + return 0, false + } + + // delay the newline handling until the next token is delivered, + // fixes off-by-one errors when reporting a parse error. 
+ if kl.eol { + kl.line++ + kl.column = 0 + kl.eol = false + } + + if c == '\n' { + kl.eol = true + } else { + kl.column++ + } + + return c, true +} + +func (kl *klexer) Next() (lex, bool) { + var ( + l lex + + str strings.Builder + + commt bool + ) + + for x, ok := kl.readByte(); ok; x, ok = kl.readByte() { + l.line, l.column = kl.line, kl.column + switch x { case ':': - if commt { + if commt || !kl.key { break } - l.token = str - if key { - l.value = zKey - c <- l - // Next token is a space, eat it - s.tokenText() - key = false - str = "" - } else { - l.value = zValue - } + + kl.key = false + + // Next token is a space, eat it + kl.readByte() + + l.value = zKey + l.token = str.String() + return l, true case ';': commt = true case '\n': @@ -261,24 +321,27 @@ func klexer(s *scan, c chan lex) { // Reset a comment commt = false } + + kl.key = true + l.value = zValue - l.token = str - c <- l - str = "" - commt = false - key = true + l.token = str.String() + return l, true default: if commt { break } - str += string(x) + + str.WriteByte(x) } - x, err = s.tokenText() } - if len(str) > 0 { + + if str.Len() > 0 { // Send remainder - l.token = str l.value = zValue - c <- l + l.token = str.String() + return l, true } + + return lex{value: zEOF}, false } diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go index 3a559793f..74e670203 100644 --- a/vendor/github.com/miekg/dns/generate.go +++ b/vendor/github.com/miekg/dns/generate.go @@ -20,7 +20,7 @@ import ( // of $ after that are interpreted. // Any error are returned as a string value, the empty string signals // "no error". -func generate(l lex, c chan lex, t chan *Token, o string) string { +func generate(l lex, c *zlexer, t chan *Token, o string) string { step := 1 if i := strings.IndexAny(l.token, "/"); i != -1 { if i+1 == len(l.token) { @@ -52,11 +52,11 @@ func generate(l lex, c chan lex, t chan *Token, o string) string { return "bad range in $GENERATE range" } - <-c // _BLANK + c.Next() // _BLANK // Create a complete new string, which we then parse again. s := "" BuildRR: - l = <-c + l, _ = c.Next() if l.value != zNewline && l.value != zEOF { s += l.token goto BuildRR @@ -107,6 +107,8 @@ BuildRR: mod, offset, err = modToPrintf(s[j+2 : j+2+sep]) if err != nil { return err.Error() + } else if start+offset < 0 || end+offset > 1<<31-1 { + return "bad offset in $GENERATE" } j += 2 + sep // Jump to it } @@ -152,7 +154,7 @@ func modToPrintf(s string) (string, int, error) { return "", 0, errors.New("bad base in $GENERATE") } offset, err := strconv.Atoi(xs[0]) - if err != nil || offset > 255 { + if err != nil { return "", 0, errors.New("bad offset in $GENERATE") } width, err := strconv.Atoi(xs[1]) diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index 154b65ee4..47ac6cf28 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -302,6 +302,12 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c } // If we did compression and we find something add the pointer here if pointer != -1 { + // Clear the msg buffer after the pointer location, otherwise + // packDataNsec writes the wrong data to msg. 
+ tainted := msg[nameoffset:off] + for i := range tainted { + tainted[i] = 0 + } // We have two bytes (14 bits) to put the pointer in // if msg == nil, we will never do compression binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000)) @@ -525,6 +531,10 @@ func dddToByte(s []byte) byte { return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) } +func dddStringToByte(s string) byte { + return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +} + // Helper function for packing and unpacking func intToBytes(i *big.Int, length int) []byte { buf := i.Bytes() diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index d58ff1d70..81fc2b1be 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -6,7 +6,6 @@ import ( "encoding/binary" "encoding/hex" "net" - "strconv" "strings" ) @@ -276,13 +275,7 @@ func unpackString(msg []byte, off int) (string, int, error) { s.WriteByte('\\') s.WriteByte(b) case b < ' ' || b > '~': // unprintable - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s.WriteByte('\\') - for i := len(bufs); i < 3; i++ { - s.WriteByte('0') - } - s.Write(bufs) + writeEscapedByte(&s, b) default: s.WriteByte(b) } diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go index d931da7ef..74544a74e 100644 --- a/vendor/github.com/miekg/dns/privaterr.go +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -105,7 +105,7 @@ func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) return rr, off, err } - setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + setPrivateRR := func(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := mkPrivateRR(h.Rrtype) rr.Hdr = h @@ -115,7 +115,7 @@ func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) for { // TODO(miek): we could also be returning _QUOTE, this might or might not // be an issue (basically parsing TXT becomes hard) - switch l = <-c; l.value { + switch l, _ = c.Next(); l.value { case zNewline, zEOF: break Fetch case zString: diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index a752dbd01..b4fa0566f 100644 --- a/vendor/github.com/miekg/dns/scan.go +++ b/vendor/github.com/miekg/dns/scan.go @@ -1,6 +1,7 @@ package dns import ( + "bufio" "fmt" "io" "os" @@ -74,15 +75,13 @@ func (e *ParseError) Error() (s string) { } type lex struct { - token string // text of the token - tokenUpper string // uppercase text of the token - length int // length of the token - err bool // when true, token text has lexer error - value uint8 // value: zString, _BLANK, etc. - torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar - line int // line in the file - column int // column in the file - comment string // any comment text seen + token string // text of the token + err bool // when true, token text has lexer error + value uint8 // value: zString, _BLANK, etc. + torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar + line int // line in the file + column int // column in the file + comment string // any comment text seen } // Token holds the token that are returned when a zone file is parsed. @@ -152,7 +151,8 @@ func ReadRR(q io.Reader, filename string) (RR, error) { // foo. IN A 10.0.0.1 ; this is a comment // // The text "; this is comment" is returned in Token.Comment. 
Comments inside the -// RR are discarded. Comments on a line by themselves are discarded too. +// RR are returned concatenated along with the RR. Comments on a line by themselves +// are discarded. func ParseZone(r io.Reader, origin, file string) chan *Token { return parseZoneHelper(r, origin, file, nil, 10000) } @@ -169,22 +169,9 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i close(t) } }() - s, cancel := scanInit(r) - c := make(chan lex) - // Start the lexer - go zlexer(s, c) - defer func() { - cancel() - // zlexer can send up to three tokens, the next one and possibly 2 remainders. - // Do a non-blocking read. - _, ok := <-c - _, ok = <-c - _, ok = <-c - if !ok { - // too bad - } - }() + c := newZLexer(r) + // 6 possible beginnings of a line, _ is a space // 0. zRRTYPE -> all omitted until the rrtype // 1. zOwner _ zRrtype -> class/ttl omitted @@ -206,7 +193,7 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i st := zExpectOwnerDir // initial state var h RR_Header var prevName string - for l := range c { + for l, ok := c.Next(); ok; l, ok = c.Next() { // Lexer spotted an error already if l.err { t <- &Token{Error: &ParseError{f, l.token, l}} @@ -279,9 +266,9 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i return } neworigin := origin // There may be optionally a new origin set after the filename, if not use current one - switch l := <-c; l.value { + switch l, _ := c.Next(); l.value { case zBlank: - l := <-c + l, _ := c.Next() if l.value == zString { name, ok := toAbsoluteName(l.token, origin) if !ok { @@ -482,69 +469,157 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i } // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this // is not an error, because an empty zone file is still a zone file. + + // Surface any read errors from r. + if err := c.Err(); err != nil { + t <- &Token{Error: &ParseError{file: f, err: err.Error()}} + } } -// zlexer scans the sourcefile and returns tokens on the channel c. -func zlexer(s *scan, c chan lex) { - var l lex - str := make([]byte, maxTok) // Should be enough for any token - stri := 0 // Offset in str (0 means empty) - com := make([]byte, maxTok) // Hold comment text - comi := 0 - quote := false - escape := false - space := false - commt := false - rrtype := false - owner := true - brace := 0 - x, err := s.tokenText() - defer close(c) - for err == nil { - l.column = s.position.Column - l.line = s.position.Line - if stri >= maxTok { +type zlexer struct { + br io.ByteReader + + readErr error + + line int + column int + + com string + + l lex + + brace int + quote bool + space bool + commt bool + rrtype bool + owner bool + + nextL bool + + eol bool // end-of-line +} + +func newZLexer(r io.Reader) *zlexer { + br, ok := r.(io.ByteReader) + if !ok { + br = bufio.NewReaderSize(r, 1024) + } + + return &zlexer{ + br: br, + + line: 1, + + owner: true, + } +} + +func (zl *zlexer) Err() error { + if zl.readErr == io.EOF { + return nil + } + + return zl.readErr +} + +// readByte returns the next byte from the input +func (zl *zlexer) readByte() (byte, bool) { + if zl.readErr != nil { + return 0, false + } + + c, err := zl.br.ReadByte() + if err != nil { + zl.readErr = err + return 0, false + } + + // delay the newline handling until the next token is delivered, + // fixes off-by-one errors when reporting a parse error. 
+ if zl.eol { + zl.line++ + zl.column = 0 + zl.eol = false + } + + if c == '\n' { + zl.eol = true + } else { + zl.column++ + } + + return c, true +} + +func (zl *zlexer) Next() (lex, bool) { + l := &zl.l + if zl.nextL { + zl.nextL = false + return *l, true + } + if l.err { + // Parsing errors should be sticky. + return lex{value: zEOF}, false + } + + var ( + str [maxTok]byte // Hold string text + com [maxTok]byte // Hold comment text + + stri int // Offset in str (0 means empty) + comi int // Offset in com (0 means empty) + + escape bool + ) + + if zl.com != "" { + comi = copy(com[:], zl.com) + zl.com = "" + } + + for x, ok := zl.readByte(); ok; x, ok = zl.readByte() { + l.line, l.column = zl.line, zl.column + l.comment = "" + + if stri >= len(str) { l.token = "token length insufficient for parsing" l.err = true - c <- l - return + return *l, true } - if comi >= maxTok { + if comi >= len(com) { l.token = "comment length insufficient for parsing" l.err = true - c <- l - return + return *l, true } switch x { case ' ', '\t': - if escape { + if escape || zl.quote { + // Inside quotes or escaped this is legal. + str[stri] = x + stri++ + escape = false - str[stri] = x - stri++ break } - if quote { - // Inside quotes this is legal - str[stri] = x - stri++ - break - } - if commt { + + if zl.commt { com[comi] = x comi++ break } + + var retL lex if stri == 0 { // Space directly in the beginning, handled in the grammar - } else if owner { + } else if zl.owner { // If we have a string and its the first, make it an owner l.value = zOwner l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri + // escape $... start with a \ not a $, so this will work - switch l.tokenUpper { + switch strings.ToUpper(l.token) { case "$TTL": l.value = zDirTTL case "$ORIGIN": @@ -554,258 +629,311 @@ func zlexer(s *scan, c chan lex) { case "$GENERATE": l.value = zDirGenerate } - c <- l + + retL = *l } else { l.value = zString l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - if !rrtype { - if t, ok := StringToType[l.tokenUpper]; ok { + + if !zl.rrtype { + tokenUpper := strings.ToUpper(l.token) + if t, ok := StringToType[tokenUpper]; ok { l.value = zRrtpe l.torc = t - rrtype = true - } else { - if strings.HasPrefix(l.tokenUpper, "TYPE") { - t, ok := typeToInt(l.token) - if !ok { - l.token = "unknown RR type" - l.err = true - c <- l - return - } - l.value = zRrtpe - rrtype = true - l.torc = t + + zl.rrtype = true + } else if strings.HasPrefix(tokenUpper, "TYPE") { + t, ok := typeToInt(l.token) + if !ok { + l.token = "unknown RR type" + l.err = true + return *l, true } + + l.value = zRrtpe + l.torc = t + + zl.rrtype = true } - if t, ok := StringToClass[l.tokenUpper]; ok { + + if t, ok := StringToClass[tokenUpper]; ok { l.value = zClass l.torc = t - } else { - if strings.HasPrefix(l.tokenUpper, "CLASS") { - t, ok := classToInt(l.token) - if !ok { - l.token = "unknown class" - l.err = true - c <- l - return - } - l.value = zClass - l.torc = t + } else if strings.HasPrefix(tokenUpper, "CLASS") { + t, ok := classToInt(l.token) + if !ok { + l.token = "unknown class" + l.err = true + return *l, true } + + l.value = zClass + l.torc = t } } - c <- l - } - stri = 0 - if !space && !commt { + retL = *l + } + + zl.owner = false + + if !zl.space { + zl.space = true + l.value = zBlank l.token = " " - l.length = 1 - c <- l + + if retL == (lex{}) { + return *l, true + } + + zl.nextL = true + } + + if retL != (lex{}) { + return retL, true } - owner = false - space = true 
case ';': - if escape { + if escape || zl.quote { + // Inside quotes or escaped this is legal. + str[stri] = x + stri++ + escape = false - str[stri] = x - stri++ break } - if quote { - // Inside quotes this is legal - str[stri] = x - stri++ - break + + zl.commt = true + zl.com = "" + + if comi > 1 { + // A newline was previously seen inside a comment that + // was inside braces and we delayed adding it until now. + com[comi] = ' ' // convert newline to space + comi++ } - if stri > 0 { - l.value = zString - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - c <- l - stri = 0 - } - commt = true + com[comi] = ';' comi++ + + if stri > 0 { + zl.com = string(com[:comi]) + + l.value = zString + l.token = string(str[:stri]) + return *l, true + } case '\r': escape = false - if quote { + + if zl.quote { str[stri] = x stri++ } + // discard if outside of quotes case '\n': escape = false + // Escaped newline - if quote { + if zl.quote { str[stri] = x stri++ break } - // inside quotes this is legal - if commt { + + if zl.commt { // Reset a comment - commt = false - rrtype = false - stri = 0 + zl.commt = false + zl.rrtype = false + // If not in a brace this ends the comment AND the RR - if brace == 0 { - owner = true - owner = true + if zl.brace == 0 { + zl.owner = true + l.value = zNewline l.token = "\n" - l.tokenUpper = l.token - l.length = 1 l.comment = string(com[:comi]) - c <- l - l.comment = "" - comi = 0 - break + return *l, true } - com[comi] = ' ' // convert newline to space - comi++ + + zl.com = string(com[:comi]) break } - if brace == 0 { + if zl.brace == 0 { // If there is previous text, we should output it here + var retL lex if stri != 0 { l.value = zString l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - if !rrtype { - if t, ok := StringToType[l.tokenUpper]; ok { + if !zl.rrtype { + tokenUpper := strings.ToUpper(l.token) + if t, ok := StringToType[tokenUpper]; ok { + zl.rrtype = true + l.value = zRrtpe l.torc = t - rrtype = true } } - c <- l + + retL = *l } + l.value = zNewline l.token = "\n" - l.tokenUpper = l.token - l.length = 1 - c <- l - stri = 0 - commt = false - rrtype = false - owner = true - comi = 0 + l.comment = zl.com + + zl.com = "" + zl.rrtype = false + zl.owner = true + + if retL != (lex{}) { + zl.nextL = true + return retL, true + } + + return *l, true } case '\\': // comments do not get escaped chars, everything is copied - if commt { + if zl.commt { com[comi] = x comi++ break } + // something already escaped must be in string if escape { str[stri] = x stri++ + escape = false break } + // something escaped outside of string gets added to string str[stri] = x stri++ + escape = true case '"': - if commt { + if zl.commt { com[comi] = x comi++ break } + if escape { str[stri] = x stri++ + escape = false break } - space = false + + zl.space = false + // send previous gathered text and the quote + var retL lex if stri != 0 { l.value = zString l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - c <- l - stri = 0 + retL = *l } // send quote itself as separate token l.value = zQuote l.token = "\"" - l.tokenUpper = l.token - l.length = 1 - c <- l - quote = !quote + + zl.quote = !zl.quote + + if retL != (lex{}) { + zl.nextL = true + return retL, true + } + + return *l, true case '(', ')': - if commt { + if zl.commt { com[comi] = x comi++ break } - if escape { + + if escape || zl.quote { + // Inside quotes or escaped this is legal. 
str[stri] = x stri++ + escape = false break } - if quote { - str[stri] = x - stri++ - break - } + switch x { case ')': - brace-- - if brace < 0 { + zl.brace-- + + if zl.brace < 0 { l.token = "extra closing brace" - l.tokenUpper = l.token l.err = true - c <- l - return + return *l, true } case '(': - brace++ + zl.brace++ } default: escape = false - if commt { + + if zl.commt { com[comi] = x comi++ break } + str[stri] = x stri++ - space = false + + zl.space = false } - x, err = s.tokenText() } + + var retL lex if stri > 0 { - // Send remainder - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri + // Send remainder of str l.value = zString - c <- l + l.token = string(str[:stri]) + retL = *l + + if comi <= 0 { + return retL, true + } } - if brace != 0 { + + if comi > 0 { + // Send remainder of com + l.value = zNewline + l.token = "\n" + l.comment = string(com[:comi]) + + if retL != (lex{}) { + zl.nextL = true + return retL, true + } + + return *l, true + } + + if zl.brace != 0 { + l.comment = "" // in case there was left over string and comment l.token = "unbalanced brace" - l.tokenUpper = l.token l.err = true - c <- l + return *l, true } + + return lex{value: zEOF}, false } // Extract the class number from CLASSxx @@ -966,12 +1094,12 @@ func locCheckEast(token string, longitude uint32) (uint32, bool) { } // "Eat" the rest of the "line". Return potential comments -func slurpRemainder(c chan lex, f string) (*ParseError, string) { - l := <-c +func slurpRemainder(c *zlexer, f string) (*ParseError, string) { + l, _ := c.Next() com := "" switch l.value { case zBlank: - l = <-c + l, _ = c.Next() com = l.comment if l.value != zNewline && l.value != zEOF { return &ParseError{f, "garbage after rdata", l}, "" diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index 67f884b0d..935d22c3f 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -11,7 +11,7 @@ type parserFunc struct { // Func defines the function that parses the tokens and returns the RR // or an error. The last string contains any comments in the line as // they returned by the lexer as well. - Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string) + Func func(h RR_Header, c *zlexer, origin string, file string) (RR, *ParseError, string) // Signals if the RR ending is of variable length, like TXT or records // that have Hexadecimal or Base64 as their last element in the Rdata. Records // that have a fixed ending or for instance A, AAAA, SOA and etc. @@ -23,7 +23,7 @@ type parserFunc struct { // After the rdata there may come a zBlank and then a zNewline // or immediately a zNewline. If this is not the case we flag // an *ParseError: garbage after rdata. 
-func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setRR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { parserfunc, ok := typeToparserFunc[h.Rrtype] if ok { r, e, cm := parserfunc.Func(h, c, o, f) @@ -45,9 +45,9 @@ func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { // A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) // or an error -func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) { +func endingToString(c *zlexer, errstr, f string) (string, *ParseError, string) { s := "" - l := <-c // zString + l, _ := c.Next() // zString for l.value != zNewline && l.value != zEOF { if l.err { return s, &ParseError{f, errstr, l}, "" @@ -59,16 +59,16 @@ func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) default: return "", &ParseError{f, errstr, l}, "" } - l = <-c + l, _ = c.Next() } return s, nil, l.comment } // A remainder of the rdata with embedded spaces, split on unquoted whitespace // and return the parsed string slice or an error -func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) { +func endingToTxtSlice(c *zlexer, errstr, f string) ([]string, *ParseError, string) { // Get the remaining data until we see a zNewline - l := <-c + l, _ := c.Next() if l.err { return nil, &ParseError{f, errstr, l}, "" } @@ -117,7 +117,7 @@ func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, stri default: return nil, &ParseError{f, errstr, l}, "" } - l = <-c + l, _ = c.Next() } if quote { return nil, &ParseError{f, errstr, l}, "" @@ -125,12 +125,12 @@ func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, stri return s, nil, l.comment } -func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(A) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -141,12 +141,12 @@ func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setAAAA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(AAAA) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -157,13 +157,13 @@ func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setNS(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(NS) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Ns = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -175,13 +175,13 @@ func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(PTR) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Ptr = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, "" } @@ -193,13 +193,13 @@ func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setNSAPPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(NSAPPTR) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Ptr = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -211,13 +211,13 @@ func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) return rr, nil, "" } -func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setRP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(RP) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Mbox = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -227,8 +227,8 @@ func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Mbox = mbox - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() rr.Txt = l.token txt, txtOk := toAbsoluteName(l.token, o) @@ -240,13 +240,13 @@ func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setMR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(MR) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Mr = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -258,13 +258,13 @@ func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setMB(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(MB) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Mb = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -276,13 +276,13 @@ func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setMG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(MG) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Mg = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -294,7 +294,7 @@ func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setHINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(HINFO) rr.Hdr = h @@ -320,13 +320,13 @@ func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setMINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(MINFO) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Rmail = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, "" } @@ -336,8 +336,8 @@ func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Rmail = rmail - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() rr.Email = l.token email, emailOk := toAbsoluteName(l.token, o) @@ -349,13 +349,13 @@ func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setMF(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(MF) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Mf = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -367,13 +367,13 @@ func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setMD(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(MD) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Md = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -385,12 +385,12 @@ func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setMX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(MX) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -400,8 +400,8 @@ func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString rr.Mx = l.token name, nameOk := toAbsoluteName(l.token, o) @@ -413,12 +413,12 @@ func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setRT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(RT) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -428,8 +428,8 @@ func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString rr.Host = l.token name, nameOk := toAbsoluteName(l.token, o) @@ -441,12 +441,12 @@ func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setAFSDB(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(AFSDB) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, "" } @@ -456,8 +456,8 @@ func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Subtype = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString rr.Hostname = l.token name, nameOk := toAbsoluteName(l.token, o) @@ -468,12 +468,12 @@ func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setX25(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(X25) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -484,12 +484,12 @@ func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setKX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(KX) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -499,8 +499,8 @@ func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString rr.Exchanger = l.token name, nameOk := toAbsoluteName(l.token, o) @@ -511,13 +511,13 @@ func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setCNAME(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(CNAME) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Target = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -529,13 +529,13 @@ func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setDNAME(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(DNAME) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Target = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -547,13 +547,13 @@ func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setSOA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(SOA) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.Ns = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, "" } @@ -563,8 +563,8 @@ func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Ns = ns - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() rr.Mbox = l.token mbox, mboxOk := toAbsoluteName(l.token, o) @@ -573,14 +573,14 @@ func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Mbox = mbox - <-c // zBlank + c.Next() // zBlank var ( v uint32 ok bool ) for i := 0; i < 5; i++ { - l = <-c + l, _ = c.Next() if l.err { return nil, &ParseError{f, "bad SOA zone parameter", l}, "" } @@ -600,16 +600,16 @@ func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { switch i { case 0: rr.Serial = v - <-c // zBlank + c.Next() // zBlank case 1: rr.Refresh = v - <-c // zBlank + c.Next() // zBlank case 2: rr.Retry = v - <-c // zBlank + c.Next() // zBlank case 3: rr.Expire = v - <-c // zBlank + c.Next() // zBlank case 4: rr.Minttl = v } @@ -617,12 +617,12 @@ func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setSRV(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(SRV) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -632,24 +632,24 @@ func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Priority = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { return nil, &ParseError{f, "bad SRV Weight", l}, "" } rr.Weight = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { return nil, &ParseError{f, "bad SRV Port", l}, "" } rr.Port = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString rr.Target = l.token name, nameOk := toAbsoluteName(l.token, o) @@ -660,12 +660,12 @@ func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setNAPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(NAPTR) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, "" } @@ -675,8 +675,8 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Order = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { return nil, &ParseError{f, "bad NAPTR Preference", l}, "" @@ -684,15 +684,15 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Preference = uint16(i) // Flags - <-c // zBlank - l = <-c // _QUOTE + c.Next() // zBlank + l, _ = c.Next() // _QUOTE if l.value != zQuote { return nil, &ParseError{f, "bad NAPTR Flags", l}, "" } - l = <-c // Either String or Quote + l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Flags = l.token - l = <-c // _QUOTE + l, _ = c.Next() // _QUOTE if l.value != zQuote { return nil, &ParseError{f, "bad NAPTR Flags", l}, "" } @@ -703,15 +703,15 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } // Service - <-c // zBlank - l = <-c // _QUOTE + c.Next() // zBlank + l, _ = c.Next() // _QUOTE if l.value != zQuote { return nil, &ParseError{f, "bad NAPTR Service", l}, "" } - l = <-c // Either String or Quote + l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Service = l.token - l = <-c // _QUOTE + l, _ = c.Next() // _QUOTE if l.value != zQuote { return nil, &ParseError{f, "bad NAPTR Service", l}, "" } @@ -722,15 +722,15 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } // Regexp - <-c // zBlank - l = <-c // _QUOTE + c.Next() // zBlank + l, _ = c.Next() // _QUOTE if l.value != zQuote { return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" } - l = <-c // Either String or Quote + l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Regexp = l.token - l = <-c // _QUOTE + l, _ = c.Next() // _QUOTE if l.value != zQuote { return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" } @@ -741,8 +741,8 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } // After quote no space?? - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString rr.Replacement = l.token name, nameOk := toAbsoluteName(l.token, o) @@ -753,13 +753,13 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setTALINK(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(TALINK) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.PreviousName = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -769,8 +769,8 @@ func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.PreviousName = previousName - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() rr.NextName = l.token nextName, nextNameOk := toAbsoluteName(l.token, o) @@ -782,7 +782,7 @@ func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setLOC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(LOC) rr.Hdr = h // Non zero defaults for LOC record, see RFC 1876, Section 3. @@ -792,8 +792,8 @@ func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { ok := false // North - l := <-c - if l.length == 0 { // dynamic update rr. 
+ l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } i, e := strconv.ParseUint(l.token, 10, 32) @@ -802,9 +802,9 @@ func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Latitude = 1000 * 60 * 60 * uint32(i) - <-c // zBlank + c.Next() // zBlank // Either number, 'N' or 'S' - l = <-c + l, _ = c.Next() if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { goto East } @@ -814,16 +814,16 @@ func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Latitude += 1000 * 60 * uint32(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" } else { rr.Latitude += uint32(1000 * i) } - <-c // zBlank + c.Next() // zBlank // Either number, 'N' or 'S' - l = <-c + l, _ = c.Next() if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { goto East } @@ -832,16 +832,16 @@ func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { East: // East - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { return nil, &ParseError{f, "bad LOC Longitude", l}, "" } else { rr.Longitude = 1000 * 60 * 60 * uint32(i) } - <-c // zBlank + c.Next() // zBlank // Either number, 'E' or 'W' - l = <-c + l, _ = c.Next() if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { goto Altitude } @@ -850,16 +850,16 @@ East: } else { rr.Longitude += 1000 * 60 * uint32(i) } - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" } else { rr.Longitude += uint32(1000 * i) } - <-c // zBlank + c.Next() // zBlank // Either number, 'E' or 'W' - l = <-c + l, _ = c.Next() if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { goto Altitude } @@ -867,9 +867,9 @@ East: return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" Altitude: - <-c // zBlank - l = <-c - if l.length == 0 || l.err { + c.Next() // zBlank + l, _ = c.Next() + if len(l.token) == 0 || l.err { return nil, &ParseError{f, "bad LOC Altitude", l}, "" } if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { @@ -882,7 +882,7 @@ Altitude: } // And now optionally the other values - l = <-c + l, _ = c.Next() count := 0 for l.value != zNewline && l.value != zEOF { switch l.value { @@ -913,18 +913,18 @@ Altitude: default: return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" } - l = <-c + l, _ = c.Next() } return rr, nil, "" } -func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setHIP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(HIP) rr.Hdr = h // HitLength is not represented - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, l.comment } @@ -934,24 +934,24 @@ func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.PublicKeyAlgorithm = uint8(i) - <-c // zBlank - l = <-c // zString - if l.length == 0 || l.err { + c.Next() // zBlank + l, _ = c.Next() // zString + if len(l.token) == 0 || l.err { return nil, &ParseError{f, "bad HIP Hit", l}, "" } rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. 
rr.HitLength = uint8(len(rr.Hit)) / 2 - <-c // zBlank - l = <-c // zString - if l.length == 0 || l.err { + c.Next() // zBlank + l, _ = c.Next() // zString + if len(l.token) == 0 || l.err { return nil, &ParseError{f, "bad HIP PublicKey", l}, "" } rr.PublicKey = l.token // This cannot contain spaces rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) // RendezvousServers (if any) - l = <-c + l, _ = c.Next() var xs []string for l.value != zNewline && l.value != zEOF { switch l.value { @@ -966,18 +966,18 @@ func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { default: return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" } - l = <-c + l, _ = c.Next() } rr.RendezvousServers = xs return rr, nil, l.comment } -func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setCERT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(CERT) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, l.comment } @@ -988,15 +988,15 @@ func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } else { rr.Type = uint16(i) } - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { return nil, &ParseError{f, "bad CERT KeyTag", l}, "" } rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString if v, ok := StringToAlgorithm[l.token]; ok { rr.Algorithm = v } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { @@ -1012,7 +1012,7 @@ func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setOPENPGPKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(OPENPGPKEY) rr.Hdr = h @@ -1024,12 +1024,12 @@ func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, strin return rr, nil, c1 } -func setCSYNC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setCSYNC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(CSYNC) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, l.comment } j, e := strconv.ParseUint(l.token, 10, 32) @@ -1039,9 +1039,9 @@ func setCSYNC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Serial = uint32(j) - <-c // zBlank + c.Next() // zBlank - l = <-c + l, _ = c.Next() j, e = strconv.ParseUint(l.token, 10, 16) if e != nil { // Serial must be a number @@ -1054,14 +1054,15 @@ func setCSYNC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { k uint16 ok bool ) - l = <-c + l, _ = c.Next() for l.value != zNewline && l.value != zEOF { switch l.value { case zBlank: // Ok case zString: - if k, ok = StringToType[l.tokenUpper]; !ok { - if k, ok = typeToInt(l.tokenUpper); !ok { + tokenUpper := strings.ToUpper(l.token) + if k, ok = StringToType[tokenUpper]; !ok { + if k, ok = typeToInt(l.token); !ok { return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" } } @@ -1069,12 +1070,12 @@ func setCSYNC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { default: return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" } - l = <-c + l, _ = c.Next() } return rr, nil, l.comment } -func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { r, e, s := setRRSIG(h, c, o, f) if r != nil { return &SIG{*r.(*RRSIG)}, e, s @@ -1082,18 +1083,19 @@ func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, e, s } -func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setRRSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(RRSIG) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, l.comment } - if t, ok := StringToType[l.tokenUpper]; !ok { - if strings.HasPrefix(l.tokenUpper, "TYPE") { - t, ok = typeToInt(l.tokenUpper) + tokenUpper := strings.ToUpper(l.token) + if t, ok := StringToType[tokenUpper]; !ok { + if strings.HasPrefix(tokenUpper, "TYPE") { + t, ok = typeToInt(l.token) if !ok { return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" } @@ -1105,32 +1107,32 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.TypeCovered = t } - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, err := strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" } rr.Algorithm = uint8(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, err = strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { return nil, &ParseError{f, "bad RRSIG Labels", l}, "" } rr.Labels = uint8(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, err = strconv.ParseUint(l.token, 10, 32) if err != nil || l.err { return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" } rr.OrigTtl = uint32(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() if i, err := StringToTime(l.token); err != nil { // Try to see if all numeric and use it as epoch if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { @@ -1143,8 +1145,8 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Expiration = i } - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() if i, err := StringToTime(l.token); err != nil { if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { rr.Inception = uint32(i) @@ -1155,16 +1157,16 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, 
*ParseError, string) { rr.Inception = i } - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, err = strconv.ParseUint(l.token, 10, 16) if err != nil || l.err { return nil, &ParseError{f, "bad RRSIG KeyTag", l}, "" } rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() rr.SignerName = l.token name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { @@ -1181,13 +1183,13 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setNSEC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(NSEC) rr.Hdr = h - l := <-c + l, _ := c.Next() rr.NextDomain = l.token - if l.length == 0 { // dynamic update rr. + if len(l.token) == 0 { // dynamic update rr. return rr, nil, l.comment } @@ -1202,14 +1204,15 @@ func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { k uint16 ok bool ) - l = <-c + l, _ = c.Next() for l.value != zNewline && l.value != zEOF { switch l.value { case zBlank: // Ok case zString: - if k, ok = StringToType[l.tokenUpper]; !ok { - if k, ok = typeToInt(l.tokenUpper); !ok { + tokenUpper := strings.ToUpper(l.token) + if k, ok = StringToType[tokenUpper]; !ok { + if k, ok = typeToInt(l.token); !ok { return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" } } @@ -1217,17 +1220,17 @@ func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { default: return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" } - l = <-c + l, _ = c.Next() } return rr, nil, l.comment } -func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setNSEC3(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(NSEC3) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, l.comment } @@ -1236,22 +1239,22 @@ func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" } rr.Hash = uint8(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" } rr.Flags = uint8(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" } rr.Iterations = uint16(i) - <-c - l = <-c + c.Next() + l, _ = c.Next() if len(l.token) == 0 || l.err { return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" } @@ -1260,8 +1263,8 @@ func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { rr.Salt = l.token } - <-c - l = <-c + c.Next() + l, _ = c.Next() if len(l.token) == 0 || l.err { return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, "" } @@ -1273,14 +1276,15 @@ func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { k uint16 ok bool ) - l = <-c + l, _ = c.Next() for l.value != zNewline && l.value != zEOF { switch l.value { case zBlank: // Ok case zString: - if k, ok = StringToType[l.tokenUpper]; !ok { - if k, ok = typeToInt(l.tokenUpper); !ok { + tokenUpper := strings.ToUpper(l.token) + if k, ok = StringToType[tokenUpper]; !ok { + if k, ok = typeToInt(l.token); !ok { return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" } } @@ -1288,17 +1292,17 @@ func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { default: return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" } - l = <-c + l, _ = c.Next() } return rr, nil, l.comment } -func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setNSEC3PARAM(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(NSEC3PARAM) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -1307,22 +1311,22 @@ func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, strin return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" } rr.Hash = uint8(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, "" } rr.Flags = uint8(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" } rr.Iterations = uint16(i) - <-c - l = <-c + c.Next() + l, _ = c.Next() if l.token != "-" { rr.SaltLength = uint8(len(l.token)) rr.Salt = l.token @@ -1330,16 +1334,16 @@ func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, strin return rr, nil, "" } -func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setEUI48(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(EUI48) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, "" } - if l.length != 17 || l.err { + if len(l.token) != 17 || l.err { return nil, &ParseError{f, "bad EUI48 Address", l}, "" } addr := make([]byte, 12) @@ -1363,16 +1367,16 @@ func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setEUI64(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(EUI64) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } - if l.length != 23 || l.err { + if len(l.token) != 23 || l.err { return nil, &ParseError{f, "bad EUI64 Address", l}, "" } addr := make([]byte, 16) @@ -1396,12 +1400,12 @@ func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setSSHFP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(SSHFP) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -1410,14 +1414,14 @@ func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" } rr.Algorithm = uint8(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad SSHFP Type", l}, "" } rr.Type = uint8(i) - <-c // zBlank + c.Next() // zBlank s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) if e1 != nil { return nil, e1, c1 @@ -1426,12 +1430,12 @@ func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { +func setDNSKEYs(h RR_Header, c *zlexer, o, f, typ string) (RR, *ParseError, string) { rr := new(DNSKEY) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, l.comment } @@ -1440,15 +1444,15 @@ func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, str return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" } rr.Flags = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" } rr.Protocol = uint8(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" @@ -1462,7 +1466,7 @@ func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, str return rr, nil, c1 } -func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { r, e, s := setDNSKEYs(h, c, o, f, "KEY") if r != nil { return &KEY{*r.(*DNSKEY)}, e, s @@ -1470,12 +1474,12 @@ func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, e, s } -func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setDNSKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") return r, e, s } -func setCDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setCDNSKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") if r != nil { return &CDNSKEY{*r.(*DNSKEY)}, e, s @@ -1483,12 +1487,12 @@ func setCDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) return nil, e, s } -func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setRKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(RKEY) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, l.comment } @@ -1497,15 +1501,15 @@ func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, &ParseError{f, "bad RKEY Flags", l}, "" } rr.Flags = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad RKEY Protocol", l}, "" } rr.Protocol = uint8(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" @@ -1519,7 +1523,7 @@ func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setEID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(EID) rr.Hdr = h s, e, c1 := endingToString(c, "bad EID Endpoint", f) @@ -1530,7 +1534,7 @@ func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setNIMLOC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(NIMLOC) rr.Hdr = h s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) @@ -1541,12 +1545,12 @@ func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setGPOS(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(GPOS) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -1555,15 +1559,15 @@ func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, &ParseError{f, "bad GPOS Longitude", l}, "" } rr.Longitude = l.token - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() _, e = strconv.ParseFloat(l.token, 64) if e != nil || l.err { return nil, &ParseError{f, "bad GPOS Latitude", l}, "" } rr.Latitude = l.token - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() _, e = strconv.ParseFloat(l.token, 64) if e != nil || l.err { return nil, &ParseError{f, "bad GPOS Altitude", l}, "" @@ -1572,12 +1576,12 @@ func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { +func setDSs(h RR_Header, c *zlexer, o, f, typ string) (RR, *ParseError, string) { rr := new(DS) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, l.comment } @@ -1586,10 +1590,11 @@ func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" } rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() if i, e = strconv.ParseUint(l.token, 10, 8); e != nil { - i, ok := StringToAlgorithm[l.tokenUpper] + tokenUpper := strings.ToUpper(l.token) + i, ok := StringToAlgorithm[tokenUpper] if !ok || l.err { return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" } @@ -1597,8 +1602,8 @@ func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) } else { rr.Algorithm = uint8(i) } - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" @@ -1612,12 +1617,12 @@ func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) return rr, nil, c1 } -func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setDS(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { r, e, s := setDSs(h, c, o, f, "DS") return r, e, s } -func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setDLV(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { r, e, s := setDSs(h, c, o, f, "DLV") if r != nil { return &DLV{*r.(*DS)}, e, s @@ -1625,7 +1630,7 @@ func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, e, s } -func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setCDS(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { r, e, s := setDSs(h, c, o, f, "CDS") if r != nil { return &CDS{*r.(*DS)}, e, s @@ -1633,12 +1638,12 @@ func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, e, s } -func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setTA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(TA) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, l.comment } @@ -1647,10 +1652,11 @@ func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, &ParseError{f, "bad TA KeyTag", l}, "" } rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { - i, ok := StringToAlgorithm[l.tokenUpper] + tokenUpper := strings.ToUpper(l.token) + i, ok := StringToAlgorithm[tokenUpper] if !ok || l.err { return nil, &ParseError{f, "bad TA Algorithm", l}, "" } @@ -1658,8 +1664,8 @@ func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } else { rr.Algorithm = uint8(i) } - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad TA DigestType", l}, "" @@ -1673,12 +1679,12 @@ func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setTLSA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(TLSA) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, l.comment } @@ -1687,15 +1693,15 @@ func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, &ParseError{f, "bad TLSA Usage", l}, "" } rr.Usage = uint8(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad TLSA Selector", l}, "" } rr.Selector = uint8(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" @@ -1710,12 +1716,12 @@ func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setSMIMEA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(SMIMEA) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, l.comment } @@ -1724,15 +1730,15 @@ func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, &ParseError{f, "bad SMIMEA Usage", l}, "" } rr.Usage = uint8(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad SMIMEA Selector", l}, "" } rr.Selector = uint8(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, "" @@ -1747,17 +1753,17 @@ func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setRFC3597(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(RFC3597) rr.Hdr = h - l := <-c + l, _ := c.Next() if l.token != "\\#" { return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" } - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() rdlength, e := strconv.Atoi(l.token) if e != nil || l.err { return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, "" @@ -1774,7 +1780,7 @@ func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) return rr, nil, c1 } -func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setSPF(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(SPF) rr.Hdr = h @@ -1786,7 +1792,7 @@ func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setAVC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setAVC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(AVC) rr.Hdr = h @@ -1798,7 +1804,7 @@ func setAVC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setTXT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(TXT) rr.Hdr = h @@ -1812,7 +1818,7 @@ func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } // identical to setTXT -func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setNINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(NINFO) rr.Hdr = h @@ -1824,12 +1830,12 @@ func setNINFO(h RR_Header, c chan lex, o, f 
string) (RR, *ParseError, string) { return rr, nil, c1 } -func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setURI(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(URI) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -1838,15 +1844,15 @@ func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, &ParseError{f, "bad URI Priority", l}, "" } rr.Priority = uint16(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() i, e = strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { return nil, &ParseError{f, "bad URI Weight", l}, "" } rr.Weight = uint16(i) - <-c // zBlank + c.Next() // zBlank s, err, c1 := endingToTxtSlice(c, "bad URI Target", f) if err != nil { return nil, err, "" @@ -1858,7 +1864,7 @@ func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setDHCID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { // awesome record to parse! rr := new(DHCID) rr.Hdr = h @@ -1871,12 +1877,12 @@ func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setNID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(NID) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -1885,8 +1891,8 @@ func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, &ParseError{f, "bad NID Preference", l}, "" } rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString u, err := stringToNodeID(l) if err != nil || l.err { return nil, err, "" @@ -1895,12 +1901,12 @@ func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setL32(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(L32) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -1909,8 +1915,8 @@ func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, &ParseError{f, "bad L32 Preference", l}, "" } rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString rr.Locator32 = net.ParseIP(l.token) if rr.Locator32 == nil || l.err { return nil, &ParseError{f, "bad L32 Locator", l}, "" @@ -1918,12 +1924,12 @@ func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setLP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(LP) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, "" } @@ -1933,8 +1939,8 @@ func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString rr.Fqdn = l.token name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { @@ -1945,12 +1951,12 @@ func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setL64(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(L64) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -1959,8 +1965,8 @@ func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return nil, &ParseError{f, "bad L64 Preference", l}, "" } rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString u, err := stringToNodeID(l) if err != nil || l.err { return nil, err, "" @@ -1969,12 +1975,12 @@ func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setUID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(UID) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -1986,12 +1992,12 @@ func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setGID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(GID) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, "" } @@ -2003,7 +2009,7 @@ func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setUINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(UINFO) rr.Hdr = h @@ -2018,12 +2024,12 @@ func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setPX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(PX) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. 
return rr, nil, "" } @@ -2033,8 +2039,8 @@ func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString rr.Map822 = l.token map822, map822Ok := toAbsoluteName(l.token, o) if l.err || !map822Ok { @@ -2042,8 +2048,8 @@ func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Map822 = map822 - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString rr.Mapx400 = l.token mapx400, mapx400Ok := toAbsoluteName(l.token, o) if l.err || !mapx400Ok { @@ -2054,12 +2060,12 @@ func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, "" } -func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setCAA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(CAA) rr.Hdr = h - l := <-c - if l.length == 0 { // dynamic update rr. + l, _ := c.Next() + if len(l.token) == 0 { // dynamic update rr. return rr, nil, l.comment } @@ -2069,14 +2075,14 @@ func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { } rr.Flag = uint8(i) - <-c // zBlank - l = <-c // zString + c.Next() // zBlank + l, _ = c.Next() // zString if l.value != zString { return nil, &ParseError{f, "bad CAA Tag", l}, "" } rr.Tag = l.token - <-c // zBlank + c.Next() // zBlank s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f) if e != nil { return nil, e, "" @@ -2088,43 +2094,43 @@ func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { return rr, nil, c1 } -func setTKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { +func setTKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { rr := new(TKEY) rr.Hdr = h - l := <-c + l, _ := c.Next() // Algorithm if l.value != zString { return nil, &ParseError{f, "bad TKEY algorithm", l}, "" } rr.Algorithm = l.token - <-c // zBlank + c.Next() // zBlank // Get the key length and key values - l = <-c + l, _ = c.Next() i, err := strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { return nil, &ParseError{f, "bad TKEY key length", l}, "" } rr.KeySize = uint16(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() if l.value != zString { return nil, &ParseError{f, "bad TKEY key", l}, "" } rr.Key = l.token - <-c // zBlank + c.Next() // zBlank // Get the otherdata length and string data - l = <-c + l, _ = c.Next() i, err = strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { return nil, &ParseError{f, "bad TKEY otherdata length", l}, "" } rr.OtherLen = uint16(i) - <-c // zBlank - l = <-c + c.Next() // zBlank + l, _ = c.Next() if l.value != zString { return nil, &ParseError{f, "bad TKEY otherday", l}, "" } diff --git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go deleted file mode 100644 index 5b124ec59..000000000 --- a/vendor/github.com/miekg/dns/scanner.go +++ /dev/null @@ -1,56 +0,0 @@ -package dns - -// Implement a simple scanner, return a byte stream from an io reader. 
- -import ( - "bufio" - "context" - "io" - "text/scanner" -) - -type scan struct { - src *bufio.Reader - position scanner.Position - eof bool // Have we just seen a eof - ctx context.Context -} - -func scanInit(r io.Reader) (*scan, context.CancelFunc) { - s := new(scan) - s.src = bufio.NewReader(r) - s.position.Line = 1 - - ctx, cancel := context.WithCancel(context.Background()) - s.ctx = ctx - - return s, cancel -} - -// tokenText returns the next byte from the input -func (s *scan) tokenText() (byte, error) { - c, err := s.src.ReadByte() - if err != nil { - return c, err - } - select { - case <-s.ctx.Done(): - return c, context.Canceled - default: - break - } - - // delay the newline handling until the next token is delivered, - // fixes off-by-one errors when reporting a parse error. - if s.eof { - s.position.Line++ - s.position.Column = 0 - s.eof = false - } - if c == '\n' { - s.eof = true - return c, nil - } - s.position.Column++ - return c, nil -} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index b231c3430..4b4ec33c8 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -416,14 +416,13 @@ func (srv *Server) Shutdown() error { // to terminate. func (srv *Server) ShutdownContext(ctx context.Context) error { srv.lock.Lock() - started := srv.started - srv.started = false - srv.lock.Unlock() - - if !started { + if !srv.started { + srv.lock.Unlock() return &Error{err: "server not started"} } + srv.started = false + if srv.PacketConn != nil { srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads } @@ -432,10 +431,10 @@ func (srv *Server) ShutdownContext(ctx context.Context) error { srv.Listener.Close() } - srv.lock.Lock() for rw := range srv.conns { rw.SetReadDeadline(aLongTimeAgo) // Unblock reads } + srv.lock.Unlock() if testShutdownNotify != nil { @@ -666,7 +665,16 @@ func (srv *Server) serveDNS(w *response) { } func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { - conn.SetReadDeadline(time.Now().Add(timeout)) + // If we race with ShutdownContext, the read deadline may + // have been set in the distant past to unblock the read + // below. We must not override it, otherwise we may block + // ShutdownContext. + srv.lock.RLock() + if srv.started { + conn.SetReadDeadline(time.Now().Add(timeout)) + } + srv.lock.RUnlock() + l := make([]byte, 2) n, err := conn.Read(l) if err != nil || n != 2 { @@ -701,7 +709,13 @@ func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) } func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { - conn.SetReadDeadline(time.Now().Add(timeout)) + srv.lock.RLock() + if srv.started { + // See the comment in readTCP above. + conn.SetReadDeadline(time.Now().Add(timeout)) + } + srv.lock.RUnlock() + m := srv.udpPool.Get().([]byte) n, s, err := ReadFromSessionUDP(conn, m) if err != nil { @@ -753,24 +767,33 @@ func (w *response) Write(m []byte) (int, error) { n, err := io.Copy(w.tcp, bytes.NewReader(m)) return int(n), err + default: + panic("dns: Write called after Close") } - panic("not reached") } // LocalAddr implements the ResponseWriter.LocalAddr method. func (w *response) LocalAddr() net.Addr { - if w.tcp != nil { + switch { + case w.udp != nil: + return w.udp.LocalAddr() + case w.tcp != nil: return w.tcp.LocalAddr() + default: + panic("dns: LocalAddr called after Close") } - return w.udp.LocalAddr() } // RemoteAddr implements the ResponseWriter.RemoteAddr method. 
func (w *response) RemoteAddr() net.Addr { - if w.tcp != nil { + switch { + case w.udpSession != nil: + return w.udpSession.RemoteAddr() + case w.tcp != nil: return w.tcp.RemoteAddr() + default: + panic("dns: RemoteAddr called after Close") } - return w.udpSession.RemoteAddr() } // TsigStatus implements the ResponseWriter.TsigStatus method. diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index a64f4d7d8..115f2c7bd 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -419,128 +419,130 @@ type TXT struct { func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } func sprintName(s string) string { - src := []byte(s) - dst := make([]byte, 0, len(src)) - for i := 0; i < len(src); { - if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { - dst = append(dst, src[i:i+2]...) + var dst strings.Builder + dst.Grow(len(s)) + for i := 0; i < len(s); { + if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' { + dst.WriteString(s[i : i+2]) i += 2 - } else { - b, n := nextByte(src, i) - if n == 0 { - i++ // dangling back slash - } else if b == '.' { - dst = append(dst, b) - } else { - dst = appendDomainNameByte(dst, b) - } - i += n + continue } + + b, n := nextByte(s, i) + switch { + case n == 0: + i++ // dangling back slash + case b == '.': + dst.WriteByte('.') + default: + writeDomainNameByte(&dst, b) + } + i += n } - return string(dst) + return dst.String() } func sprintTxtOctet(s string) string { - src := []byte(s) - dst := make([]byte, 0, len(src)) - dst = append(dst, '"') - for i := 0; i < len(src); { - if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { - dst = append(dst, src[i:i+2]...) + var dst strings.Builder + dst.Grow(2 + len(s)) + dst.WriteByte('"') + for i := 0; i < len(s); { + if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' { + dst.WriteString(s[i : i+2]) i += 2 - } else { - b, n := nextByte(src, i) - if n == 0 { - i++ // dangling back slash - } else if b == '.' { - dst = append(dst, b) - } else { - if b < ' ' || b > '~' { - dst = appendByte(dst, b) - } else { - dst = append(dst, b) - } - } - i += n + continue } + + b, n := nextByte(s, i) + switch { + case n == 0: + i++ // dangling back slash + case b == '.': + dst.WriteByte('.') + case b < ' ' || b > '~': + writeEscapedByte(&dst, b) + default: + dst.WriteByte(b) + } + i += n } - dst = append(dst, '"') - return string(dst) + dst.WriteByte('"') + return dst.String() } func sprintTxt(txt []string) string { - var out []byte + var out strings.Builder for i, s := range txt { + out.Grow(3 + len(s)) if i > 0 { - out = append(out, ` "`...) 
+ out.WriteString(` "`) } else { - out = append(out, '"') + out.WriteByte('"') } - bs := []byte(s) - for j := 0; j < len(bs); { - b, n := nextByte(bs, j) + for j := 0; j < len(s); { + b, n := nextByte(s, j) if n == 0 { break } - out = appendTXTStringByte(out, b) + writeTXTStringByte(&out, b) j += n } - out = append(out, '"') + out.WriteByte('"') } - return string(out) + return out.String() } -func appendDomainNameByte(s []byte, b byte) []byte { +func writeDomainNameByte(s *strings.Builder, b byte) { switch b { case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape - return append(s, '\\', b) + s.WriteByte('\\') + s.WriteByte(b) + default: + writeTXTStringByte(s, b) } - return appendTXTStringByte(s, b) } -func appendTXTStringByte(s []byte, b byte) []byte { - switch b { - case '"', '\\': - return append(s, '\\', b) +func writeTXTStringByte(s *strings.Builder, b byte) { + switch { + case b == '"' || b == '\\': + s.WriteByte('\\') + s.WriteByte(b) + case b < ' ' || b > '~': + writeEscapedByte(s, b) + default: + s.WriteByte(b) } - if b < ' ' || b > '~' { - return appendByte(s, b) - } - return append(s, b) } -func appendByte(s []byte, b byte) []byte { +func writeEscapedByte(s *strings.Builder, b byte) { var buf [3]byte bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') + s.WriteByte('\\') + for i := len(bufs); i < 3; i++ { + s.WriteByte('0') } - for _, r := range bufs { - s = append(s, r) - } - return s + s.Write(bufs) } -func nextByte(b []byte, offset int) (byte, int) { - if offset >= len(b) { +func nextByte(s string, offset int) (byte, int) { + if offset >= len(s) { return 0, 0 } - if b[offset] != '\\' { + if s[offset] != '\\' { // not an escape sequence - return b[offset], 1 + return s[offset], 1 } - switch len(b) - offset { + switch len(s) - offset { case 1: // dangling escape return 0, 0 case 2, 3: // too short to be \ddd default: // maybe \ddd - if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) { - return dddToByte(b[offset+1:]), 4 + if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) { + return dddStringToByte(s[offset+1:]), 4 } } // not \ddd, just an RFC 1035 "quoted" character - return b[offset+1], 2 + return s[offset+1], 2 } // SPF RR. See RFC 4408, Section 3.1.1. diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index 7658518b6..403b9ef97 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = V{1, 0, 12} +var Version = V{1, 0, 13} // V holds the version of this library. type V struct { diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index e6476497e..3b3cb723f 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,8 +1,13 @@ +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. 
[GH-140] + ## 1.1.1 * Fix panic that can happen in `decodePtr` -## 1.1.0 (September 30, 2018) +## 1.1.0 * Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] * Support struct to struct decoding [GH-137] diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 95eb68656..256ee63fb 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -316,7 +316,16 @@ func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) if val.IsValid() && val.Elem().IsValid() { return d.decode(name, data, val.Elem()) } - dataVal := reflect.Indirect(reflect.ValueOf(data)) + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. + // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + if !dataVal.IsValid() { dataVal = reflect.Zero(val.Type()) } diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go index a73f7471a..d98c77af7 100644 --- a/vendor/github.com/ncw/swift/swift.go +++ b/vendor/github.com/ncw/swift/swift.go @@ -1013,7 +1013,8 @@ type Object struct { Bytes int64 `json:"bytes"` // size in bytes ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server LastModified time.Time // Last modified time converted to a time.Time - Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" + Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" + SLOHash string `json:"slo_etag"` // MD5 hash of all segments' MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories" ObjectType ObjectType // type of this object @@ -1065,6 +1066,9 @@ func (c *Connection) Objects(container string, opts *ObjectsOpts) ([]Object, err return nil, err } } + if object.SLOHash != "" { + object.ObjectType = StaticLargeObjectType + } } return objects, err } diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index e3f88c155..58f1095ab 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -22,7 +22,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 0 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "-dev" diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go index ef24f17e5..00b1111b9 100644 --- a/vendor/github.com/pierrec/lz4/block.go +++ b/vendor/github.com/pierrec/lz4/block.go @@ -286,7 +286,7 @@ func CompressBlockHC(src, dst []byte, depth int) (di int, err error) { for ml < sn-si && src[next+ml] == src[si+ml] { ml++ } - if ml+1 < minMatch || ml <= mLen { + if ml < minMatch || ml <= mLen { // Match too small ( 0, we use a smart buffer internally for performance purposes. 
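The mapstructure hunk above changes `decodeBasic` so that a pointer input whose element type is exactly the target type is dereferenced before assignment (the diff's own comment gives `*string` into `string` as the example), instead of being indirected unconditionally. A minimal standalone sketch of that check using only the standard `reflect` package — it mirrors the added logic but is illustrative, not the vendored code:

```go
package main

import (
	"fmt"
	"reflect"
)

// assignIndirect mirrors the check added to mapstructure's decodeBasic:
// if the input is a pointer whose element type is exactly the target's
// type, dereference it first so the assignment succeeds (e.g. *string
// into string). Illustrative sketch only, not the vendored code.
func assignIndirect(target reflect.Value, data interface{}) bool {
	dataVal := reflect.ValueOf(data)
	if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == target.Type() {
		dataVal = reflect.Indirect(dataVal)
	}
	if !dataVal.IsValid() || dataVal.Type() != target.Type() {
		return false
	}
	target.Set(dataVal)
	return true
}

func main() {
	s := "hello"
	var out string
	// A *string input is dereferenced before assignment to a string target.
	ok := assignIndirect(reflect.ValueOf(&out).Elem(), &s)
	fmt.Println(ok, out) // true hello
}
```

Making the indirection conditional also keeps pointer values intact when they are not the exact dereference of the target, which is consistent with the changelog entry above about decode hooks returning an interface implementation.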
@@ -1188,6 +1195,7 @@ func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { dd.ReadMapEnd() return } + d.depthIncr() tisfi := fti.sfiSort hasLen := containerLen >= 0 @@ -1219,12 +1227,14 @@ func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { // keepAlive4StringView(rvkencnameB) // not needed, as reference is outside loop } dd.ReadMapEnd() + d.depthDecr() } else if ctyp == valueTypeArray { containerLen := dd.ReadArrayStart() if containerLen == 0 { dd.ReadArrayEnd() return } + d.depthIncr() // Not much gain from doing it two ways for array. // Arrays are not used as much for structs. hasLen := containerLen >= 0 @@ -1259,6 +1269,7 @@ func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { } } dd.ReadArrayEnd() + d.depthDecr() } else { d.errorstr(errstrOnlyMapOrArrayCanDecodeIntoStruct) return @@ -1328,6 +1339,8 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { return } + d.depthIncr() + rtelem0Size := int(rtelem0.Size()) rtElem0Kind := rtelem0.Kind() rtelem0Mut := !isImmutableKind(rtElem0Kind) @@ -1399,7 +1412,6 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) rvChanged = true } else { // chan - // xdebugf(">>>>>> haslen = %v, make chan of type '%v' with length: %v", hasLen, ti.rt, rvlen) rv = reflect.MakeChan(ti.rt, rvlen) rvChanged = true } @@ -1421,7 +1433,6 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { fn = d.cf.get(rtelem, true, true) } d.decodeValue(rv9, fn, true) - // xdebugf(">>>> rv9 sent on %v during decode: %v, with len=%v, cap=%v", rv.Type(), rv9, rv.Len(), rv.Cap()) rv.Send(rv9) } else { // if indefinite, etc, then expand the slice if necessary @@ -1458,9 +1469,9 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { rtelem0Zero = reflect.Zero(rtelem0) } rv9.Set(rtelem0Zero) - } - if decodeAsNil { - continue + if decodeAsNil { + continue + } } if fn == nil { @@ -1491,6 +1502,8 @@ func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { if rvChanged { // infers rvCanset=true, so it can be reset rv0.Set(rv) } + + d.depthDecr() } // func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { @@ -1504,7 +1517,8 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { elemsep := d.esep ti := f.ti if rv.IsNil() { - rv.Set(makeMapReflect(ti.rt, containerLen)) + rvlen := decInferLen(containerLen, d.h.MaxInitLen, int(ti.key.Size()+ti.elem.Size())) + rv.Set(makeMapReflect(ti.rt, rvlen)) } if containerLen == 0 { @@ -1512,6 +1526,8 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { return } + d.depthIncr() + ktype, vtype := ti.key, ti.elem ktypeId := rt2id(ktype) vtypeKind := vtype.Kind() @@ -1558,13 +1574,14 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { if elemsep { dd.ReadMapElemKey() } - if false && dd.TryDecodeAsNil() { // nil cannot be a map key, so disregard this block - // Previously, if a nil key, we just ignored the mapped value and continued. - // However, that makes the result of encoding and then decoding map[intf]intf{nil:nil} - // to be an empty map. - // Instead, we treat a nil key as the zero value of the type. - rvk.Set(reflect.Zero(ktype)) - } else if ktypeIsString { + // if false && dd.TryDecodeAsNil() { // nil cannot be a map key, so disregard this block + // // Previously, if a nil key, we just ignored the mapped value and continued. + // // However, that makes the result of encoding and then decoding map[intf]intf{nil:nil} + // // to be an empty map. 
+ // // Instead, we treat a nil key as the zero value of the type. + // rvk.Set(reflect.Zero(ktype)) + // } else if ktypeIsString { + if ktypeIsString { kstrbs = dd.DecodeStringAsBytes() rvk.SetString(stringView(kstrbs)) // NOTE: if doing an insert, you MUST use a real string (not stringview) @@ -1653,6 +1670,8 @@ func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { } dd.ReadMapEnd() + + d.depthDecr() } // decNaked is used to keep track of the primitives decoded. @@ -1889,7 +1908,10 @@ type Decoder struct { err error h *BasicHandle - _ [1]uint64 // padding + + depth int16 + maxdepth int16 + _ [4]uint8 // padding // ---- cpu cache line boundary? b [decScratchByteArrayLen]byte // scratch buffer, used by Decoder and xxxEncDrivers @@ -1943,6 +1965,11 @@ func (d *Decoder) resetCommon() { d.n.reset() d.d.reset() d.err = nil + d.depth = 0 + d.maxdepth = d.h.MaxDepth + if d.maxdepth <= 0 { + d.maxdepth = decDefMaxDepth + } // reset all things which were cached from the Handle, but could change d.mtid, d.stid = 0, 0 d.mtr, d.str = false, false @@ -2417,6 +2444,17 @@ func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) { return } +func (d *Decoder) depthIncr() { + d.depth++ + if d.depth >= d.maxdepth { + panic(errMaxDepthExceeded) + } +} + +func (d *Decoder) depthDecr() { + d.depth-- +} + // Possibly get an interned version of a string // // This should mostly be used for map keys, where the key type is string. @@ -2532,6 +2570,13 @@ func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { return } +// func decByteSliceZeroCopy(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { +// if _, ok := r.(*bytesDecReader); ok && clen <= maxInitLen { +// return r.readx(clen) +// } +// return decByteSlice(r, clen, maxInitLen, bs) +// } + func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { if xlen := len(in); xlen > 0 { if isBytesReader || xlen <= scratchByteArrayLen { diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go index 87f2562f6..6dc566b6d 100644 --- a/vendor/github.com/ugorji/go/codec/fast-path.generated.go +++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go @@ -17757,6 +17757,7 @@ func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -17814,6 +17815,7 @@ func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ } } slh.End() + d.depthDecr() return v, changed } @@ -17853,6 +17855,7 @@ func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ [] slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -17910,6 +17913,7 @@ func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ [] } } slh.End() + d.depthDecr() return v, changed } @@ -17949,6 +17953,7 @@ func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -18006,6 +18011,7 @@ func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ } } slh.End() + d.depthDecr() return v, changed } @@ -18045,6 +18051,7 @@ func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 
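The decode.go hunks above implement the nesting guard that every fast-path change which follows simply reuses: the Decoder gains depth and maxdepth int16 fields, resetCommon seeds maxdepth from the handle's MaxDepth (falling back to decDefMaxDepth when unset), depthIncr panics with errMaxDepthExceeded once the limit is reached, and depthDecr unwinds the counter on the way out of each map, slice, or struct container. The sketch below reproduces just that guard in isolation; depthGuard and decodeNested are hypothetical stand-ins for the Decoder and its kStruct/kSlice/kMap methods.

package main

import (
	"errors"
	"fmt"
)

var errMaxDepthExceeded = errors.New("maximum decoding depth exceeded")

// depthGuard mirrors the depth/maxdepth bookkeeping the patch adds to Decoder.
type depthGuard struct {
	depth, maxDepth int16
}

func (g *depthGuard) incr() {
	g.depth++
	if g.depth >= g.maxDepth {
		panic(errMaxDepthExceeded)
	}
}

func (g *depthGuard) decr() { g.depth-- }

// decodeNested stands in for the container-decoding methods recursing into
// nested maps/slices/structs, bracketing each level with incr/decr.
func decodeNested(g *depthGuard, levels int) {
	if levels == 0 {
		return
	}
	g.incr()
	defer g.decr()
	decodeNested(g, levels-1)
}

func main() {
	g := &depthGuard{maxDepth: 1024} // analogous to decDefMaxDepth
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	decodeNested(g, 5000) // nesting deeper than the limit triggers the panic
}

In the codec itself the panic is recovered by the decoder's normal error handling, so a caller would presumably just set the handle's MaxDepth (the option this patch reads as d.h.MaxDepth) before decoding deeply nested or untrusted input.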
var xlen int if hasLen && canChange { @@ -18102,6 +18109,7 @@ func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ } } slh.End() + d.depthDecr() return v, changed } @@ -18141,6 +18149,7 @@ func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -18198,6 +18207,7 @@ func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint } } slh.End() + d.depthDecr() return v, changed } @@ -18237,6 +18247,7 @@ func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []ui slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -18294,6 +18305,7 @@ func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []ui } } slh.End() + d.depthDecr() return v, changed } @@ -18333,6 +18345,7 @@ func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ [] slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -18390,6 +18403,7 @@ func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ [] } } slh.End() + d.depthDecr() return v, changed } @@ -18429,6 +18443,7 @@ func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ [] slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -18486,6 +18501,7 @@ func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ [] } } slh.End() + d.depthDecr() return v, changed } @@ -18525,6 +18541,7 @@ func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ [] slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -18582,6 +18599,7 @@ func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ [] } } slh.End() + d.depthDecr() return v, changed } @@ -18621,6 +18639,7 @@ func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -18678,6 +18697,7 @@ func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ } } slh.End() + d.depthDecr() return v, changed } @@ -18717,6 +18737,7 @@ func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, c slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -18774,6 +18795,7 @@ func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, c } } slh.End() + d.depthDecr() return v, changed } @@ -18813,6 +18835,7 @@ func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8 slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -18870,6 +18893,7 @@ func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8 } } slh.End() + d.depthDecr() return v, changed } @@ -18909,6 +18933,7 @@ func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []in slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -18966,6 +18991,7 @@ func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []in } } slh.End() + d.depthDecr() return v, changed } @@ -19005,6 
+19031,7 @@ func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []in slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -19062,6 +19089,7 @@ func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []in } } slh.End() + d.depthDecr() return v, changed } @@ -19101,6 +19129,7 @@ func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []in slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -19158,6 +19187,7 @@ func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []in } } slh.End() + d.depthDecr() return v, changed } @@ -19197,6 +19227,7 @@ func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -19254,6 +19285,7 @@ func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool } } slh.End() + d.depthDecr() return v, changed } @@ -19287,6 +19319,7 @@ func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk interface{} var mv interface{} @@ -19323,6 +19356,7 @@ func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -19356,6 +19390,7 @@ func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv string hasLen := containerLen > 0 @@ -19386,6 +19421,7 @@ func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -19419,6 +19455,7 @@ func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv uint hasLen := containerLen > 0 @@ -19449,6 +19486,7 @@ func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -19482,6 +19520,7 @@ func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv uint8 hasLen := containerLen > 0 @@ -19512,6 +19551,7 @@ func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -19545,6 +19585,7 @@ func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv uint16 hasLen := containerLen > 0 @@ -19575,6 +19616,7 @@ func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -19608,6 +19650,7 @@ func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv uint32 hasLen := containerLen > 0 @@ -19638,6 +19681,7 @@ func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -19671,6 +19715,7 @@ func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} 
var mv uint64 hasLen := containerLen > 0 @@ -19701,6 +19746,7 @@ func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -19734,6 +19780,7 @@ func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv uintptr hasLen := containerLen > 0 @@ -19764,6 +19811,7 @@ func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -19797,6 +19845,7 @@ func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv int hasLen := containerLen > 0 @@ -19827,6 +19876,7 @@ func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -19860,6 +19910,7 @@ func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv int8 hasLen := containerLen > 0 @@ -19890,6 +19941,7 @@ func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -19923,6 +19975,7 @@ func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv int16 hasLen := containerLen > 0 @@ -19953,6 +20006,7 @@ func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -19986,6 +20040,7 @@ func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv int32 hasLen := containerLen > 0 @@ -20016,6 +20071,7 @@ func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20049,6 +20105,7 @@ func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv int64 hasLen := containerLen > 0 @@ -20079,6 +20136,7 @@ func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20112,6 +20170,7 @@ func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv float32 hasLen := containerLen > 0 @@ -20142,6 +20201,7 @@ func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20175,6 +20235,7 @@ func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv float64 hasLen := containerLen > 0 @@ -20205,6 +20266,7 @@ func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20238,6 +20300,7 @@ func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk interface{} var mv bool hasLen := containerLen > 0 @@ -20268,6 +20331,7 @@ func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20301,6 +20365,7 @@ 
func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk string var mv interface{} @@ -20333,6 +20398,7 @@ func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20366,6 +20432,7 @@ func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv string hasLen := containerLen > 0 @@ -20392,6 +20459,7 @@ func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20425,6 +20493,7 @@ func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv uint hasLen := containerLen > 0 @@ -20451,6 +20520,7 @@ func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20484,6 +20554,7 @@ func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv uint8 hasLen := containerLen > 0 @@ -20510,6 +20581,7 @@ func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20543,6 +20615,7 @@ func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv uint16 hasLen := containerLen > 0 @@ -20569,6 +20642,7 @@ func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20602,6 +20676,7 @@ func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv uint32 hasLen := containerLen > 0 @@ -20628,6 +20703,7 @@ func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20661,6 +20737,7 @@ func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv uint64 hasLen := containerLen > 0 @@ -20687,6 +20764,7 @@ func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20720,6 +20798,7 @@ func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv uintptr hasLen := containerLen > 0 @@ -20746,6 +20825,7 @@ func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20779,6 +20859,7 @@ func (_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv int hasLen := containerLen > 0 @@ -20805,6 +20886,7 @@ func (_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20838,6 +20920,7 @@ func (_ fastpathT) DecMapStringInt8V(v map[string]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv int8 hasLen := containerLen > 0 @@ -20864,6 +20947,7 @@ func (_ fastpathT) DecMapStringInt8V(v map[string]int8, 
canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20897,6 +20981,7 @@ func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv int16 hasLen := containerLen > 0 @@ -20923,6 +21008,7 @@ func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -20956,6 +21042,7 @@ func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv int32 hasLen := containerLen > 0 @@ -20982,6 +21069,7 @@ func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21015,6 +21103,7 @@ func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv int64 hasLen := containerLen > 0 @@ -21041,6 +21130,7 @@ func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21074,6 +21164,7 @@ func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv float32 hasLen := containerLen > 0 @@ -21100,6 +21191,7 @@ func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21133,6 +21225,7 @@ func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv float64 hasLen := containerLen > 0 @@ -21159,6 +21252,7 @@ func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21192,6 +21286,7 @@ func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk string var mv bool hasLen := containerLen > 0 @@ -21218,6 +21313,7 @@ func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21251,6 +21347,7 @@ func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk float32 var mv interface{} @@ -21283,6 +21380,7 @@ func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21316,6 +21414,7 @@ func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv string hasLen := containerLen > 0 @@ -21342,6 +21441,7 @@ func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21375,6 +21475,7 @@ func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv uint hasLen := containerLen > 0 @@ -21401,6 +21502,7 @@ func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21434,6 +21536,7 @@ func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv 
uint8 hasLen := containerLen > 0 @@ -21460,6 +21563,7 @@ func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21493,6 +21597,7 @@ func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv uint16 hasLen := containerLen > 0 @@ -21519,6 +21624,7 @@ func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21552,6 +21658,7 @@ func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv uint32 hasLen := containerLen > 0 @@ -21578,6 +21685,7 @@ func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21611,6 +21719,7 @@ func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv uint64 hasLen := containerLen > 0 @@ -21637,6 +21746,7 @@ func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21670,6 +21780,7 @@ func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv uintptr hasLen := containerLen > 0 @@ -21696,6 +21807,7 @@ func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21729,6 +21841,7 @@ func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv int hasLen := containerLen > 0 @@ -21755,6 +21868,7 @@ func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21788,6 +21902,7 @@ func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv int8 hasLen := containerLen > 0 @@ -21814,6 +21929,7 @@ func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21847,6 +21963,7 @@ func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv int16 hasLen := containerLen > 0 @@ -21873,6 +21990,7 @@ func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21906,6 +22024,7 @@ func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv int32 hasLen := containerLen > 0 @@ -21932,6 +22051,7 @@ func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -21965,6 +22085,7 @@ func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv int64 hasLen := containerLen > 0 @@ -21991,6 +22112,7 @@ func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22024,6 +22146,7 @@ func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, 
canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv float32 hasLen := containerLen > 0 @@ -22050,6 +22173,7 @@ func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22083,6 +22207,7 @@ func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv float64 hasLen := containerLen > 0 @@ -22109,6 +22234,7 @@ func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22142,6 +22268,7 @@ func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float32 var mv bool hasLen := containerLen > 0 @@ -22168,6 +22295,7 @@ func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22201,6 +22329,7 @@ func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk float64 var mv interface{} @@ -22233,6 +22362,7 @@ func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22266,6 +22396,7 @@ func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv string hasLen := containerLen > 0 @@ -22292,6 +22423,7 @@ func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22325,6 +22457,7 @@ func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv uint hasLen := containerLen > 0 @@ -22351,6 +22484,7 @@ func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22384,6 +22518,7 @@ func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv uint8 hasLen := containerLen > 0 @@ -22410,6 +22545,7 @@ func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22443,6 +22579,7 @@ func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv uint16 hasLen := containerLen > 0 @@ -22469,6 +22606,7 @@ func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22502,6 +22640,7 @@ func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv uint32 hasLen := containerLen > 0 @@ -22528,6 +22667,7 @@ func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22561,6 +22701,7 @@ func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv uint64 hasLen := containerLen > 0 @@ -22587,6 +22728,7 @@ func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, 
canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22620,6 +22762,7 @@ func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv uintptr hasLen := containerLen > 0 @@ -22646,6 +22789,7 @@ func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22679,6 +22823,7 @@ func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv int hasLen := containerLen > 0 @@ -22705,6 +22850,7 @@ func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22738,6 +22884,7 @@ func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv int8 hasLen := containerLen > 0 @@ -22764,6 +22911,7 @@ func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22797,6 +22945,7 @@ func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv int16 hasLen := containerLen > 0 @@ -22823,6 +22972,7 @@ func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22856,6 +23006,7 @@ func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv int32 hasLen := containerLen > 0 @@ -22882,6 +23033,7 @@ func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22915,6 +23067,7 @@ func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv int64 hasLen := containerLen > 0 @@ -22941,6 +23094,7 @@ func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -22974,6 +23128,7 @@ func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv float32 hasLen := containerLen > 0 @@ -23000,6 +23155,7 @@ func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23033,6 +23189,7 @@ func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv float64 hasLen := containerLen > 0 @@ -23059,6 +23216,7 @@ func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23092,6 +23250,7 @@ func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk float64 var mv bool hasLen := containerLen > 0 @@ -23118,6 +23277,7 @@ func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23151,6 +23311,7 @@ func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && 
!d.h.InterfaceReset var mk uint var mv interface{} @@ -23183,6 +23344,7 @@ func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23216,6 +23378,7 @@ func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv string hasLen := containerLen > 0 @@ -23242,6 +23405,7 @@ func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23275,6 +23439,7 @@ func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv uint hasLen := containerLen > 0 @@ -23301,6 +23466,7 @@ func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23334,6 +23500,7 @@ func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv uint8 hasLen := containerLen > 0 @@ -23360,6 +23527,7 @@ func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23393,6 +23561,7 @@ func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv uint16 hasLen := containerLen > 0 @@ -23419,6 +23588,7 @@ func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23452,6 +23622,7 @@ func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv uint32 hasLen := containerLen > 0 @@ -23478,6 +23649,7 @@ func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23511,6 +23683,7 @@ func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv uint64 hasLen := containerLen > 0 @@ -23537,6 +23710,7 @@ func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23570,6 +23744,7 @@ func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv uintptr hasLen := containerLen > 0 @@ -23596,6 +23771,7 @@ func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23629,6 +23805,7 @@ func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv int hasLen := containerLen > 0 @@ -23655,6 +23832,7 @@ func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23688,6 +23866,7 @@ func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv int8 hasLen := containerLen > 0 @@ -23714,6 +23893,7 @@ func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23747,6 +23927,7 @@ func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv int16 hasLen := containerLen > 0 @@ 
-23773,6 +23954,7 @@ func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23806,6 +23988,7 @@ func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv int32 hasLen := containerLen > 0 @@ -23832,6 +24015,7 @@ func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23865,6 +24049,7 @@ func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv int64 hasLen := containerLen > 0 @@ -23891,6 +24076,7 @@ func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23924,6 +24110,7 @@ func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv float32 hasLen := containerLen > 0 @@ -23950,6 +24137,7 @@ func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -23983,6 +24171,7 @@ func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv float64 hasLen := containerLen > 0 @@ -24009,6 +24198,7 @@ func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24042,6 +24232,7 @@ func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint var mv bool hasLen := containerLen > 0 @@ -24068,6 +24259,7 @@ func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24101,6 +24293,7 @@ func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk uint8 var mv interface{} @@ -24133,6 +24326,7 @@ func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24166,6 +24360,7 @@ func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv string hasLen := containerLen > 0 @@ -24192,6 +24387,7 @@ func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24225,6 +24421,7 @@ func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv uint hasLen := containerLen > 0 @@ -24251,6 +24448,7 @@ func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24284,6 +24482,7 @@ func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv uint8 hasLen := containerLen > 0 @@ -24310,6 +24509,7 @@ func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24343,6 +24543,7 @@ func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv uint16 hasLen := 
containerLen > 0 @@ -24369,6 +24570,7 @@ func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24402,6 +24604,7 @@ func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv uint32 hasLen := containerLen > 0 @@ -24428,6 +24631,7 @@ func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24461,6 +24665,7 @@ func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv uint64 hasLen := containerLen > 0 @@ -24487,6 +24692,7 @@ func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24520,6 +24726,7 @@ func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv uintptr hasLen := containerLen > 0 @@ -24546,6 +24753,7 @@ func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24579,6 +24787,7 @@ func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv int hasLen := containerLen > 0 @@ -24605,6 +24814,7 @@ func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24638,6 +24848,7 @@ func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv int8 hasLen := containerLen > 0 @@ -24664,6 +24875,7 @@ func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24697,6 +24909,7 @@ func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv int16 hasLen := containerLen > 0 @@ -24723,6 +24936,7 @@ func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24756,6 +24970,7 @@ func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv int32 hasLen := containerLen > 0 @@ -24782,6 +24997,7 @@ func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24815,6 +25031,7 @@ func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv int64 hasLen := containerLen > 0 @@ -24841,6 +25058,7 @@ func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24874,6 +25092,7 @@ func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv float32 hasLen := containerLen > 0 @@ -24900,6 +25119,7 @@ func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24933,6 +25153,7 @@ func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv float64 hasLen := 
containerLen > 0 @@ -24959,6 +25180,7 @@ func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -24992,6 +25214,7 @@ func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint8 var mv bool hasLen := containerLen > 0 @@ -25018,6 +25241,7 @@ func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25051,6 +25275,7 @@ func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk uint16 var mv interface{} @@ -25083,6 +25308,7 @@ func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25116,6 +25342,7 @@ func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv string hasLen := containerLen > 0 @@ -25142,6 +25369,7 @@ func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25175,6 +25403,7 @@ func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv uint hasLen := containerLen > 0 @@ -25201,6 +25430,7 @@ func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25234,6 +25464,7 @@ func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv uint8 hasLen := containerLen > 0 @@ -25260,6 +25491,7 @@ func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25293,6 +25525,7 @@ func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv uint16 hasLen := containerLen > 0 @@ -25319,6 +25552,7 @@ func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25352,6 +25586,7 @@ func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv uint32 hasLen := containerLen > 0 @@ -25378,6 +25613,7 @@ func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25411,6 +25647,7 @@ func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv uint64 hasLen := containerLen > 0 @@ -25437,6 +25674,7 @@ func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25470,6 +25708,7 @@ func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv uintptr hasLen := containerLen > 0 @@ -25496,6 +25735,7 @@ func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25529,6 +25769,7 @@ func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange 
bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv int hasLen := containerLen > 0 @@ -25555,6 +25796,7 @@ func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25588,6 +25830,7 @@ func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv int8 hasLen := containerLen > 0 @@ -25614,6 +25857,7 @@ func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25647,6 +25891,7 @@ func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv int16 hasLen := containerLen > 0 @@ -25673,6 +25918,7 @@ func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25706,6 +25952,7 @@ func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv int32 hasLen := containerLen > 0 @@ -25732,6 +25979,7 @@ func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25765,6 +26013,7 @@ func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv int64 hasLen := containerLen > 0 @@ -25791,6 +26040,7 @@ func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25824,6 +26074,7 @@ func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv float32 hasLen := containerLen > 0 @@ -25850,6 +26101,7 @@ func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25883,6 +26135,7 @@ func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv float64 hasLen := containerLen > 0 @@ -25909,6 +26162,7 @@ func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -25942,6 +26196,7 @@ func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint16 var mv bool hasLen := containerLen > 0 @@ -25968,6 +26223,7 @@ func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26001,6 +26257,7 @@ func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk uint32 var mv interface{} @@ -26033,6 +26290,7 @@ func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26066,6 +26324,7 @@ func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv string hasLen := containerLen > 0 @@ -26092,6 +26351,7 @@ func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ 
-26125,6 +26385,7 @@ func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv uint hasLen := containerLen > 0 @@ -26151,6 +26412,7 @@ func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26184,6 +26446,7 @@ func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv uint8 hasLen := containerLen > 0 @@ -26210,6 +26473,7 @@ func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26243,6 +26507,7 @@ func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv uint16 hasLen := containerLen > 0 @@ -26269,6 +26534,7 @@ func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26302,6 +26568,7 @@ func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv uint32 hasLen := containerLen > 0 @@ -26328,6 +26595,7 @@ func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26361,6 +26629,7 @@ func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv uint64 hasLen := containerLen > 0 @@ -26387,6 +26656,7 @@ func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26420,6 +26690,7 @@ func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv uintptr hasLen := containerLen > 0 @@ -26446,6 +26717,7 @@ func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26479,6 +26751,7 @@ func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv int hasLen := containerLen > 0 @@ -26505,6 +26778,7 @@ func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26538,6 +26812,7 @@ func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv int8 hasLen := containerLen > 0 @@ -26564,6 +26839,7 @@ func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26597,6 +26873,7 @@ func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv int16 hasLen := containerLen > 0 @@ -26623,6 +26900,7 @@ func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26656,6 +26934,7 @@ func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv int32 hasLen := containerLen > 0 @@ -26682,6 +26961,7 @@ func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool, } } dd.ReadMapEnd() + 
d.depthDecr() return v, changed } @@ -26715,6 +26995,7 @@ func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv int64 hasLen := containerLen > 0 @@ -26741,6 +27022,7 @@ func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26774,6 +27056,7 @@ func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv float32 hasLen := containerLen > 0 @@ -26800,6 +27083,7 @@ func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26833,6 +27117,7 @@ func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv float64 hasLen := containerLen > 0 @@ -26859,6 +27144,7 @@ func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26892,6 +27178,7 @@ func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint32 var mv bool hasLen := containerLen > 0 @@ -26918,6 +27205,7 @@ func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -26951,6 +27239,7 @@ func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk uint64 var mv interface{} @@ -26983,6 +27272,7 @@ func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27016,6 +27306,7 @@ func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv string hasLen := containerLen > 0 @@ -27042,6 +27333,7 @@ func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27075,6 +27367,7 @@ func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv uint hasLen := containerLen > 0 @@ -27101,6 +27394,7 @@ func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27134,6 +27428,7 @@ func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv uint8 hasLen := containerLen > 0 @@ -27160,6 +27455,7 @@ func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27193,6 +27489,7 @@ func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv uint16 hasLen := containerLen > 0 @@ -27219,6 +27516,7 @@ func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27252,6 +27550,7 @@ func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv uint32 hasLen := containerLen > 0 @@ -27278,6 
+27577,7 @@ func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27311,6 +27611,7 @@ func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv uint64 hasLen := containerLen > 0 @@ -27337,6 +27638,7 @@ func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27370,6 +27672,7 @@ func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv uintptr hasLen := containerLen > 0 @@ -27396,6 +27699,7 @@ func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27429,6 +27733,7 @@ func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv int hasLen := containerLen > 0 @@ -27455,6 +27760,7 @@ func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27488,6 +27794,7 @@ func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv int8 hasLen := containerLen > 0 @@ -27514,6 +27821,7 @@ func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27547,6 +27855,7 @@ func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv int16 hasLen := containerLen > 0 @@ -27573,6 +27882,7 @@ func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27606,6 +27916,7 @@ func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv int32 hasLen := containerLen > 0 @@ -27632,6 +27943,7 @@ func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27665,6 +27977,7 @@ func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv int64 hasLen := containerLen > 0 @@ -27691,6 +28004,7 @@ func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27724,6 +28038,7 @@ func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv float32 hasLen := containerLen > 0 @@ -27750,6 +28065,7 @@ func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27783,6 +28099,7 @@ func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv float64 hasLen := containerLen > 0 @@ -27809,6 +28126,7 @@ func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27842,6 +28160,7 @@ func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uint64 var mv 
bool hasLen := containerLen > 0 @@ -27868,6 +28187,7 @@ func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27901,6 +28221,7 @@ func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk uintptr var mv interface{} @@ -27933,6 +28254,7 @@ func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -27966,6 +28288,7 @@ func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv string hasLen := containerLen > 0 @@ -27992,6 +28315,7 @@ func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28025,6 +28349,7 @@ func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv uint hasLen := containerLen > 0 @@ -28051,6 +28376,7 @@ func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28084,6 +28410,7 @@ func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv uint8 hasLen := containerLen > 0 @@ -28110,6 +28437,7 @@ func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28143,6 +28471,7 @@ func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv uint16 hasLen := containerLen > 0 @@ -28169,6 +28498,7 @@ func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28202,6 +28532,7 @@ func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv uint32 hasLen := containerLen > 0 @@ -28228,6 +28559,7 @@ func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28261,6 +28593,7 @@ func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv uint64 hasLen := containerLen > 0 @@ -28287,6 +28620,7 @@ func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28320,6 +28654,7 @@ func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv uintptr hasLen := containerLen > 0 @@ -28346,6 +28681,7 @@ func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28379,6 +28715,7 @@ func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv int hasLen := containerLen > 0 @@ -28405,6 +28742,7 @@ func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28438,6 +28776,7 @@ func (_ 
fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv int8 hasLen := containerLen > 0 @@ -28464,6 +28803,7 @@ func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28497,6 +28837,7 @@ func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv int16 hasLen := containerLen > 0 @@ -28523,6 +28864,7 @@ func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28556,6 +28898,7 @@ func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv int32 hasLen := containerLen > 0 @@ -28582,6 +28925,7 @@ func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28615,6 +28959,7 @@ func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv int64 hasLen := containerLen > 0 @@ -28641,6 +28986,7 @@ func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28674,6 +29020,7 @@ func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv float32 hasLen := containerLen > 0 @@ -28700,6 +29047,7 @@ func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28733,6 +29081,7 @@ func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv float64 hasLen := containerLen > 0 @@ -28759,6 +29108,7 @@ func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28792,6 +29142,7 @@ func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk uintptr var mv bool hasLen := containerLen > 0 @@ -28818,6 +29169,7 @@ func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28851,6 +29203,7 @@ func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk int var mv interface{} @@ -28883,6 +29236,7 @@ func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28916,6 +29270,7 @@ func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv string hasLen := containerLen > 0 @@ -28942,6 +29297,7 @@ func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -28975,6 +29331,7 @@ func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv uint hasLen := containerLen > 0 @@ -29001,6 +29358,7 @@ func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool, } } 
dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29034,6 +29392,7 @@ func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv uint8 hasLen := containerLen > 0 @@ -29060,6 +29419,7 @@ func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29093,6 +29453,7 @@ func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv uint16 hasLen := containerLen > 0 @@ -29119,6 +29480,7 @@ func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29152,6 +29514,7 @@ func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv uint32 hasLen := containerLen > 0 @@ -29178,6 +29541,7 @@ func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29211,6 +29575,7 @@ func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv uint64 hasLen := containerLen > 0 @@ -29237,6 +29602,7 @@ func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29270,6 +29636,7 @@ func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv uintptr hasLen := containerLen > 0 @@ -29296,6 +29663,7 @@ func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29329,6 +29697,7 @@ func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv int hasLen := containerLen > 0 @@ -29355,6 +29724,7 @@ func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29388,6 +29758,7 @@ func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv int8 hasLen := containerLen > 0 @@ -29414,6 +29785,7 @@ func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29447,6 +29819,7 @@ func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv int16 hasLen := containerLen > 0 @@ -29473,6 +29846,7 @@ func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29506,6 +29880,7 @@ func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv int32 hasLen := containerLen > 0 @@ -29532,6 +29907,7 @@ func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29565,6 +29941,7 @@ func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv int64 hasLen := containerLen > 0 @@ -29591,6 +29968,7 @@ func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29624,6 +30002,7 @@ func (_ fastpathT) DecMapIntFloat32V(v 
map[int]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv float32 hasLen := containerLen > 0 @@ -29650,6 +30029,7 @@ func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29683,6 +30063,7 @@ func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv float64 hasLen := containerLen > 0 @@ -29709,6 +30090,7 @@ func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29742,6 +30124,7 @@ func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int var mv bool hasLen := containerLen > 0 @@ -29768,6 +30151,7 @@ func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29801,6 +30185,7 @@ func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk int8 var mv interface{} @@ -29833,6 +30218,7 @@ func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29866,6 +30252,7 @@ func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv string hasLen := containerLen > 0 @@ -29892,6 +30279,7 @@ func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29925,6 +30313,7 @@ func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv uint hasLen := containerLen > 0 @@ -29951,6 +30340,7 @@ func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -29984,6 +30374,7 @@ func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv uint8 hasLen := containerLen > 0 @@ -30010,6 +30401,7 @@ func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30043,6 +30435,7 @@ func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv uint16 hasLen := containerLen > 0 @@ -30069,6 +30462,7 @@ func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30102,6 +30496,7 @@ func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv uint32 hasLen := containerLen > 0 @@ -30128,6 +30523,7 @@ func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30161,6 +30557,7 @@ func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv uint64 hasLen := containerLen > 0 @@ -30187,6 +30584,7 @@ func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30220,6 +30618,7 @@ func (_ fastpathT) DecMapInt8UintptrV(v 
map[int8]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv uintptr hasLen := containerLen > 0 @@ -30246,6 +30645,7 @@ func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30279,6 +30679,7 @@ func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv int hasLen := containerLen > 0 @@ -30305,6 +30706,7 @@ func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30338,6 +30740,7 @@ func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv int8 hasLen := containerLen > 0 @@ -30364,6 +30767,7 @@ func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30397,6 +30801,7 @@ func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv int16 hasLen := containerLen > 0 @@ -30423,6 +30828,7 @@ func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30456,6 +30862,7 @@ func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv int32 hasLen := containerLen > 0 @@ -30482,6 +30889,7 @@ func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30515,6 +30923,7 @@ func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv int64 hasLen := containerLen > 0 @@ -30541,6 +30950,7 @@ func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30574,6 +30984,7 @@ func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv float32 hasLen := containerLen > 0 @@ -30600,6 +31011,7 @@ func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30633,6 +31045,7 @@ func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv float64 hasLen := containerLen > 0 @@ -30659,6 +31072,7 @@ func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30692,6 +31106,7 @@ func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int8 var mv bool hasLen := containerLen > 0 @@ -30718,6 +31133,7 @@ func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30751,6 +31167,7 @@ func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk int16 var mv interface{} @@ -30783,6 +31200,7 @@ func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30816,6 +31234,7 @@ func (_ fastpathT) DecMapInt16StringV(v 
map[int16]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv string hasLen := containerLen > 0 @@ -30842,6 +31261,7 @@ func (_ fastpathT) DecMapInt16StringV(v map[int16]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30875,6 +31295,7 @@ func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv uint hasLen := containerLen > 0 @@ -30901,6 +31322,7 @@ func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30934,6 +31356,7 @@ func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv uint8 hasLen := containerLen > 0 @@ -30960,6 +31383,7 @@ func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -30993,6 +31417,7 @@ func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv uint16 hasLen := containerLen > 0 @@ -31019,6 +31444,7 @@ func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31052,6 +31478,7 @@ func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv uint32 hasLen := containerLen > 0 @@ -31078,6 +31505,7 @@ func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31111,6 +31539,7 @@ func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv uint64 hasLen := containerLen > 0 @@ -31137,6 +31566,7 @@ func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31170,6 +31600,7 @@ func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv uintptr hasLen := containerLen > 0 @@ -31196,6 +31627,7 @@ func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31229,6 +31661,7 @@ func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv int hasLen := containerLen > 0 @@ -31255,6 +31688,7 @@ func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31288,6 +31722,7 @@ func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv int8 hasLen := containerLen > 0 @@ -31314,6 +31749,7 @@ func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31347,6 +31783,7 @@ func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv int16 hasLen := containerLen > 0 @@ -31373,6 +31810,7 @@ func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31406,6 +31844,7 @@ func (_ fastpathT) DecMapInt16Int32V(v 
map[int16]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv int32 hasLen := containerLen > 0 @@ -31432,6 +31871,7 @@ func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31465,6 +31905,7 @@ func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv int64 hasLen := containerLen > 0 @@ -31491,6 +31932,7 @@ func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31524,6 +31966,7 @@ func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv float32 hasLen := containerLen > 0 @@ -31550,6 +31993,7 @@ func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31583,6 +32027,7 @@ func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv float64 hasLen := containerLen > 0 @@ -31609,6 +32054,7 @@ func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31642,6 +32088,7 @@ func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int16 var mv bool hasLen := containerLen > 0 @@ -31668,6 +32115,7 @@ func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31701,6 +32149,7 @@ func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk int32 var mv interface{} @@ -31733,6 +32182,7 @@ func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31766,6 +32216,7 @@ func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv string hasLen := containerLen > 0 @@ -31792,6 +32243,7 @@ func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31825,6 +32277,7 @@ func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv uint hasLen := containerLen > 0 @@ -31851,6 +32304,7 @@ func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31884,6 +32338,7 @@ func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv uint8 hasLen := containerLen > 0 @@ -31910,6 +32365,7 @@ func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -31943,6 +32399,7 @@ func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv uint16 hasLen := containerLen > 0 @@ -31969,6 +32426,7 @@ func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32002,6 
+32460,7 @@ func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv uint32 hasLen := containerLen > 0 @@ -32028,6 +32487,7 @@ func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32061,6 +32521,7 @@ func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv uint64 hasLen := containerLen > 0 @@ -32087,6 +32548,7 @@ func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32120,6 +32582,7 @@ func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv uintptr hasLen := containerLen > 0 @@ -32146,6 +32609,7 @@ func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32179,6 +32643,7 @@ func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv int hasLen := containerLen > 0 @@ -32205,6 +32670,7 @@ func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32238,6 +32704,7 @@ func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv int8 hasLen := containerLen > 0 @@ -32264,6 +32731,7 @@ func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32297,6 +32765,7 @@ func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv int16 hasLen := containerLen > 0 @@ -32323,6 +32792,7 @@ func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32356,6 +32826,7 @@ func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv int32 hasLen := containerLen > 0 @@ -32382,6 +32853,7 @@ func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32415,6 +32887,7 @@ func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv int64 hasLen := containerLen > 0 @@ -32441,6 +32914,7 @@ func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32474,6 +32948,7 @@ func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv float32 hasLen := containerLen > 0 @@ -32500,6 +32975,7 @@ func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32533,6 +33009,7 @@ func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv float64 hasLen := containerLen > 0 @@ -32559,6 +33036,7 @@ func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32592,6 
+33070,7 @@ func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int32 var mv bool hasLen := containerLen > 0 @@ -32618,6 +33097,7 @@ func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32651,6 +33131,7 @@ func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk int64 var mv interface{} @@ -32683,6 +33164,7 @@ func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32716,6 +33198,7 @@ func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv string hasLen := containerLen > 0 @@ -32742,6 +33225,7 @@ func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32775,6 +33259,7 @@ func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv uint hasLen := containerLen > 0 @@ -32801,6 +33286,7 @@ func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32834,6 +33320,7 @@ func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv uint8 hasLen := containerLen > 0 @@ -32860,6 +33347,7 @@ func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32893,6 +33381,7 @@ func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv uint16 hasLen := containerLen > 0 @@ -32919,6 +33408,7 @@ func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -32952,6 +33442,7 @@ func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv uint32 hasLen := containerLen > 0 @@ -32978,6 +33469,7 @@ func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33011,6 +33503,7 @@ func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv uint64 hasLen := containerLen > 0 @@ -33037,6 +33530,7 @@ func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33070,6 +33564,7 @@ func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv uintptr hasLen := containerLen > 0 @@ -33096,6 +33591,7 @@ func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33129,6 +33625,7 @@ func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv int hasLen := containerLen > 0 @@ -33155,6 +33652,7 @@ func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool, } } dd.ReadMapEnd() + 
d.depthDecr() return v, changed } @@ -33188,6 +33686,7 @@ func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv int8 hasLen := containerLen > 0 @@ -33214,6 +33713,7 @@ func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33247,6 +33747,7 @@ func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv int16 hasLen := containerLen > 0 @@ -33273,6 +33774,7 @@ func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33306,6 +33808,7 @@ func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv int32 hasLen := containerLen > 0 @@ -33332,6 +33835,7 @@ func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33365,6 +33869,7 @@ func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv int64 hasLen := containerLen > 0 @@ -33391,6 +33896,7 @@ func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33424,6 +33930,7 @@ func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv float32 hasLen := containerLen > 0 @@ -33450,6 +33957,7 @@ func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33483,6 +33991,7 @@ func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv float64 hasLen := containerLen > 0 @@ -33509,6 +34018,7 @@ func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33542,6 +34052,7 @@ func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk int64 var mv bool hasLen := containerLen > 0 @@ -33568,6 +34079,7 @@ func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33601,6 +34113,7 @@ func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset var mk bool var mv interface{} @@ -33633,6 +34146,7 @@ func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33666,6 +34180,7 @@ func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv string hasLen := containerLen > 0 @@ -33692,6 +34207,7 @@ func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33725,6 +34241,7 @@ func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv uint hasLen := containerLen > 0 @@ -33751,6 +34268,7 @@ func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange 
bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33784,6 +34302,7 @@ func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv uint8 hasLen := containerLen > 0 @@ -33810,6 +34329,7 @@ func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33843,6 +34363,7 @@ func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv uint16 hasLen := containerLen > 0 @@ -33869,6 +34390,7 @@ func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33902,6 +34424,7 @@ func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv uint32 hasLen := containerLen > 0 @@ -33928,6 +34451,7 @@ func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -33961,6 +34485,7 @@ func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv uint64 hasLen := containerLen > 0 @@ -33987,6 +34512,7 @@ func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -34020,6 +34546,7 @@ func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv uintptr hasLen := containerLen > 0 @@ -34046,6 +34573,7 @@ func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -34079,6 +34607,7 @@ func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv int hasLen := containerLen > 0 @@ -34105,6 +34634,7 @@ func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -34138,6 +34668,7 @@ func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv int8 hasLen := containerLen > 0 @@ -34164,6 +34695,7 @@ func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -34197,6 +34729,7 @@ func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv int16 hasLen := containerLen > 0 @@ -34223,6 +34756,7 @@ func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -34256,6 +34790,7 @@ func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv int32 hasLen := containerLen > 0 @@ -34282,6 +34817,7 @@ func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -34315,6 +34851,7 @@ func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv int64 hasLen := containerLen > 0 @@ -34341,6 +34878,7 @@ func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ 
-34374,6 +34912,7 @@ func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv float32 hasLen := containerLen > 0 @@ -34400,6 +34939,7 @@ func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -34433,6 +34973,7 @@ func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv float64 hasLen := containerLen > 0 @@ -34459,6 +35000,7 @@ func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } @@ -34492,6 +35034,7 @@ func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool, dd.ReadMapEnd() return v, changed } + d.depthIncr() var mk bool var mv bool hasLen := containerLen > 0 @@ -34518,5 +35061,6 @@ func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool, } } dd.ReadMapEnd() + d.depthDecr() return v, changed } diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl index 2023e05d3..3ccce6a70 100644 --- a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl @@ -423,6 +423,7 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange slh.End() return v, changed } + d.depthIncr() hasLen := containerLenS > 0 var xlen int if hasLen && canChange { @@ -480,6 +481,7 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange } } slh.End() + d.depthDecr() return v, changed } {{end}}{{end}}{{end}} @@ -518,6 +520,7 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Ele dd.ReadMapEnd() return v, changed } + d.depthIncr() {{ if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset {{end}}var mk {{ .MapKey }} var mv {{ .Elem }} @@ -539,6 +542,7 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Ele if v != nil { v[mk] = mv } } dd.ReadMapEnd() + d.depthDecr() return v, changed } {{end}}{{end}}{{end}} diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go index 7c3b76055..388641d81 100644 --- a/vendor/github.com/ugorji/go/codec/json.go +++ b/vendor/github.com/ugorji/go/codec/json.go @@ -707,6 +707,7 @@ func (d *jsonDecDriver) ReadMapEnd() { } func (d *jsonDecDriver) readLit(length, fromIdx uint8) { + // length here is always less than 8 (literals are: null, true, false) bs := d.r.readx(int(length)) d.tok = 0 if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) { diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go index b716d1487..786840562 100644 --- a/vendor/github.com/ugorji/go/codec/msgpack.go +++ b/vendor/github.com/ugorji/go/codec/msgpack.go @@ -517,8 +517,10 @@ func (d *msgpackDecDriver) DecodeNaked() { if n.u == uint64(mpTimeExtTagU) { n.v = valueTypeTime n.t = d.decodeTime(clen) - } else { + } else if d.br { n.l = d.r.readx(clen) + } else { + n.l = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) } default: d.d.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) @@ -911,7 +913,11 @@ func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag) return } - xbs = d.r.readx(clen) + 
if d.br { + xbs = d.r.readx(clen) + } else { + xbs = decByteSlice(d.r, clen, d.d.h.MaxInitLen, d.d.b[:]) + } } d.bdRead = false return diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go index 6a81ca416..605dfbe1a 100644 --- a/vendor/github.com/ugorji/go/codec/simple.go +++ b/vendor/github.com/ugorji/go/codec/simple.go @@ -509,7 +509,11 @@ func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs [ d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag) return } - xbs = d.r.readx(l) + if d.br { + xbs = d.r.readx(l) + } else { + xbs = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) + } case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: xbs = d.DecodeBytes(nil, true) @@ -570,7 +574,11 @@ func (d *simpleDecDriver) DecodeNaked() { n.v = valueTypeExt l := d.decLen() n.u = uint64(d.r.readn1()) - n.l = d.r.readx(l) + if d.br { + n.l = d.r.readx(l) + } else { + n.l = decByteSlice(d.r, l, d.d.h.MaxInitLen, d.d.b[:]) + } case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: n.v = valueTypeArray diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go index 162e70364..013534193 100644 --- a/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go +++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go @@ -68,11 +68,10 @@ func (m *Mutex) Lock(ctx context.Context) error { // wait for deletion revisions prior to myKey hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) - // release lock key if cancelled - select { - case <-ctx.Done(): + // release lock key if wait failed + if werr != nil { m.Unlock(client.Ctx()) - default: + } else { m.hdr = hdr } return werr diff --git a/vendor/go.etcd.io/etcd/clientv3/lease.go b/vendor/go.etcd.io/etcd/clientv3/lease.go index 68a8ae6b3..380de02a8 100644 --- a/vendor/go.etcd.io/etcd/clientv3/lease.go +++ b/vendor/go.etcd.io/etcd/clientv3/lease.go @@ -118,19 +118,18 @@ type Lease interface { // Leases retrieves all leases. Leases(ctx context.Context) (*LeaseLeasesResponse, error) - // KeepAlive keeps the given lease alive forever. If the keepalive response - // posted to the channel is not consumed immediately, the lease client will - // continue sending keep alive requests to the etcd server at least every - // second until latest response is consumed. + // KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted + // to the channel are not consumed promptly the channel may become full. When full, the lease + // client will continue sending keep alive requests to the etcd server, but will drop responses + // until there is capacity on the channel to send more responses. + // + // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or + // canceled by the caller (e.g. context.Canceled), KeepAlive returns a ErrKeepAliveHalted error + // containing the error reason. // // The returned "LeaseKeepAliveResponse" channel closes if underlying keep // alive stream is interrupted in some way the client cannot handle itself; - // given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse" - // from this closed channel is nil. - // - // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: - // no leader") or canceled by the caller (e.g. context.Canceled), the error - // is returned. Otherwise, it retries. 
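The KeepAlive contract being rewritten in this hunk expects callers to drain the response channel promptly; when the channel is full, lease renewals continue but responses are dropped. A minimal consumer sketch, assuming a reachable etcd endpoint and an illustrative 10-second TTL (neither value comes from this change):

```go
package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // illustrative endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	lease, err := cli.Grant(context.Background(), 10) // illustrative 10s TTL
	if err != nil {
		log.Fatal(err)
	}

	// Consume keepalive responses promptly; if this channel fills up, the
	// client keeps renewing the lease but drops responses until there is
	// capacity again.
	ch, err := cli.KeepAlive(context.Background(), lease.ID)
	if err != nil {
		log.Fatal(err)
	}
	for resp := range ch {
		log.Printf("lease %x renewed, TTL=%d", resp.ID, resp.TTL)
	}
	// The range ends when the channel closes, i.e. when the keepalive stream
	// is interrupted or the context is canceled.
}
```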
+ // given context "ctx" is canceled or timed out. // // TODO(v4.0): post errors to last keep alive message before closing // (see https://go.etcd.io/etcd/pull/7866) diff --git a/vendor/go.etcd.io/etcd/raft/README.md b/vendor/go.etcd.io/etcd/raft/README.md index 0ddf3f48f..a78e5f720 100644 --- a/vendor/go.etcd.io/etcd/raft/README.md +++ b/vendor/go.etcd.io/etcd/raft/README.md @@ -41,6 +41,7 @@ This raft implementation also includes a few optional enhancements: - Writing to leader's disk in parallel - Internal proposal redirection from followers to leader - Automatic stepping down when the leader loses quorum +- Protection against unbounded log growth when quorum is lost ## Notable Users diff --git a/vendor/go.etcd.io/etcd/raft/doc.go b/vendor/go.etcd.io/etcd/raft/doc.go index 2c10c0f5d..c30d88445 100644 --- a/vendor/go.etcd.io/etcd/raft/doc.go +++ b/vendor/go.etcd.io/etcd/raft/doc.go @@ -87,7 +87,7 @@ large). Note: Marshalling messages is not thread-safe; it is important that you make sure that no new entries are persisted while marshalling. -The easiest way to achieve this is to serialise the messages directly inside +The easiest way to achieve this is to serialize the messages directly inside your main raft loop. 3. Apply Snapshot (if any) and CommittedEntries to the state machine. @@ -153,7 +153,7 @@ If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; you may have to re-propose after a timeout. -To add or remove node in a cluster, build ConfChange struct 'cc' and call: +To add or remove a node in a cluster, build ConfChange struct 'cc' and call: n.ProposeConfChange(ctx, cc) @@ -260,7 +260,7 @@ stale log entries: 'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election protocol. When Config.PreVote is true, a pre-election is carried out first (using the same rules as a regular election), and no node increases its term - number unless the pre-election indicates that the campaigining node would win. + number unless the pre-election indicates that the campaigning node would win. This minimizes disruption when a partitioned node rejoins the cluster. 'MsgSnap' requests to install a snapshot message. When a node has just diff --git a/vendor/go.etcd.io/etcd/raft/node.go b/vendor/go.etcd.io/etcd/raft/node.go index 9e7c209ce..f67628fd3 100644 --- a/vendor/go.etcd.io/etcd/raft/node.go +++ b/vendor/go.etcd.io/etcd/raft/node.go @@ -401,6 +401,7 @@ func (n *node) run(r *raft) { r.msgs = nil r.readStates = nil + r.reduceUncommittedSize(rd.CommittedEntries) advancec = n.advancec case <-advancec: if applyingToI != 0 { diff --git a/vendor/go.etcd.io/etcd/raft/raft.go b/vendor/go.etcd.io/etcd/raft/raft.go index 81bad3bec..bf0a8983c 100644 --- a/vendor/go.etcd.io/etcd/raft/raft.go +++ b/vendor/go.etcd.io/etcd/raft/raft.go @@ -148,12 +148,17 @@ type Config struct { // applied entries. This is a very application dependent configuration. Applied uint64 - // MaxSizePerMsg limits the max size of each append message. Smaller value - // lowers the raft recovery cost(initial probing and message lost during normal - // operation). On the other side, it might affect the throughput during normal - // replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per - // message. + // MaxSizePerMsg limits the max byte size of each append message. Smaller + // value lowers the raft recovery cost(initial probing and message lost + // during normal operation). 
On the other side, it might affect the + // throughput during normal replication. Note: math.MaxUint64 for unlimited, + // 0 for at most one entry per message. MaxSizePerMsg uint64 + // MaxUncommittedEntriesSize limits the aggregate byte size of the + // uncommitted entries that may be appended to a leader's log. Once this + // limit is exceeded, proposals will begin to return ErrProposalDropped + // errors. Note: 0 for no limit. + MaxUncommittedEntriesSize uint64 // MaxInflightMsgs limits the max number of in-flight append messages during // optimistic replication phase. The application transportation layer usually // has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid @@ -215,6 +220,10 @@ func (c *Config) validate() error { return errors.New("storage cannot be nil") } + if c.MaxUncommittedEntriesSize == 0 { + c.MaxUncommittedEntriesSize = noLimit + } + if c.MaxInflightMsgs <= 0 { return errors.New("max inflight messages must be greater than 0") } @@ -241,11 +250,12 @@ type raft struct { // the log raftLog *raftLog - maxInflight int - maxMsgSize uint64 - prs map[uint64]*Progress - learnerPrs map[uint64]*Progress - matchBuf uint64Slice + maxMsgSize uint64 + maxUncommittedSize uint64 + maxInflight int + prs map[uint64]*Progress + learnerPrs map[uint64]*Progress + matchBuf uint64Slice state StateType @@ -268,6 +278,10 @@ type raft struct { // be proposed if the leader's applied index is greater than this // value. pendingConfIndex uint64 + // an estimate of the size of the uncommitted tail of the Raft log. Used to + // prevent unbounded log growth. Only maintained by the leader. Reset on + // term changes. + uncommittedSize uint64 readOnly *readOnly @@ -326,6 +340,7 @@ func newRaft(c *Config) *raft { raftLog: raftlog, maxMsgSize: c.MaxSizePerMsg, maxInflight: c.MaxInflightMsgs, + maxUncommittedSize: c.MaxUncommittedEntriesSize, prs: make(map[uint64]*Progress), learnerPrs: make(map[uint64]*Progress), electionTimeout: c.ElectionTick, @@ -514,7 +529,7 @@ func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool { return true } -// sendHeartbeat sends an empty MsgApp +// sendHeartbeat sends a heartbeat RPC to the given peer. func (r *raft) sendHeartbeat(to uint64, ctx []byte) { // Attach the commit as min(to.matched, r.committed). // When the leader sends out heartbeat message, @@ -616,6 +631,7 @@ func (r *raft) reset(term uint64) { }) r.pendingConfIndex = 0 + r.uncommittedSize = 0 r.readOnly = newReadOnly(r.readOnly.option) } @@ -954,6 +970,10 @@ func stepLeader(r *raft, m pb.Message) error { r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee) return ErrProposalDropped } + if !r.increaseUncommittedSize(m.Entries) { + r.logger.Debugf("%x appending new entries to log would exceed uncommitted entry size limit; dropping proposal", r.id) + return ErrProposalDropped + } for i, e := range m.Entries { if e.Type == pb.EntryConfChange { @@ -1462,6 +1482,49 @@ func (r *raft) abortLeaderTransfer() { r.leadTransferee = None } +// increaseUncommittedSize computes the size of the proposed entries and +// determines whether they would push leader over its maxUncommittedSize limit. +// If the new entries would exceed the limit, the method returns false. If not, +// the increase in uncommitted entry size is recorded and the method returns +// true. 
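The new MaxUncommittedEntriesSize field above is enforced by increaseUncommittedSize, defined just below. A hedged sketch of how an embedding application might opt into the limit; all values are illustrative, not defaults:

```go
package example

import "go.etcd.io/etcd/raft"

func newNode() raft.Node {
	storage := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              0x01,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1 << 20, // 1 MiB per append message
		MaxInflightMsgs: 256,
		// Cap the uncommitted tail of the leader's log (0 means no limit);
		// proposals beyond the cap are dropped with ErrProposalDropped.
		MaxUncommittedEntriesSize: 1 << 30, // 1 GiB, illustrative
	}
	return raft.StartNode(c, []raft.Peer{{ID: 0x01}})
}
```

Once the leader's uncommitted tail exceeds the cap, further proposals fail with ErrProposalDropped until entries commit and reduceUncommittedSize frees budget again.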
+func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool { + var s uint64 + for _, e := range ents { + s += uint64(e.Size()) + } + + if r.uncommittedSize > 0 && r.uncommittedSize+s > r.maxUncommittedSize { + // If the uncommitted tail of the Raft log is empty, allow any size + // proposal. Otherwise, limit the size of the uncommitted tail of the + // log and drop any proposal that would push the size over the limit. + return false + } + r.uncommittedSize += s + return true +} + +// reduceUncommittedSize accounts for the newly committed entries by decreasing +// the uncommitted entry size limit. +func (r *raft) reduceUncommittedSize(ents []pb.Entry) { + if r.uncommittedSize == 0 { + // Fast-path for followers, who do not track or enforce the limit. + return + } + + var s uint64 + for _, e := range ents { + s += uint64(e.Size()) + } + if s > r.uncommittedSize { + // uncommittedSize may underestimate the size of the uncommitted Raft + // log tail but will never overestimate it. Saturate at 0 instead of + // allowing overflow. + r.uncommittedSize = 0 + } else { + r.uncommittedSize -= s + } +} + func numOfPendingConf(ents []pb.Entry) int { n := 0 for i := range ents { diff --git a/vendor/go.etcd.io/etcd/raft/rawnode.go b/vendor/go.etcd.io/etcd/raft/rawnode.go index 5f8a116dd..4a4ec2e94 100644 --- a/vendor/go.etcd.io/etcd/raft/rawnode.go +++ b/vendor/go.etcd.io/etcd/raft/rawnode.go @@ -198,6 +198,7 @@ func (rn *RawNode) Step(m pb.Message) error { func (rn *RawNode) Ready() Ready { rd := rn.newReady() rn.raft.msgs = nil + rn.raft.reduceUncommittedSize(rd.CommittedEntries) return rd } diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md index e3d3770ce..97d66983d 100644 --- a/vendor/go.opencensus.io/README.md +++ b/vendor/go.opencensus.io/README.md @@ -7,7 +7,7 @@ OpenCensus Go is a Go implementation of OpenCensus, a toolkit for collecting application performance and behavior monitoring data. -Currently it consists of three major components: tags, stats, and tracing. +Currently it consists of three major components: tags, stats and tracing. ## Installation @@ -38,7 +38,7 @@ integration with your RPC framework: * [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) * [Memcache](https://godoc.org/github.com/orijtech/gomemcache) -If you're a framework not listed here, you could either implement your own middleware for your +If you're using a framework not listed here, you could either implement your own middleware for your framework or use [custom stats](#stats) and [spans](#spans) directly in your application. ## Exporters @@ -74,7 +74,7 @@ in the same process or can be encoded to be transmitted on the wire. Usually, th be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` for gRPC. -Package tag allows adding or modifying tags in the current context. +Package `tag` allows adding or modifying tags in the current context. [embedmd]:# (internal/readme/tags.go new) ```go @@ -178,8 +178,8 @@ Spans can have parents or can be root spans if they don't have any parents. The current span is propagated in-process and across the network to allow associating new child spans with the parent. -In the same process, context.Context is used to propagate spans. -trace.StartSpan creates a new span as a root if the current context +In the same process, `context.Context` is used to propagate spans. +`trace.StartSpan` creates a new span as a root if the current context doesn't contain a span. 
Or, it creates a child of the span that is already in current context. The returned context can be used to keep propagating the newly created span in the current context. @@ -195,8 +195,8 @@ defer span.End() Across the network, OpenCensus provides different propagation methods for different protocols. -* gRPC integrations uses the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). -* HTTP integrations uses Zipkin's [B3](https://github.com/openzipkin/b3-propagation) +* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). +* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) by default but can be configured to use a custom propagation method by setting another [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). diff --git a/vendor/go.opencensus.io/exemplar/exemplar.go b/vendor/go.opencensus.io/exemplar/exemplar.go new file mode 100644 index 000000000..e676df837 --- /dev/null +++ b/vendor/go.opencensus.io/exemplar/exemplar.go @@ -0,0 +1,78 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package exemplar implements support for exemplars. Exemplars are additional +// data associated with each measurement. +// +// Their purpose it to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. +package exemplar + +import ( + "context" + "time" +) + +const ( + KeyTraceID = "trace_id" + KeySpanID = "span_id" + KeyPrefixTag = "tag:" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. +type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. +// The map should only be mutated from AttachmentExtractor functions. +type Attachments map[string]string + +// AttachmentExtractor is a function capable of extracting exemplar attachments +// from the context used to record measurements. +// The map passed to the function should be mutated and returned. It will +// initially be nil: the first AttachmentExtractor that would like to add keys to the +// map is responsible for initializing it. +type AttachmentExtractor func(ctx context.Context, a Attachments) Attachments + +var extractors []AttachmentExtractor + +// RegisterAttachmentExtractor registers the given extractor associated with the exemplar +// type name. +// +// Extractors will be used to attempt to extract exemplars from the context +// associated with each recorded measurement. +// +// Packages that support exemplars should register their extractor functions on +// initialization. +// +// RegisterAttachmentExtractor should not be called after any measurements have +// been recorded. 
+func RegisterAttachmentExtractor(e AttachmentExtractor) { + extractors = append(extractors, e) +} + +// NewFromContext extracts exemplars from the given context. +// Each registered AttachmentExtractor (see RegisterAttachmentExtractor) is called in an +// unspecified order to add attachments to the exemplar. +func AttachmentsFromContext(ctx context.Context) Attachments { + var a Attachments + for _, extractor := range extractors { + a = extractor(ctx, a) + } + return a +} diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum index 6765bc031..3e0bab884 100644 --- a/vendor/go.opencensus.io/go.sum +++ b/vendor/go.opencensus.io/go.sum @@ -31,6 +31,7 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go index 68faf24f5..da815b2a7 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/client.go +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -47,6 +47,10 @@ type Transport struct { // for spans started by this transport. StartOptions trace.StartOptions + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + // NameFromRequest holds the function to use for generating the span name // from the information found in the outgoing HTTP Request. By default the // name equals the URL Path. @@ -75,11 +79,17 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { if spanNameFormatter == nil { spanNameFormatter = spanNameFromURL } + + startOpts := t.StartOptions + if t.GetStartOptions != nil { + startOpts = t.GetStartOptions(req) + } + rt = &traceTransport{ base: rt, format: format, startOptions: trace.StartOptions{ - Sampler: t.StartOptions.Sampler, + Sampler: startOpts.Sampler, SpanKind: trace.SpanKindClient, }, formatSpanName: spanNameFormatter, diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index 492b74b9e..ff72de97a 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -56,6 +56,10 @@ type Handler struct { // for spans started by this transport. StartOptions trace.StartOptions + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) // servers. 
If true, any trace metadata set on the incoming request will // be added as a linked trace instead of being added as a parent of the @@ -93,15 +97,21 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ name = h.FormatSpanName(r) } ctx := r.Context() + + startOpts := h.StartOptions + if h.GetStartOptions != nil { + startOpts = h.GetStartOptions(r) + } + var span *trace.Span sc, ok := h.extractSpanContext(r) if ok && !h.IsPublicEndpoint { ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, - trace.WithSampler(h.StartOptions.Sampler), + trace.WithSampler(startOpts.Sampler), trace.WithSpanKind(trace.SpanKindServer)) } else { ctx, span = trace.StartSpan(ctx, name, - trace.WithSampler(h.StartOptions.Sampler), + trace.WithSampler(startOpts.Sampler), trace.WithSpanKind(trace.SpanKindServer), ) if ok { diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go index 7aa03cd5d..05c6c56cc 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go @@ -29,6 +29,8 @@ type spanAnnotator struct { // TODO: Remove NewSpanAnnotator at the next release. +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. // Deprecated: Use NewSpanAnnotatingClientTrace instead func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { return NewSpanAnnotatingClientTrace(r, s) diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go index d99b98d12..46dcc8e57 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -32,6 +32,7 @@ var ( ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) ) +// Client measures supported for use in custom views. var ( ClientSentBytes = stats.Int64( "opencensus.io/http/client/sent_bytes", diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go index 7a8a62c14..00d473ee0 100644 --- a/vendor/go.opencensus.io/stats/doc.go +++ b/vendor/go.opencensus.io/stats/doc.go @@ -21,35 +21,49 @@ aggregate the collected data, and export the aggregated data. Measures -A measure represents a type of metric to be tracked and recorded. +A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures to collect from a server. -Each measure needs to be registered before being used. Measure -constructors such as Int64 and Float64 automatically +Measure constructors such as Int64 and Float64 automatically register the measure by the given name. Each registered measure needs to be unique by name. Measures also have a description and a unit. -Libraries can define and export measures for their end users to -create views and collect instrumentation data. +Libraries can define and export measures. Application authors can then +create views and collect and break down measures by the tags they are +interested in. Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms -latency event. Users collect data points on the existing measures with +latency event. Measurements are created from measures with the current context. 
Tags from the current context are recorded with the measurements if they are any. -Recorded measurements are dropped immediately if user is not aggregating -them via views. Users don't necessarily need to conditionally enable/disable +Recorded measurements are dropped immediately if no views are registered for them. +There is usually no need to conditionally enable and disable recording to reduce cost. Recording of measurements is cheap. -Libraries can always record measurements, and end-users can later decide +Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. + +Exemplars + +For a given recorded measurement, the associated exemplar is a diagnostic map +that gives more information about the measurement. + +When aggregated using a Distribution aggregation, an exemplar is kept for each +bucket in the Distribution. This allows you to easily find an example of a +measurement that fell into each bucket. + +For example, if you also use the OpenCensus trace package and you +record a measurement with a context that contains a sampled trace span, +then the trace span will be added to the exemplar associated with the measurement. + +When exported to a supporting back end, you should be able to easily navigate +to example traces that fell into each bucket in the Distribution. + */ package stats // import "go.opencensus.io/stats" - -// TODO(acetechnologist): Add a link to the language independent OpenCensus -// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 6341eb2ad..ed5455205 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -19,7 +19,7 @@ import ( ) // DefaultRecorder will be called for each Record call. -var DefaultRecorder func(*tag.Map, interface{}) +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]string) // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index e489009cb..0aced02c3 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -18,6 +18,7 @@ package stats import ( "context" + "go.opencensus.io/exemplar" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) @@ -50,7 +51,7 @@ func Record(ctx context.Context, ms ...Measurement) { if !record { return } - recorder(tag.FromContext(ctx), ms) + recorder(tag.FromContext(ctx), ms, exemplar.AttachmentsFromContext(ctx)) } // RecordWithTags records one or multiple measurements at once. diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go index 88c500bff..960b94601 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -17,6 +17,8 @@ package view import ( "math" + + "go.opencensus.io/exemplar" ) // AggregationData represents an aggregated value from a collection. @@ -24,7 +26,7 @@ import ( // Mosts users won't directly access aggregration data. 
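To make the effect of the exemplar plumbing below concrete, here is a minimal, hedged sketch of reading the per-bucket exemplars that a Distribution aggregation now retains. The measure and view names are invented for the example; `RetrieveData`, `DistributionData`, and `ExemplarsPerBucket` are the APIs touched by this change.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var latencyMs = stats.Float64("example.com/latency", "request latency", stats.UnitMilliseconds)

func main() {
	v := &view.View{
		Name:        "example.com/latency_distribution",
		Measure:     latencyMs,
		Aggregation: view.Distribution(10, 100, 1000),
	}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}

	stats.Record(context.Background(), latencyMs.M(57))

	rows, err := view.RetrieveData(v.Name)
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range rows {
		dd, ok := row.Data.(*view.DistributionData)
		if !ok {
			continue
		}
		// ExemplarsPerBucket has len(bounds)+1 entries; a nil entry means no
		// exemplar has been retained for that bucket yet.
		for i, ex := range dd.ExemplarsPerBucket {
			if ex != nil {
				fmt.Printf("bucket %d: value=%v attachments=%v\n", i, ex.Value, ex.Attachments)
			}
		}
	}
}
```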
type AggregationData interface { isAggregationData() bool - addSample(v float64) + addSample(e *exemplar.Exemplar) clone() AggregationData equal(other AggregationData) bool } @@ -41,7 +43,7 @@ type CountData struct { func (a *CountData) isAggregationData() bool { return true } -func (a *CountData) addSample(v float64) { +func (a *CountData) addSample(_ *exemplar.Exemplar) { a.Value = a.Value + 1 } @@ -68,8 +70,8 @@ type SumData struct { func (a *SumData) isAggregationData() bool { return true } -func (a *SumData) addSample(f float64) { - a.Value += f +func (a *SumData) addSample(e *exemplar.Exemplar) { + a.Value += e.Value } func (a *SumData) clone() AggregationData { @@ -88,22 +90,30 @@ func (a *SumData) equal(other AggregationData) bool { // Distribution aggregation. // // Most users won't directly access distribution data. +// +// For a distribution with N bounds, the associated DistributionData will have +// N+1 buckets. type DistributionData struct { - Count int64 // number of data points aggregated - Min float64 // minimum value in the distribution - Max float64 // max value in the distribution - Mean float64 // mean of the distribution - SumOfSquaredDev float64 // sum of the squared deviation from the mean - CountPerBucket []int64 // number of occurrences per bucket - bounds []float64 // histogram distribution of the values + Count int64 // number of data points aggregated + Min float64 // minimum value in the distribution + Max float64 // max value in the distribution + Mean float64 // mean of the distribution + SumOfSquaredDev float64 // sum of the squared deviation from the mean + CountPerBucket []int64 // number of occurrences per bucket + // ExemplarsPerBucket is slice the same length as CountPerBucket containing + // an exemplar for the associated bucket, or nil. 
+ ExemplarsPerBucket []*exemplar.Exemplar + bounds []float64 // histogram distribution of the values } func newDistributionData(bounds []float64) *DistributionData { + bucketCount := len(bounds) + 1 return &DistributionData{ - CountPerBucket: make([]int64, len(bounds)+1), - bounds: bounds, - Min: math.MaxFloat64, - Max: math.SmallestNonzeroFloat64, + CountPerBucket: make([]int64, bucketCount), + ExemplarsPerBucket: make([]*exemplar.Exemplar, bucketCount), + bounds: bounds, + Min: math.MaxFloat64, + Max: math.SmallestNonzeroFloat64, } } @@ -119,7 +129,8 @@ func (a *DistributionData) variance() float64 { func (a *DistributionData) isAggregationData() bool { return true } -func (a *DistributionData) addSample(f float64) { +func (a *DistributionData) addSample(e *exemplar.Exemplar) { + f := e.Value if f < a.Min { a.Min = f } @@ -127,7 +138,7 @@ func (a *DistributionData) addSample(f float64) { a.Max = f } a.Count++ - a.incrementBucketCount(f) + a.addToBucket(e) if a.Count == 1 { a.Mean = f @@ -139,26 +150,43 @@ func (a *DistributionData) addSample(f float64) { a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean) } -func (a *DistributionData) incrementBucketCount(f float64) { - if len(a.bounds) == 0 { - a.CountPerBucket[0]++ - return - } - +func (a *DistributionData) addToBucket(e *exemplar.Exemplar) { + var count *int64 + var ex **exemplar.Exemplar for i, b := range a.bounds { - if f < b { - a.CountPerBucket[i]++ - return + if e.Value < b { + count = &a.CountPerBucket[i] + ex = &a.ExemplarsPerBucket[i] + break } } - a.CountPerBucket[len(a.bounds)]++ + if count == nil { + count = &a.CountPerBucket[len(a.bounds)] + ex = &a.ExemplarsPerBucket[len(a.bounds)] + } + *count++ + *ex = maybeRetainExemplar(*ex, e) +} + +func maybeRetainExemplar(old, cur *exemplar.Exemplar) *exemplar.Exemplar { + if old == nil { + return cur + } + + // Heuristic to pick the "better" exemplar: first keep the one with a + // sampled trace attachment, if neither have a trace attachment, pick the + // one with more attachments. + _, haveTraceID := cur.Attachments[exemplar.KeyTraceID] + if haveTraceID || len(cur.Attachments) >= len(old.Attachments) { + return cur + } + return old } func (a *DistributionData) clone() AggregationData { - counts := make([]int64, len(a.CountPerBucket)) - copy(counts, a.CountPerBucket) c := *a - c.CountPerBucket = counts + c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) + c.ExemplarsPerBucket = append([]*exemplar.Exemplar(nil), a.ExemplarsPerBucket...) 
return &c } @@ -190,8 +218,8 @@ func (l *LastValueData) isAggregationData() bool { return true } -func (l *LastValueData) addSample(v float64) { - l.Value = v +func (l *LastValueData) addSample(e *exemplar.Exemplar) { + l.Value = e.Value } func (l *LastValueData) clone() AggregationData { diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index 250395db2..32415d485 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -18,6 +18,8 @@ package view import ( "sort" + "go.opencensus.io/exemplar" + "go.opencensus.io/internal/tagencoding" "go.opencensus.io/tag" ) @@ -31,13 +33,13 @@ type collector struct { a *Aggregation } -func (c *collector) addSample(s string, v float64) { +func (c *collector) addSample(s string, e *exemplar.Exemplar) { aggregator, ok := c.signatures[s] if !ok { aggregator = c.a.newData() c.signatures[s] = aggregator } - aggregator.addSample(v) + aggregator.addSample(e) } // collectRows returns a snapshot of the collected Row values. diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go index 22323e2c5..c2a08af67 100644 --- a/vendor/go.opencensus.io/stats/view/view.go +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -23,6 +23,8 @@ import ( "sync/atomic" "time" + "go.opencensus.io/exemplar" + "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" @@ -127,12 +129,12 @@ func (v *viewInternal) collectedRows() []*Row { return v.collector.collectedRows(v.view.TagKeys) } -func (v *viewInternal) addSample(m *tag.Map, val float64) { +func (v *viewInternal) addSample(m *tag.Map, e *exemplar.Exemplar) { if !v.isSubscribed() { return } sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, val) + v.collector.addSample(sig, e) } // A Data is a set of rows about usage of the single measure associated diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 9255d27d2..63b0ee3cc 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -107,10 +107,12 @@ func RetrieveData(viewName string) ([]*Row, error) { return resp.rows, resp.err } -func record(tags *tag.Map, ms interface{}) { +func record(tags *tag.Map, ms interface{}, attachments map[string]string) { req := &recordReq{ - tm: tags, - ms: ms.([]stats.Measurement), + tm: tags, + ms: ms.([]stats.Measurement), + attachments: attachments, + t: time.Now(), } defaultWorker.c <- req } diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index 06c3c5464..b38f26f42 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -21,6 +21,8 @@ import ( "strings" "time" + "go.opencensus.io/exemplar" + "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" @@ -140,8 +142,10 @@ func (cmd *retrieveDataReq) handleCommand(w *worker) { // recordReq is the command to record data related to multiple measures // at once. 
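The recording path changed in these files now carries attachments and a timestamp with every measurement. A hedged sketch of what populates those attachments in practice: recording with a context that holds both a tag map and a sampled span yields an exemplar carrying `tag:`-prefixed entries plus `trace_id` and `span_id`, via the extractors registered in the `tag` and `trace` packages later in this diff. The measure, view, and tag names below are invented for the example.

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
	"go.opencensus.io/trace"
)

var bytesIn = stats.Int64("example.com/bytes_in", "bytes received", stats.UnitBytes)

func main() {
	methodKey, err := tag.NewKey("method")
	if err != nil {
		log.Fatal(err)
	}
	if err := view.Register(&view.View{
		Name:        "example.com/bytes_in_distribution",
		Measure:     bytesIn,
		TagKeys:     []tag.Key{methodKey},
		Aggregation: view.Distribution(1024, 65536),
	}); err != nil {
		log.Fatal(err)
	}

	ctx, err := tag.New(context.Background(), tag.Insert(methodKey, "GET"))
	if err != nil {
		log.Fatal(err)
	}
	ctx, span := trace.StartSpan(ctx, "handle-request", trace.WithSampler(trace.AlwaysSample()))
	defer span.End()

	// The exemplar kept for this data point should carry "trace_id", "span_id"
	// and "tag:method" attachments, extracted from the context at record time.
	stats.Record(ctx, bytesIn.M(512))
}
```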
type recordReq struct { - tm *tag.Map - ms []stats.Measurement + tm *tag.Map + ms []stats.Measurement + attachments map[string]string + t time.Time } func (cmd *recordReq) handleCommand(w *worker) { @@ -151,7 +155,12 @@ func (cmd *recordReq) handleCommand(w *worker) { } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - v.addSample(cmd.tm, m.Value()) + e := &exemplar.Exemplar{ + Value: m.Value(), + Timestamp: cmd.t, + Attachments: cmd.attachments, + } + v.addSample(cmd.tm, e) } } } diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go index ed528bcb3..dcc13f498 100644 --- a/vendor/go.opencensus.io/tag/context.go +++ b/vendor/go.opencensus.io/tag/context.go @@ -15,7 +15,11 @@ package tag -import "context" +import ( + "context" + + "go.opencensus.io/exemplar" +) // FromContext returns the tag map stored in the context. func FromContext(ctx context.Context) *Map { @@ -39,3 +43,25 @@ func NewContext(ctx context.Context, m *Map) context.Context { type ctxKey struct{} var mapCtxKey = ctxKey{} + +func init() { + exemplar.RegisterAttachmentExtractor(extractTagsAttachments) +} + +func extractTagsAttachments(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { + m := FromContext(ctx) + if m == nil { + return a + } + if len(m.m) == 0 { + return a + } + if a == nil { + a = make(map[string]string) + } + + for k, v := range m.m { + a[exemplar.KeyPrefixTag+k.Name()] = v + } + return a +} diff --git a/vendor/go.opencensus.io/trace/exemplar.go b/vendor/go.opencensus.io/trace/exemplar.go new file mode 100644 index 000000000..416d80590 --- /dev/null +++ b/vendor/go.opencensus.io/trace/exemplar.go @@ -0,0 +1,43 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + "encoding/hex" + + "go.opencensus.io/exemplar" +) + +func init() { + exemplar.RegisterAttachmentExtractor(attachSpanContext) +} + +func attachSpanContext(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { + span := FromContext(ctx) + if span == nil { + return a + } + sc := span.SpanContext() + if !sc.IsSampled() { + return a + } + if a == nil { + a = make(exemplar.Attachments) + } + a[exemplar.KeyTraceID] = hex.EncodeToString(sc.TraceID[:]) + a[exemplar.KeySpanID] = hex.EncodeToString(sc.SpanID[:]) + return a +} diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index 77578a3c5..9e5e5f033 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -243,13 +243,16 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa // End ends the span. 
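A small caller-side illustration of the `End` changes that follow: with the nil-receiver guard, deferring `End` on the result of `trace.FromContext` is safe even when the context carries no span, and the execution tracer task is now always stopped whether or not the span records events. The handler name is invented for the sketch.

```go
package main

import (
	"context"

	"go.opencensus.io/trace"
)

func handle(ctx context.Context) {
	// FromContext returns a nil *Span when ctx has no span; the patched End
	// returns early on a nil receiver, so this defer is safe either way.
	span := trace.FromContext(ctx)
	defer span.End()

	// ... do work ...
}

func main() {
	handle(context.Background()) // no span in this context
}
```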
func (s *Span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } if !s.IsRecordingEvents() { return } s.endOnce.Do(func() { - if s.executionTracerTaskEnd != nil { - s.executionTracerTaskEnd() - } exp, _ := exporters.Load().(exportersMap) mustExport := s.spanContext.IsSampled() && len(exp) > 0 if s.spanStore != nil || mustExport { diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go index 2345dd379..2d6c713eb 100644 --- a/vendor/go.opencensus.io/trace/tracestate/tracestate.go +++ b/vendor/go.opencensus.io/trace/tracestate/tracestate.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package tracestate implements support for the Tracestate header of the +// W3C TraceContext propagation format. package tracestate import ( diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index d0f482531..122c03e70 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -404,7 +404,7 @@ userAuthLoop: perms, authErr = config.PasswordCallback(s, password) case "keyboard-interactive": if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configubred") + authErr = errors.New("ssh: keyboard-interactive auth not configured") break } diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go index b89ca3523..12f3ee835 100644 --- a/vendor/golang.org/x/net/bpf/constants.go +++ b/vendor/golang.org/x/net/bpf/constants.go @@ -38,6 +38,7 @@ const ( type JumpTest uint16 // Supported operators for conditional jumps. +// K can be RegX for JumpIfX const ( // K == A JumpEqual JumpTest = iota @@ -134,12 +135,9 @@ const ( opMaskLoadDest = 0x01 opMaskLoadWidth = 0x18 opMaskLoadMode = 0xe0 - // opClsALU - opMaskOperandSrc = 0x08 - opMaskOperator = 0xf0 - // opClsJump - opMaskJumpConst = 0x0f - opMaskJumpCond = 0xf0 + // opClsALU & opClsJump + opMaskOperand = 0x08 + opMaskOperator = 0xf0 ) const ( @@ -192,15 +190,21 @@ const ( opLoadWidth1 ) -// Operator defined by ALUOp* +// Operand for ALU and Jump instructions +type opOperand uint16 +// Supported operand sources. const ( - opALUSrcConstant uint16 = iota << 3 - opALUSrcX + opOperandConstant opOperand = iota << 3 + opOperandX ) +// An jumpOp is a conditional jump condition. +type jumpOp uint16 + +// Supported jump conditions. 
const ( - opJumpAlways = iota << 4 + opJumpAlways jumpOp = iota << 4 opJumpEqual opJumpGT opJumpGE diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go index f9dc0e8ee..3cffcaa01 100644 --- a/vendor/golang.org/x/net/bpf/instructions.go +++ b/vendor/golang.org/x/net/bpf/instructions.go @@ -89,10 +89,14 @@ func (ri RawInstruction) Disassemble() Instruction { case opClsALU: switch op := ALUOp(ri.Op & opMaskOperator); op { case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: - if ri.Op&opMaskOperandSrc != 0 { + switch operand := opOperand(ri.Op & opMaskOperand); operand { + case opOperandX: return ALUOpX{Op: op} + case opOperandConstant: + return ALUOpConstant{Op: op, Val: ri.K} + default: + return ri } - return ALUOpConstant{Op: op, Val: ri.K} case aluOpNeg: return NegateA{} default: @@ -100,63 +104,18 @@ func (ri RawInstruction) Disassemble() Instruction { } case opClsJump: - if ri.Op&opMaskJumpConst != opClsJump { - return ri - } - switch ri.Op & opMaskJumpCond { + switch op := jumpOp(ri.Op & opMaskOperator); op { case opJumpAlways: return Jump{Skip: ri.K} - case opJumpEqual: - if ri.Jt == 0 { - return JumpIf{ - Cond: JumpNotEqual, - Val: ri.K, - SkipTrue: ri.Jf, - SkipFalse: 0, - } - } - return JumpIf{ - Cond: JumpEqual, - Val: ri.K, - SkipTrue: ri.Jt, - SkipFalse: ri.Jf, - } - case opJumpGT: - if ri.Jt == 0 { - return JumpIf{ - Cond: JumpLessOrEqual, - Val: ri.K, - SkipTrue: ri.Jf, - SkipFalse: 0, - } - } - return JumpIf{ - Cond: JumpGreaterThan, - Val: ri.K, - SkipTrue: ri.Jt, - SkipFalse: ri.Jf, - } - case opJumpGE: - if ri.Jt == 0 { - return JumpIf{ - Cond: JumpLessThan, - Val: ri.K, - SkipTrue: ri.Jf, - SkipFalse: 0, - } - } - return JumpIf{ - Cond: JumpGreaterOrEqual, - Val: ri.K, - SkipTrue: ri.Jt, - SkipFalse: ri.Jf, - } - case opJumpSet: - return JumpIf{ - Cond: JumpBitsSet, - Val: ri.K, - SkipTrue: ri.Jt, - SkipFalse: ri.Jf, + case opJumpEqual, opJumpGT, opJumpGE, opJumpSet: + cond, skipTrue, skipFalse := jumpOpToTest(op, ri.Jt, ri.Jf) + switch operand := opOperand(ri.Op & opMaskOperand); operand { + case opOperandX: + return JumpIfX{Cond: cond, SkipTrue: skipTrue, SkipFalse: skipFalse} + case opOperandConstant: + return JumpIf{Cond: cond, Val: ri.K, SkipTrue: skipTrue, SkipFalse: skipFalse} + default: + return ri } default: return ri @@ -187,6 +146,41 @@ func (ri RawInstruction) Disassemble() Instruction { } } +func jumpOpToTest(op jumpOp, skipTrue uint8, skipFalse uint8) (JumpTest, uint8, uint8) { + var test JumpTest + + // Decode "fake" jump conditions that don't appear in machine code + // Ensures the Assemble -> Disassemble stage recreates the same instructions + // See https://github.com/golang/go/issues/18470 + if skipTrue == 0 { + switch op { + case opJumpEqual: + test = JumpNotEqual + case opJumpGT: + test = JumpLessOrEqual + case opJumpGE: + test = JumpLessThan + case opJumpSet: + test = JumpBitsNotSet + } + + return test, skipFalse, 0 + } + + switch op { + case opJumpEqual: + test = JumpEqual + case opJumpGT: + test = JumpGreaterThan + case opJumpGE: + test = JumpGreaterOrEqual + case opJumpSet: + test = JumpBitsSet + } + + return test, skipTrue, skipFalse +} + // LoadConstant loads Val into register Dst. type LoadConstant struct { Dst Register @@ -413,7 +407,7 @@ type ALUOpConstant struct { // Assemble implements the Instruction Assemble method. 
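The decoder rewrite above leans on the new `opOperand` and `jumpOp` types (see `jumpOpToTest`) so that X-operand ALU and jump instructions survive an Assemble/Disassemble round trip. A small sketch using only the exported `bpf` API:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/bpf"
)

func main() {
	// JumpIfX compares register A against register X; before this change the
	// disassembler could only fall back to returning the RawInstruction for
	// that encoding.
	ins := bpf.JumpIfX{Cond: bpf.JumpGreaterThan, SkipTrue: 1, SkipFalse: 2}

	raw, err := ins.Assemble()
	if err != nil {
		log.Fatal(err)
	}

	// Disassemble now decodes the operand bit and recovers the original
	// instruction value.
	fmt.Printf("%#v\n", raw.Disassemble())
}
```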
func (a ALUOpConstant) Assemble() (RawInstruction, error) { return RawInstruction{ - Op: opClsALU | opALUSrcConstant | uint16(a.Op), + Op: opClsALU | uint16(opOperandConstant) | uint16(a.Op), K: a.Val, }, nil } @@ -454,7 +448,7 @@ type ALUOpX struct { // Assemble implements the Instruction Assemble method. func (a ALUOpX) Assemble() (RawInstruction, error) { return RawInstruction{ - Op: opClsALU | opALUSrcX | uint16(a.Op), + Op: opClsALU | uint16(opOperandX) | uint16(a.Op), }, nil } @@ -509,7 +503,7 @@ type Jump struct { // Assemble implements the Instruction Assemble method. func (a Jump) Assemble() (RawInstruction, error) { return RawInstruction{ - Op: opClsJump | opJumpAlways, + Op: opClsJump | uint16(opJumpAlways), K: a.Skip, }, nil } @@ -530,11 +524,39 @@ type JumpIf struct { // Assemble implements the Instruction Assemble method. func (a JumpIf) Assemble() (RawInstruction, error) { + return jumpToRaw(a.Cond, opOperandConstant, a.Val, a.SkipTrue, a.SkipFalse) +} + +// String returns the instruction in assembler notation. +func (a JumpIf) String() string { + return jumpToString(a.Cond, fmt.Sprintf("#%d", a.Val), a.SkipTrue, a.SkipFalse) +} + +// JumpIfX skips the following Skip instructions in the program if A +// X is true. +type JumpIfX struct { + Cond JumpTest + SkipTrue uint8 + SkipFalse uint8 +} + +// Assemble implements the Instruction Assemble method. +func (a JumpIfX) Assemble() (RawInstruction, error) { + return jumpToRaw(a.Cond, opOperandX, 0, a.SkipTrue, a.SkipFalse) +} + +// String returns the instruction in assembler notation. +func (a JumpIfX) String() string { + return jumpToString(a.Cond, "x", a.SkipTrue, a.SkipFalse) +} + +// jumpToRaw assembles a jump instruction into a RawInstruction +func jumpToRaw(test JumpTest, operand opOperand, k uint32, skipTrue, skipFalse uint8) (RawInstruction, error) { var ( - cond uint16 + cond jumpOp flip bool ) - switch a.Cond { + switch test { case JumpEqual: cond = opJumpEqual case JumpNotEqual: @@ -552,63 +574,63 @@ func (a JumpIf) Assemble() (RawInstruction, error) { case JumpBitsNotSet: cond, flip = opJumpSet, true default: - return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond) + return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", test) } - jt, jf := a.SkipTrue, a.SkipFalse + jt, jf := skipTrue, skipFalse if flip { jt, jf = jf, jt } return RawInstruction{ - Op: opClsJump | cond, + Op: opClsJump | uint16(cond) | uint16(operand), Jt: jt, Jf: jf, - K: a.Val, + K: k, }, nil } -// String returns the instruction in assembler notation. 
-func (a JumpIf) String() string { - switch a.Cond { +// jumpToString converts a jump instruction to assembler notation +func jumpToString(cond JumpTest, operand string, skipTrue, skipFalse uint8) string { + switch cond { // K == A case JumpEqual: - return conditionalJump(a, "jeq", "jneq") + return conditionalJump(operand, skipTrue, skipFalse, "jeq", "jneq") // K != A case JumpNotEqual: - return fmt.Sprintf("jneq #%d,%d", a.Val, a.SkipTrue) + return fmt.Sprintf("jneq %s,%d", operand, skipTrue) // K > A case JumpGreaterThan: - return conditionalJump(a, "jgt", "jle") + return conditionalJump(operand, skipTrue, skipFalse, "jgt", "jle") // K < A case JumpLessThan: - return fmt.Sprintf("jlt #%d,%d", a.Val, a.SkipTrue) + return fmt.Sprintf("jlt %s,%d", operand, skipTrue) // K >= A case JumpGreaterOrEqual: - return conditionalJump(a, "jge", "jlt") + return conditionalJump(operand, skipTrue, skipFalse, "jge", "jlt") // K <= A case JumpLessOrEqual: - return fmt.Sprintf("jle #%d,%d", a.Val, a.SkipTrue) + return fmt.Sprintf("jle %s,%d", operand, skipTrue) // K & A != 0 case JumpBitsSet: - if a.SkipFalse > 0 { - return fmt.Sprintf("jset #%d,%d,%d", a.Val, a.SkipTrue, a.SkipFalse) + if skipFalse > 0 { + return fmt.Sprintf("jset %s,%d,%d", operand, skipTrue, skipFalse) } - return fmt.Sprintf("jset #%d,%d", a.Val, a.SkipTrue) + return fmt.Sprintf("jset %s,%d", operand, skipTrue) // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips case JumpBitsNotSet: - return JumpIf{Cond: JumpBitsSet, SkipTrue: a.SkipFalse, SkipFalse: a.SkipTrue, Val: a.Val}.String() + return jumpToString(JumpBitsSet, operand, skipFalse, skipTrue) default: - return fmt.Sprintf("unknown instruction: %#v", a) + return fmt.Sprintf("unknown JumpTest %#v", cond) } } -func conditionalJump(inst JumpIf, positiveJump, negativeJump string) string { - if inst.SkipTrue > 0 { - if inst.SkipFalse > 0 { - return fmt.Sprintf("%s #%d,%d,%d", positiveJump, inst.Val, inst.SkipTrue, inst.SkipFalse) +func conditionalJump(operand string, skipTrue, skipFalse uint8, positiveJump, negativeJump string) string { + if skipTrue > 0 { + if skipFalse > 0 { + return fmt.Sprintf("%s %s,%d,%d", positiveJump, operand, skipTrue, skipFalse) } - return fmt.Sprintf("%s #%d,%d", positiveJump, inst.Val, inst.SkipTrue) + return fmt.Sprintf("%s %s,%d", positiveJump, operand, skipTrue) } - return fmt.Sprintf("%s #%d,%d", negativeJump, inst.Val, inst.SkipFalse) + return fmt.Sprintf("%s %s,%d", negativeJump, operand, skipFalse) } // RetA exits the BPF program, returning the value of register A. 
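Putting the new instruction to work end to end: the sketch below builds a tiny filter that accepts a packet only when its first two bytes are equal, using `JumpIfX` to compare register A against register X inside the userspace VM patched in the next hunk. The return constants and the sample packet are arbitrary choices for the example.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/bpf"
)

func main() {
	prog := []bpf.Instruction{
		bpf.LoadAbsolute{Off: 1, Size: 1}, // A = pkt[1]
		bpf.TAX{},                         // X = A
		bpf.LoadAbsolute{Off: 0, Size: 1}, // A = pkt[0]
		bpf.JumpIfX{Cond: bpf.JumpEqual, SkipTrue: 0, SkipFalse: 1},
		bpf.RetConstant{Val: 4096}, // A == X: accept
		bpf.RetConstant{Val: 0},    // A != X: drop
	}

	vm, err := bpf.NewVM(prog)
	if err != nil {
		log.Fatal(err)
	}

	out, err := vm.Run([]byte{0x42, 0x42, 0x00})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("accepted:", out > 0)
}
```

The equivalent comparison against a constant would use `bpf.JumpIf` with `Val` set instead of loading register X first.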
diff --git a/vendor/golang.org/x/net/bpf/vm.go b/vendor/golang.org/x/net/bpf/vm.go index 4c656f1e1..73f57f1f7 100644 --- a/vendor/golang.org/x/net/bpf/vm.go +++ b/vendor/golang.org/x/net/bpf/vm.go @@ -35,6 +35,13 @@ func NewVM(filter []Instruction) (*VM, error) { if check <= int(ins.SkipFalse) { return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) } + case JumpIfX: + if check <= int(ins.SkipTrue) { + return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) + } + if check <= int(ins.SkipFalse) { + return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) + } // Check for division or modulus by zero case ALUOpConstant: if ins.Val != 0 { @@ -109,6 +116,9 @@ func (v *VM) Run(in []byte) (int, error) { case JumpIf: jump := jumpIf(ins, regA) i += jump + case JumpIfX: + jump := jumpIfX(ins, regA, regX) + i += jump case LoadAbsolute: regA, ok = loadAbsolute(ins, in) case LoadConstant: diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go index 516f9462b..f0d2e55bd 100644 --- a/vendor/golang.org/x/net/bpf/vm_instructions.go +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -55,34 +55,41 @@ func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 { } } -func jumpIf(ins JumpIf, value uint32) int { - var ok bool - inV := uint32(ins.Val) +func jumpIf(ins JumpIf, regA uint32) int { + return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, ins.Val) +} - switch ins.Cond { +func jumpIfX(ins JumpIfX, regA uint32, regX uint32) int { + return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, regX) +} + +func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value uint32) int { + var ok bool + + switch cond { case JumpEqual: - ok = value == inV + ok = regA == value case JumpNotEqual: - ok = value != inV + ok = regA != value case JumpGreaterThan: - ok = value > inV + ok = regA > value case JumpLessThan: - ok = value < inV + ok = regA < value case JumpGreaterOrEqual: - ok = value >= inV + ok = regA >= value case JumpLessOrEqual: - ok = value <= inV + ok = regA <= value case JumpBitsSet: - ok = (value & inV) != 0 + ok = (regA & value) != 0 case JumpBitsNotSet: - ok = (value & inV) == 0 + ok = (regA & value) == 0 } if ok { - return int(ins.SkipTrue) + return int(skipTrue) } - return int(ins.SkipFalse) + return int(skipFalse) } func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 5c5451ad8..711bc9366 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -110,6 +110,7 @@ var brokenAuthHeaderProviders = []string{ "https://login.salesforce.com/", "https://login.windows.net", "https://login.live.com/", + "https://login.live-int.com/", "https://oauth.sandbox.trainingpeaks.com/", "https://oauth.trainingpeaks.com/", "https://oauth.vk.com/", diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index ec8e61322..7943853ff 100755 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -193,6 +193,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -445,6 +446,7 @@ ccflags="$@" $2 ~ /^(MS|MNT|UMOUNT)_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || $2 ~ 
/^(O|F|E?FD|NAME|S|PTRACE|PT)_/ || + $2 ~ /^KEXEC_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || $2 !~ "NLA_TYPE_MASK" && diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl b/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl index 3e6ed9df8..a354df5a6 100755 --- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl +++ b/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl @@ -92,6 +92,11 @@ while(<>) { my @in = parseparamlist($in); my @out = parseparamlist($out); + # Try in vain to keep people from editing this file. + # The theory is that they jump into the middle of the file + # without reading the header. + $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; + # So file name. if($modname eq "") { $modname = "libc"; diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index f153c0673..9dd2f32f5 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -12,7 +12,7 @@ import "unsafe" // Round the length of a raw sockaddr up to align it properly. func cmsgAlignOf(salen int) int { - salign := sizeofPtr + salign := SizeofPtr // NOTE: It seems like 64-bit Darwin, DragonFly BSD and // Solaris kernels still require 32-bit aligned access to // network subsystem. diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 02cf204a1..bfa20a971 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1122,7 +1122,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro // The ptrace syscall differs from glibc's ptrace. // Peeks returns the word in *data, not as the return value. - var buf [sizeofPtr]byte + var buf [SizeofPtr]byte // Leading edge. PEEKTEXT/PEEKDATA don't require aligned // access (PEEKUSER warns that it might), but if we don't @@ -1130,12 +1130,12 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro // boundary and not get the bytes leading up to the page // boundary. n := 0 - if addr%sizeofPtr != 0 { - err = ptrace(req, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + if addr%SizeofPtr != 0 { + err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) if err != nil { return 0, err } - n += copy(out, buf[addr%sizeofPtr:]) + n += copy(out, buf[addr%SizeofPtr:]) out = out[n:] } @@ -1173,15 +1173,15 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c // Leading edge. n := 0 - if addr%sizeofPtr != 0 { - var buf [sizeofPtr]byte - err = ptrace(peekReq, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + if addr%SizeofPtr != 0 { + var buf [SizeofPtr]byte + err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) if err != nil { return 0, err } - n += copy(buf[addr%sizeofPtr:], data) + n += copy(buf[addr%SizeofPtr:], data) word := *((*uintptr)(unsafe.Pointer(&buf[0]))) - err = ptrace(pokeReq, pid, addr-addr%sizeofPtr, word) + err = ptrace(pokeReq, pid, addr-addr%SizeofPtr, word) if err != nil { return 0, err } @@ -1189,19 +1189,19 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c } // Interior. 
- for len(data) > sizeofPtr { + for len(data) > SizeofPtr { word := *((*uintptr)(unsafe.Pointer(&data[0]))) err = ptrace(pokeReq, pid, addr+uintptr(n), word) if err != nil { return n, err } - n += sizeofPtr - data = data[sizeofPtr:] + n += SizeofPtr + data = data[SizeofPtr:] } // Trailing edge. if len(data) > 0 { - var buf [sizeofPtr]byte + var buf [SizeofPtr]byte err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) if err != nil { return n, err diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 5f9b2454a..5247d9f90 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -160,3 +160,16 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { } return poll(&fds[0], len(fds), timeout) } + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 6a38dfd5b..41451854b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -136,3 +136,16 @@ func SyncFileRange(fd int, off int64, n int64, flags int) error { // order of their arguments. return syncFileRange2(fd, flags, off, n) } + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 6e4ee0cf2..f52f148f9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -322,3 +322,16 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { } return poll(&fds[0], len(fds), timeout) } + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. 
+ cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 13956b795..64fcda4ae 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -22,10 +22,10 @@ var ( ) const ( - darwin64Bit = runtime.GOOS == "darwin" && sizeofPtr == 8 - dragonfly64Bit = runtime.GOOS == "dragonfly" && sizeofPtr == 8 - netbsd32Bit = runtime.GOOS == "netbsd" && sizeofPtr == 4 - solaris64Bit = runtime.GOOS == "solaris" && sizeofPtr == 8 + darwin64Bit = runtime.GOOS == "darwin" && SizeofPtr == 8 + dragonfly64Bit = runtime.GOOS == "dragonfly" && SizeofPtr == 8 + netbsd32Bit = runtime.GOOS == "netbsd" && SizeofPtr == 4 + solaris64Bit = runtime.GOOS == "solaris" && SizeofPtr == 8 ) // Do the interface allocations only once for common diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index fe564160b..86b980a5a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -879,6 +879,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dcfa66749..286311572 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -879,6 +879,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c2ef50c65..1b58da1e7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 
0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 1a820d668..08377eb4f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -881,6 +881,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b515b2a63..5de2c7aa4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 29a88f0e5..51015f354 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 0767ac1b0..fdd388deb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 269b81318..2d1504612 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eb52e9689..cd8fcd35c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x400 IXON = 0x200 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 0563d34b3..cdb608876 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x400 IXON = 0x200 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + 
KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e95e3f677..9e9472bec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index bad17418e..f33d031ad 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -878,6 +878,26 @@ const ( IXOFF = 0x1000 IXON = 0x400 JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 7fdc85b17..ba93f3e53 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -1,10 +1,10 @@ -// mkerrors.sh -m64 -// Code generated by the command above; DO NOT EDIT. +// mkerrors.sh -Wall -Werror -static -I/tmp/include +// Code generated by the command above; see README.md. DO NOT EDIT. // +build sparc64,linux -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs -- -m64 _const.go +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -1969,174 +1969,182 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol", - 48: "address already in use", - 49: "cannot assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "transport endpoint is already connected", - 57: "transport endpoint is not connected", - 58: "cannot send after transport endpoint shutdown", - 59: "too many references: cannot splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disk quota exceeded", - 70: "stale file handle", - 71: "object is remote", - 72: "device not a stream", - 73: "timer expired", - 74: "out of streams resources", - 75: "no message of desired type", - 76: "bad message", - 77: "identifier removed", - 78: "resource deadlock avoided", - 79: "no locks available", - 80: "machine is not on the network", - 81: "unknown error 81", - 82: "link has been severed", - 83: "advertise error", - 84: "srmount error", - 85: "communication error on send", - 86: "protocol error", - 87: "multihop attempted", - 88: "RFS specific error", - 89: "remote address changed", - 90: "function not implemented", - 91: "streams pipe error", - 92: "value too large for defined data type", - 93: "file descriptor in bad state", - 94: "channel number out of range", - 95: "level 2 not synchronized", - 96: "level 3 halted", - 97: "level 3 reset", - 98: "link number out of range", - 99: "protocol driver not attached", - 100: "no CSI structure available", - 101: "level 2 halted", - 102: "invalid exchange", - 103: "invalid request descriptor", - 104: "exchange full", - 105: "no anode", - 106: "invalid request code", - 107: "invalid slot", - 108: "file locking deadlock error", - 109: 
"bad font file format", - 110: "cannot exec a shared library directly", - 111: "no data available", - 112: "accessing a corrupted shared library", - 113: "package not installed", - 114: "can not access a needed shared library", - 115: "name not unique on network", - 116: "interrupted system call should be restarted", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "invalid or incomplete multibyte or wide character", - 123: "attempting to link in too many shared libraries", - 124: ".lib section in a.out corrupted", - 125: "no medium found", - 126: "wrong medium type", - 127: "operation canceled", - 128: "required key not available", - 129: "key has expired", - 130: "key has been revoked", - 131: "key was rejected by service", - 132: "owner died", - 133: "state not recoverable", - 134: "operation not possible due to RF-kill", - 135: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "ENOTSUP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "cannot assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, 
"EISCONN", "transport endpoint is already connected"}, + {57, "ENOTCONN", "transport endpoint is not connected"}, + {58, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {59, "ETOOMANYREFS", "too many references: cannot splice"}, + {60, "ETIMEDOUT", "connection timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale file handle"}, + {71, "EREMOTE", "object is remote"}, + {72, "ENOSTR", "device not a stream"}, + {73, "ETIME", "timer expired"}, + {74, "ENOSR", "out of streams resources"}, + {75, "ENOMSG", "no message of desired type"}, + {76, "EBADMSG", "bad message"}, + {77, "EIDRM", "identifier removed"}, + {78, "EDEADLK", "resource deadlock avoided"}, + {79, "ENOLCK", "no locks available"}, + {80, "ENONET", "machine is not on the network"}, + {81, "ERREMOTE", "unknown error 81"}, + {82, "ENOLINK", "link has been severed"}, + {83, "EADV", "advertise error"}, + {84, "ESRMNT", "srmount error"}, + {85, "ECOMM", "communication error on send"}, + {86, "EPROTO", "protocol error"}, + {87, "EMULTIHOP", "multihop attempted"}, + {88, "EDOTDOT", "RFS specific error"}, + {89, "EREMCHG", "remote address changed"}, + {90, "ENOSYS", "function not implemented"}, + {91, "ESTRPIPE", "streams pipe error"}, + {92, "EOVERFLOW", "value too large for defined data type"}, + {93, "EBADFD", "file descriptor in bad state"}, + {94, "ECHRNG", "channel number out of range"}, + {95, "EL2NSYNC", "level 2 not synchronized"}, + {96, "EL3HLT", "level 3 halted"}, + {97, "EL3RST", "level 3 reset"}, + {98, "ELNRNG", "link number out of range"}, + {99, "EUNATCH", "protocol driver not attached"}, + {100, "ENOCSI", "no CSI structure available"}, + {101, "EL2HLT", "level 2 halted"}, + {102, "EBADE", "invalid exchange"}, + {103, "EBADR", "invalid request descriptor"}, + {104, "EXFULL", "exchange full"}, + {105, "ENOANO", "no anode"}, + {106, "EBADRQC", "invalid request code"}, + {107, "EBADSLT", "invalid slot"}, + {108, "EDEADLOCK", "file locking deadlock error"}, + {109, "EBFONT", "bad font file format"}, + {110, "ELIBEXEC", "cannot exec a shared library directly"}, + {111, "ENODATA", "no data available"}, + {112, "ELIBBAD", "accessing a corrupted shared library"}, + {113, "ENOPKG", "package not installed"}, + {114, "ELIBACC", "can not access a needed shared library"}, + {115, "ENOTUNIQ", "name not unique on network"}, + {116, "ERESTART", "interrupted system call should be restarted"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {123, "ELIBMAX", "attempting to link in too many shared libraries"}, + {124, "ELIBSCN", ".lib section in a.out corrupted"}, + {125, "ENOMEDIUM", "no medium found"}, + {126, "EMEDIUMTYPE", "wrong medium type"}, + {127, "ECANCELED", "operation canceled"}, + {128, "ENOKEY", "required key not available"}, + {129, "EKEYEXPIRED", "key has expired"}, + {130, "EKEYREVOKED", "key has been revoked"}, + {131, "EKEYREJECTED", "key was rejected by service"}, + {132, 
"EOWNERDEAD", "owner died"}, + {133, "ENOTRECOVERABLE", "state not recoverable"}, + {134, "ERFKILL", "operation not possible due to RF-kill"}, + {135, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "resource lost", - 30: "user defined signal 1", - 31: "user defined signal 2", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "stopped (signal)"}, + {18, "SIGTSTP", "stopped"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGLOST", "power failure"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 11a30786c..46e9ddfb5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -2296,3 +2296,18 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 8300814d2..c8ca4279e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -2343,3 +2343,18 @@ func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd 
int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 002b4e175..870c8f6db 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -2343,3 +2343,18 @@ func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 1a9ba9992..55e79d640 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -2113,3 +2113,18 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index e2e5fc5e0..97b22a499 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -399,6 +399,8 @@ var ( procrecvfrom syscallFunc ) +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0) n = int(r0) @@ -408,6 +410,8 @@ func pipe(p *[2]_C_int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { @@ -416,6 +420,8 @@ func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getcwd(buf []byte) (n int, err error) { var _p0 *byte if len(buf) > 0 { @@ -429,6 +435,8 @@ func Getcwd(buf []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getgroups(ngid int, gid *_Gid_t) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, 
uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) n = int(r0) @@ -438,6 +446,8 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func setgroups(ngid int, gid *_Gid_t) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procsetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) if e1 != 0 { @@ -446,6 +456,8 @@ func setgroups(ngid int, gid *_Gid_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(statusp)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int32(r0) @@ -455,6 +467,8 @@ func wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gethostname(buf []byte) (n int, err error) { var _p0 *byte if len(buf) > 0 { @@ -468,6 +482,8 @@ func gethostname(buf []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimes(path string, times *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -481,6 +497,8 @@ func utimes(path string, times *[2]Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -494,6 +512,8 @@ func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fcntl(fd int, cmd int, arg int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) val = int(r0) @@ -503,6 +523,8 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0) if e1 != 0 { @@ -511,6 +533,8 @@ func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) fd = int(r0) @@ -520,6 +544,8 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) @@ -529,6 +555,8 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendmsg)), 3, uintptr(s), 
uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) @@ -538,6 +566,8 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func acct(path *byte) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0) if e1 != 0 { @@ -546,24 +576,32 @@ func acct(path *byte) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func __makedev(version int, major uint, minor uint) (val uint64) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__makedev)), 3, uintptr(version), uintptr(major), uintptr(minor), 0, 0, 0) val = uint64(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func __major(version int, dev uint64) (val uint) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__major)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) val = uint(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func __minor(version int, dev uint64) (val uint) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__minor)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) val = uint(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) if e1 != 0 { @@ -572,6 +610,8 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) n = int(r0) @@ -581,6 +621,8 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -594,6 +636,8 @@ func Access(path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAdjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0) if e1 != 0 { @@ -602,6 +646,8 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -615,6 +661,8 @@ func Chdir(path string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chmod(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -628,6 +676,8 @@ func Chmod(path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -641,6 +691,8 @@ func Chown(path string, uid int, gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chroot(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -654,6 +706,8 @@ func Chroot(path string) (err error) { return } +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -662,6 +716,8 @@ func Close(fd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Creat(path string, mode uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -676,6 +732,8 @@ func Creat(path string, mode uint32) (fd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(fd int) (nfd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0) nfd = int(r0) @@ -685,6 +743,8 @@ func Dup(fd int) (nfd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0) if e1 != 0 { @@ -693,11 +753,15 @@ func Dup2(oldfd int, newfd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { sysvicall6(uintptr(unsafe.Pointer(&procExit)), 1, uintptr(code), 0, 0, 0, 0, 0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -711,6 +775,8 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -719,6 +785,8 @@ func Fchdir(fd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchmod(fd int, mode uint32) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { @@ -727,6 +795,8 @@ func Fchmod(fd int, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -740,6 +810,8 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { @@ -748,6 +820,8 @@ func Fchown(fd int, uid int, gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -761,6 +835,8 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fdatasync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -769,6 +845,8 @@ func Fdatasync(fd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Flock(fd int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFlock)), 2, 
uintptr(fd), uintptr(how), 0, 0, 0, 0) if e1 != 0 { @@ -777,6 +855,8 @@ func Flock(fd int, how int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fpathconf(fd int, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) val = int(r0) @@ -786,6 +866,8 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { @@ -794,6 +876,8 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -807,6 +891,8 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { @@ -815,6 +901,8 @@ func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 *byte if len(buf) > 0 { @@ -828,18 +916,24 @@ func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getgid() (gid int) { r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetgid)), 0, 0, 0, 0, 0, 0, 0) gid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getpid() (pid int) { r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getpgid(pid int) (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) pgid = int(r0) @@ -849,6 +943,8 @@ func Getpgid(pid int) (pgid int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getpgrp() (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0) pgid = int(r0) @@ -858,24 +954,32 @@ func Getpgrp() (pgid int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Geteuid() (euid int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGeteuid)), 0, 0, 0, 0, 0, 0, 0) euid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getegid() (egid int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGetegid)), 0, 0, 0, 0, 0, 0, 0) egid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getppid() (ppid int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGetppid)), 0, 0, 0, 0, 0, 0, 0) ppid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getpriority(which int, who int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0) n = int(r0) @@ -885,6 +989,8 @@ 
func Getpriority(which int, who int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) if e1 != 0 { @@ -893,6 +999,8 @@ func Getrlimit(which int, lim *Rlimit) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0) if e1 != 0 { @@ -901,6 +1009,8 @@ func Getrusage(who int, rusage *Rusage) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { @@ -909,12 +1019,16 @@ func Gettimeofday(tv *Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&procGetuid)), 0, 0, 0, 0, 0, 0, 0) uid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Kill(pid int, signum syscall.Signal) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procKill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0) if e1 != 0 { @@ -923,6 +1037,8 @@ func Kill(pid int, signum syscall.Signal) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Lchown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -936,6 +1052,8 @@ func Lchown(path string, uid int, gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Link(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -954,6 +1072,8 @@ func Link(path string, link string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { @@ -962,6 +1082,8 @@ func Listen(s int, backlog int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Lstat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -975,6 +1097,8 @@ func Lstat(path string, stat *Stat_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, advice int) (err error) { var _p0 *byte if len(b) > 0 { @@ -987,6 +1111,8 @@ func Madvise(b []byte, advice int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1000,6 +1126,8 @@ func Mkdir(path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1013,6 +1141,8 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifo(path string, mode uint32) (err error) 
{ var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1026,6 +1156,8 @@ func Mkfifo(path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkfifoat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1039,6 +1171,8 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1052,6 +1186,8 @@ func Mknod(path string, mode uint32, dev int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1065,6 +1201,8 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mlock(b []byte) (err error) { var _p0 *byte if len(b) > 0 { @@ -1077,6 +1215,8 @@ func Mlock(b []byte) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mlockall(flags int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1085,6 +1225,8 @@ func Mlockall(flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mprotect(b []byte, prot int) (err error) { var _p0 *byte if len(b) > 0 { @@ -1097,6 +1239,8 @@ func Mprotect(b []byte, prot int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Msync(b []byte, flags int) (err error) { var _p0 *byte if len(b) > 0 { @@ -1109,6 +1253,8 @@ func Msync(b []byte, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Munlock(b []byte) (err error) { var _p0 *byte if len(b) > 0 { @@ -1121,6 +1267,8 @@ func Munlock(b []byte) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Munlockall() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { @@ -1129,6 +1277,8 @@ func Munlockall() (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0) if e1 != 0 { @@ -1137,6 +1287,8 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1151,6 +1303,8 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1165,6 +1319,8 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pathconf(path string, name int) (val int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1179,6 +1335,8 @@ 
func Pathconf(path string, name int) (val int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pause() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { @@ -1187,6 +1345,8 @@ func Pause() (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -1200,6 +1360,8 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -1213,6 +1375,8 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -1226,6 +1390,8 @@ func read(fd int, p []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Readlink(path string, buf []byte) (n int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1244,6 +1410,8 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -1262,6 +1430,8 @@ func Rename(from string, to string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -1280,6 +1450,8 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rmdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1293,6 +1465,8 @@ func Rmdir(path string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proclseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) newoffset = int64(r0) @@ -1302,6 +1476,8 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) if e1 != 0 { @@ -1310,6 +1486,8 @@ func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1318,6 +1496,8 @@ func Setegid(egid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Seteuid(euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSeteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1326,6 +1506,8 @@ func Seteuid(euid int) (err error) { return } +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setgid(gid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetgid)), 1, uintptr(gid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1334,6 +1516,8 @@ func Setgid(gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sethostname(p []byte) (err error) { var _p0 *byte if len(p) > 0 { @@ -1346,6 +1530,8 @@ func Sethostname(p []byte) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setpgid(pid int, pgid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) if e1 != 0 { @@ -1354,6 +1540,8 @@ func Setpgid(pid int, pgid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSetpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0) if e1 != 0 { @@ -1362,6 +1550,8 @@ func Setpriority(which int, who int, prio int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setregid(rgid int, egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0) if e1 != 0 { @@ -1370,6 +1560,8 @@ func Setregid(rgid int, egid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setreuid(ruid int, euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) if e1 != 0 { @@ -1378,6 +1570,8 @@ func Setreuid(ruid int, euid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) if e1 != 0 { @@ -1386,6 +1580,8 @@ func Setrlimit(which int, lim *Rlimit) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) @@ -1395,6 +1591,8 @@ func Setsid() (pid int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setuid(uid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetuid)), 1, uintptr(uid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1403,6 +1601,8 @@ func Setuid(uid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Shutdown(s int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procshutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0) if e1 != 0 { @@ -1411,6 +1611,8 @@ func Shutdown(s int, how int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1424,6 +1626,8 @@ func Stat(path string, stat *Stat_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Statvfs(path string, vfsstat *Statvfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1437,6 +1641,8 @@ func Statvfs(path string, vfsstat *Statvfs_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1455,6 +1661,8 @@ func Symlink(path string, link string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSync)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { @@ -1463,6 +1671,8 @@ func Sync() (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Times(tms *Tms) (ticks uintptr, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0) ticks = uintptr(r0) @@ -1472,6 +1682,8 @@ func Times(tms *Tms) (ticks uintptr, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Truncate(path string, length int64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1485,6 +1697,8 @@ func Truncate(path string, length int64) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1493,6 +1707,8 @@ func Fsync(fd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Ftruncate(fd int, length int64) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFtruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0) if e1 != 0 { @@ -1501,12 +1717,16 @@ func Ftruncate(fd int, length int64) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Umask(mask int) (oldmask int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procUmask)), 1, uintptr(mask), 0, 0, 0, 0, 0) oldmask = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Uname(buf *Utsname) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1515,6 +1735,8 @@ func Uname(buf *Utsname) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unmount(target string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(target) @@ -1528,6 +1750,8 @@ func Unmount(target string, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unlink(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1541,6 +1765,8 @@ func Unlink(path string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unlinkat(dirfd int, path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1554,6 +1780,8 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Ustat(dev int, ubuf *Ustat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0) if e1 != 0 { @@ -1562,6 +1790,8 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Utime(path string, buf *Utimbuf) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1575,6 +1805,8 @@ func Utime(path string, buf *Utimbuf) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { @@ -1583,6 +1815,8 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { @@ -1591,6 +1825,8 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) @@ -1600,6 +1836,8 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmunmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0) if e1 != 0 { @@ -1608,6 +1846,8 @@ func munmap(addr uintptr, length uintptr) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) @@ -1617,6 +1857,8 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { var _p0 *byte if len(buf) > 0 { @@ -1629,6 +1871,8 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func socket(domain int, typ int, proto int) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) fd = int(r0) @@ -1638,6 +1882,8 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { @@ -1646,6 +1892,8 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func write(fd int, p []byte) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -1659,6 +1907,8 @@ func write(fd int, p []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), 
uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { @@ -1667,6 +1917,8 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { @@ -1675,6 +1927,8 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { @@ -1683,6 +1937,8 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { var _p0 *byte if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index f1cfe7db1..cedc9b0f2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 PathMax = 0x3ff ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index 95581a3bc..f46482d27 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x3ff ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index 327af5fba..2aeb52a88 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 116e6e075..0d0d9f2cc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 2750ad760..04e344b78 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -7,11 +7,11 @@ package 
unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 8cead0996..9fec185c1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index c01ae6701..7b34e2e2c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 8006c5638..11380294a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 716774ded..a6fc12718 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 92e07b00f..6b3006d6b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 0f3106fa3..3879002a9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 24d6c6fd4..cbc2c7d07 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 77c7a3e19..6ed804fa3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 044d97ba9..b5fe7ddf7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 46e6fdcc8..7379ad2d8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index a66849e3e..0b131a24e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index bf3eca7fe..9191020cc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 4aefec3f0..8fcad32bf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 
81b322628..a9d1b6c9f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 9ba53b4e8..f0f5214a5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index cf39aa019..09c905866 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 6b27e16b5..5e86e496c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 8e7384b89..1fc7f7dea 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -5,11 +5,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x1000 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 9e9088de1..1fdc5fd21 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index ed3f17366..711f78067 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index d263b6147..fa1a16bae 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 412db97c1..c8509bf0e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index c45df92a0..200575d94 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 927bf1047..3e20cdf09 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x4 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x4 - sizeofLongLong = 0x8 + SizeofPtr = 0x4 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x4 + SizeofLongLong = 0x8 ) type ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 0543e1a49..8531a190f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -6,11 +6,11 @@ package unix const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 PathMax = 0x400 MaxHostNameLen = 0x100 ) diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 0bae19fbe..b1dfbf21a 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/koIjeaqQT2U71TcPliz06UnwrgA\"", + "etag": "\"J3WqvAcMk4eQjJXvfSI4Yr8VouA/VLlcuW_H_TYnthWtTxma7o2oqnM\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -16497,7 +16497,7 @@ } } }, - "revision": "20180906", + "revision": "20180916", "rootUrl": "https://www.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -26043,7 
+26043,7 @@ "type": "object" }, "Network": { - "description": "Represents a Network resource. Read Networks and Firewalls for more information. (== resource_for v1.networks ==) (== resource_for beta.networks ==)", + "description": "Represents a Network resource. Read Virtual Private Cloud (VPC) Network Overview for more information. (== resource_for v1.networks ==) (== resource_for beta.networks ==)", "id": "Network", "properties": { "IPv4Range": { @@ -26052,7 +26052,7 @@ "type": "string" }, "autoCreateSubnetworks": { - "description": "When set to true, the network is created in \"auto subnet mode\". When set to false, the network is in \"custom subnet mode\".\n\nIn \"auto subnet mode\", a newly created network is assigned the default CIDR of 10.128.0.0/9 and it automatically creates one subnetwork per region.", + "description": "When set to true, the VPC network is created in \"auto\" mode. When set to false, the VPC network is created in \"custom\" mode.\n\nAn auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges.", "type": "boolean" }, "creationTimestamp": { @@ -26064,7 +26064,7 @@ "type": "string" }, "gatewayIPv4": { - "description": "A gateway address for default routing to other networks. This value is read only and is selected by the Google Compute Engine, typically as the first usable address in the IPv4Range.", + "description": "[Output Only] The gateway address for default routing out of the network. This value is read only and is selected by GCP.", "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", "type": "string" }, @@ -26104,7 +26104,7 @@ "type": "string" }, "subnetworks": { - "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this network.", + "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this VPC network.", "items": { "type": "string" }, @@ -26312,7 +26312,7 @@ "id": "NetworkRoutingConfig", "properties": { "routingMode": { - "description": "The network-wide routing mode to use. If set to REGIONAL, this network's cloud routers will only advertise routes with subnetworks of this network in the same region as the router. If set to GLOBAL, this network's cloud routers will advertise routes with all subnetworks of this network, across regions.", + "description": "The network-wide routing mode to use. If set to REGIONAL, this network's cloud routers will only advertise routes with subnets of this network in the same region as the router. If set to GLOBAL, this network's cloud routers will advertise routes with all subnets of this network, across regions.", "enum": [ "GLOBAL", "REGIONAL" diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 33bc6b292..0f666513d 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -14582,22 +14582,22 @@ func (s *NamedPort) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Network: Represents a Network resource. Read Networks and Firewalls -// for more information. (== resource_for v1.networks ==) (== -// resource_for beta.networks ==) +// Network: Represents a Network resource. Read Virtual Private Cloud +// (VPC) Network Overview for more information. 
(== resource_for +// v1.networks ==) (== resource_for beta.networks ==) type Network struct { // IPv4Range: The range of internal addresses that are legal on this // network. This range is a CIDR specification, for example: // 192.168.0.0/16. Provided by the client when the network is created. IPv4Range string `json:"IPv4Range,omitempty"` - // AutoCreateSubnetworks: When set to true, the network is created in - // "auto subnet mode". When set to false, the network is in "custom - // subnet mode". + // AutoCreateSubnetworks: When set to true, the VPC network is created + // in "auto" mode. When set to false, the VPC network is created in + // "custom" mode. // - // In "auto subnet mode", a newly created network is assigned the - // default CIDR of 10.128.0.0/9 and it automatically creates one - // subnetwork per region. + // An auto mode VPC network starts with one subnet per region. Each + // subnet has a predetermined range as described in Auto mode VPC + // network IP ranges. AutoCreateSubnetworks bool `json:"autoCreateSubnetworks,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -14608,9 +14608,8 @@ type Network struct { // property when you create the resource. Description string `json:"description,omitempty"` - // GatewayIPv4: A gateway address for default routing to other networks. - // This value is read only and is selected by the Google Compute Engine, - // typically as the first usable address in the IPv4Range. + // GatewayIPv4: [Output Only] The gateway address for default routing + // out of the network. This value is read only and is selected by GCP. GatewayIPv4 string `json:"gatewayIPv4,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -14642,7 +14641,7 @@ type Network struct { SelfLink string `json:"selfLink,omitempty"` // Subnetworks: [Output Only] Server-defined fully-qualified URLs for - // all subnetworks in this network. + // all subnetworks in this VPC network. Subnetworks []string `json:"subnetworks,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -14982,9 +14981,9 @@ func (s *NetworkPeering) MarshalJSON() ([]byte, error) { type NetworkRoutingConfig struct { // RoutingMode: The network-wide routing mode to use. If set to // REGIONAL, this network's cloud routers will only advertise routes - // with subnetworks of this network in the same region as the router. If - // set to GLOBAL, this network's cloud routers will advertise routes - // with all subnetworks of this network, across regions. + // with subnets of this network in the same region as the router. If set + // to GLOBAL, this network's cloud routers will advertise routes with + // all subnets of this network, across regions. // // Possible values: // "GLOBAL" diff --git a/vendor/google.golang.org/api/iam/v1/iam-api.json b/vendor/google.golang.org/api/iam/v1/iam-api.json index e5230f575..4536f018d 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-api.json +++ b/vendor/google.golang.org/api/iam/v1/iam-api.json @@ -1081,7 +1081,7 @@ } } }, - "revision": "20180920", + "revision": "20181005", "rootUrl": "https://iam.googleapis.com/", "schemas": { "AuditConfig": { @@ -1737,7 +1737,7 @@ "id": "ServiceAccount", "properties": { "displayName": { - "description": "Optional. A user-specified description of the service account. Must be\nfewer than 100 UTF-8 bytes.", + "description": "Optional. 
A user-specified name for the service account.\nMust be less than or equal to 100 UTF-8 bytes.", "type": "string" }, "email": { @@ -1745,7 +1745,7 @@ "type": "string" }, "etag": { - "description": "Used to perform a consistent read-modify-write.", + "description": "Optional. Note: `etag` is an inoperable legacy field that is only returned\nfor backwards compatibility.", "format": "byte", "type": "string" }, diff --git a/vendor/google.golang.org/api/iam/v1/iam-gen.go b/vendor/google.golang.org/api/iam/v1/iam-gen.go index 7797779a0..b75aadc67 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-gen.go +++ b/vendor/google.golang.org/api/iam/v1/iam-gen.go @@ -1622,15 +1622,17 @@ func (s *Role) MarshalJSON() ([]byte, error) { // the // `unique_id` of the service account. type ServiceAccount struct { - // DisplayName: Optional. A user-specified description of the service - // account. Must be - // fewer than 100 UTF-8 bytes. + // DisplayName: Optional. A user-specified name for the service + // account. + // Must be less than or equal to 100 UTF-8 bytes. DisplayName string `json:"displayName,omitempty"` // Email: @OutputOnly The email address of the service account. Email string `json:"email,omitempty"` - // Etag: Used to perform a consistent read-modify-write. + // Etag: Optional. Note: `etag` is an inoperable legacy field that is + // only returned + // for backwards compatibility. Etag string `json:"etag,omitempty"` // Name: The resource name of the service account in the following diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index c79b835b6..e14f492b4 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -59,7 +59,9 @@ func dial(ctx context.Context, insecure bool, opts []option.ClientOption) (*grpc if o.GRPCConn != nil { return o.GRPCConn, nil } - var grpcOpts []grpc.DialOption + grpcOpts := []grpc.DialOption{ + grpc.WithDisableRetry(), // We don't want to have two methods of retry until we're ready for it. + } if insecure { grpcOpts = []grpc.DialOption{grpc.WithInsecure()} } else if !o.NoAuth { diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go index 1fc8aaecb..7e9e63c42 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -5,10 +5,9 @@ package annotations import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index ac3366c40..abe688ec7 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -5,9 +5,8 @@ package annotations import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
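Note on the transport change above (vendor/google.golang.org/api/transport/grpc/dial.go): the default dial options are now seeded with `grpc.WithDisableRetry()` so the API client layer keeps a single retry mechanism rather than stacking gRPC-level retries on top of its own. Below is a minimal, hedged sketch of a caller-side dial using that option; the `localhost:50051` target is a placeholder, and the real vendored transport layers in TLS/auth credentials instead of `WithInsecure`.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Mirror the vendored change: disable gRPC's own retry machinery at dial
	// time so only one retry path (the API client's) is in play.
	opts := []grpc.DialOption{
		grpc.WithDisableRetry(),
		grpc.WithInsecure(), // plaintext for the sketch only
	}

	conn, err := grpc.Dial("localhost:50051", opts...)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```

Because the option is applied when the `*grpc.ClientConn` is created, it affects every RPC made over that connection, which is why the vendored helper sets it once in its default option slice.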
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go index 499a7ef49..7cf960468 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go @@ -5,12 +5,11 @@ package iam import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" context "golang.org/x/net/context" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go index cc153e99a..5d966b067 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go @@ -5,10 +5,9 @@ package iam import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 36e884591..40ea63ab1 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -5,9 +5,8 @@ package code import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index b4cd5aae9..29db5f7cd 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -5,10 +5,9 @@ package errdetails import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" duration "github.com/golang/protobuf/ptypes/duration" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 9c9d90833..d13bcbaf4 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -5,10 +5,9 @@ package status import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
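The genproto hunks above are mechanical: the `math` import simply moves below the third-party imports in each generated file. The context line they all end on, "Reference imports to suppress errors if they are not otherwise used", refers to the blank-identifier pins that follow it in protoc-generated code. A minimal sketch of that idiom, assuming typical protoc-gen-go output (the package name here is illustrative), is:

```go
package pbexample

import (
	"fmt"
	"math"

	proto "github.com/golang/protobuf/proto"
)

// Reference imports to suppress errors if they are not otherwise used.
// Pinning each import with a blank assignment keeps the generated file
// compiling even when a particular helper ends up unused.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
```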
diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go index b3afc7093..c61d87e80 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/keys.pb.go @@ -5,11 +5,10 @@ package spanner import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" _struct "github.com/golang/protobuf/ptypes/struct" _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go index 4862acddd..0f83aea93 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/mutation.pb.go @@ -5,11 +5,10 @@ package spanner import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" _struct "github.com/golang/protobuf/ptypes/struct" _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/query_plan.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/query_plan.pb.go index 60fe4ce65..4f19cac96 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/query_plan.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/query_plan.pb.go @@ -5,11 +5,10 @@ package spanner import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" _struct "github.com/golang/protobuf/ptypes/struct" _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/result_set.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/result_set.pb.go index 71f5e764e..a3ac61d2e 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/result_set.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/result_set.pb.go @@ -5,11 +5,10 @@ package spanner import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" _struct "github.com/golang/protobuf/ptypes/struct" _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go index 4ae419210..79246d03c 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go @@ -5,8 +5,6 @@ package spanner import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" empty "github.com/golang/protobuf/ptypes/empty" _struct "github.com/golang/protobuf/ptypes/struct" @@ -14,6 +12,7 @@ import ( context "golang.org/x/net/context" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/transaction.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/transaction.pb.go index 5ebfe357b..1bf354535 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/transaction.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/transaction.pb.go @@ -5,12 +5,11 @@ package spanner import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" duration "github.com/golang/protobuf/ptypes/duration" timestamp "github.com/golang/protobuf/ptypes/timestamp" _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go index d60737cce..0021b0ae7 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go @@ -5,10 +5,9 @@ package spanner import ( fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 789adfd65..29ffb00d3 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -43,3 +43,25 @@ Please update proto package, gRPC package and rebuild the proto files: - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - `go get -u google.golang.org/grpc` - `protoc --go_out=plugins=grpc:. *.proto` + +#### How to turn on logging + +The default logger is controlled by the environment variables. Turn everything +on by setting: + +``` +GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +#### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index eb2231a4c..ee1703f03 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -28,6 +28,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -154,12 +155,17 @@ type PickOptions struct { // FullMethodName is the method name that NewClientStream() is called // with. The canonical format is /service/Method. FullMethodName string + // Header contains the metadata from the RPC's client header. The metadata + // should not be modified; make a copy first if needed. + Header metadata.MD } // DoneInfo contains additional information for done. type DoneInfo struct { // Err is the rpc error the RPC finished with. It could be nil. 
Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD // BytesSent indicates if any bytes have been sent to the server. BytesSent bool // BytesReceived indicates if any byte has been received from the server. diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 4f2c77ffc..a31703201 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. @@ -643,11 +644,9 @@ func (cc *ClientConn) incrCallsFailed() { atomic.AddInt64(&cc.czData.callsFailed, 1) } -// connect starts to creating transport and also starts the transport monitor -// goroutine for this ac. +// connect starts creating a transport. // It does nothing if the ac is not IDLE. // TODO(bar) Move this to the addrConn section. -// This was part of resetAddrConn, keep it here to make the diff look clean. func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { @@ -718,8 +717,10 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + hdr, _ := metadata.FromOutgoingContext(ctx) t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{ FullMethodName: method, + Header: hdr, }) if err != nil { return nil, nil, toRPCErr(err) @@ -749,6 +750,13 @@ func (cc *ClientConn) handleServiceConfig(js string) error { return err } cc.mu.Lock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } cc.scRaw = js cc.sc = sc @@ -880,9 +888,6 @@ type addrConn struct { // Use updateConnectivityState for updating addrConn's connectivity state. state connectivity.State - // ready is closed and becomes nil when a new transport is up or failed - // due to timeout. - ready chan struct{} tearDownErr error // The reason this addrConn is torn down. @@ -963,6 +968,23 @@ func (ac *addrConn) resetTransport(resolveNow bool) { ac.mu.Unlock() } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + // If the connection is READY, a failure must have occurred. 
+ // Otherwise, we'll consider this is a transient failure when: + // We've exhausted all addresses + // We're in CONNECTING + // And it's not the very first addr to try TODO(deklerk) find a better way to do this than checking ac.successfulHandshake + if ac.state == connectivity.Ready || (ac.addrIdx == len(ac.addrs)-1 && ac.state == connectivity.Connecting && !ac.successfulHandshake) { + ac.updateConnectivityState(connectivity.TransientFailure) + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + } + ac.mu.Unlock() + if err := ac.nextAddr(); err != nil { return } @@ -972,10 +994,6 @@ func (ac *addrConn) resetTransport(resolveNow bool) { ac.mu.Unlock() return } - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } ac.transport = nil backoffIdx := ac.backoffIdx @@ -1109,6 +1127,8 @@ func (ac *addrConn) createTransport(backoffNum int, addr resolver.Address, copts // We got the preface - huzzah! things are good. case <-onCloseCalled: // The transport has already closed - noop. + close(allowedToReset) + return nil } } else { go func() { @@ -1170,10 +1190,6 @@ func (ac *addrConn) createTransport(backoffNum int, addr resolver.Address, copts ac.cc.handleSubConnStateChange(ac.acbw, ac.state) ac.transport = newTr ac.curAddr = addr - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } ac.mu.Unlock() @@ -1213,13 +1229,7 @@ func (ac *addrConn) nextAddr() error { ac.mu.Unlock() return errConnClosing } - ac.updateConnectivityState(connectivity.TransientFailure) - ac.cc.handleSubConnStateChange(ac.acbw, ac.state) ac.cc.resolveNow(resolver.ResolveNowOption{}) - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } backoffDeadline := ac.backoffDeadline b := ac.resetBackoff ac.mu.Unlock() @@ -1297,10 +1307,6 @@ func (ac *addrConn) tearDown(err error) { ac.events.Finish() ac.events = nil } - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } if channelz.IsOn() { channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ Desc: "Subchannel Deleted", diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go index 4c671f6a3..6a349b2f5 100644 --- a/vendor/google.golang.org/grpc/health/health.go +++ b/vendor/google.golang.org/grpc/health/health.go @@ -55,9 +55,9 @@ func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*h Status: healthpb.HealthCheckResponse_SERVING, }, nil } - if status, ok := s.statusMap[in.Service]; ok { + if servingStatus, ok := s.statusMap[in.Service]; ok { return &healthpb.HealthCheckResponse{ - Status: status, + Status: servingStatus, }, nil } return nil, status.Error(codes.NotFound, "unknown service") diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go index 884910c4e..b24600480 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -20,11 +20,13 @@ package channelz -import "google.golang.org/grpc/grpclog" +import ( + "sync" -func init() { - grpclog.Infof("Channelz: socket options are not supported on non-linux os and appengine.") -} + "google.golang.org/grpc/grpclog" +) + +var once sync.Once // SocketOptionData defines the struct to hold socket option data, and related // getter function to obtain info from fd. @@ -35,4 +37,8 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). 
// Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) {} +func (s *SocketOptionData) Getsockopt(fd uintptr) { + once.Do(func() { + grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") + }) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3299fe9cb..0c3c47e2a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -682,7 +682,9 @@ func (t *http2Client) CloseStream(s *Stream, err error) { func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { - // If it was already done, return. + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done return } // status and trailers can be updated here without any synchronization because the stream goroutine will @@ -696,8 +698,6 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // This will unblock reads eventually. s.write(recvMsg{err: err}) } - // This will unblock write. - close(s.done) // If headerChan isn't closed, then close it. if atomic.SwapUint32(&s.headerDone, 1) == 0 { s.noHeaders = true @@ -733,6 +733,8 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. return true } t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) } // Close kicks off the shutdown process of the transport. This should be called @@ -1171,7 +1173,9 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if !endStream { return } - t.closeStream(s, io.EOF, false, http2.ErrCodeNo, state.status(), state.mdata, true) + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.mdata, true) } // reader runs as a separate goroutine in charge of reading data from network diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index d076af691..6849e37a5 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -598,20 +598,17 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return nil } -// For the two compressor parameters, both should not be set, but if they are, -// dc takes precedence over compressor. -// TODO(dfawley): wrap the old compressor/decompressor using the new API? 
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error { +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) ([]byte, error) { pf, d, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return err + return nil, err } if inPayload != nil { inPayload.WireLength = len(d) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return st.Err() + return nil, st.Err() } if pf == compressionMade { @@ -620,23 +617,34 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf if dc != nil { d, err = dc.Do(bytes.NewReader(d)) if err != nil { - return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } else { dcReader, err := compressor.Decompress(bytes.NewReader(d)) if err != nil { - return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } d, err = ioutil.ReadAll(dcReader) if err != nil { - return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } } if len(d) > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + } + return d, nil +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error { + d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, inPayload, compressor) + if err != nil { + return err } if err := c.Unmarshal(d, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 5c7d5b635..920da5e01 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "errors" "fmt" "io" @@ -33,8 +32,6 @@ import ( "sync/atomic" "time" - "io/ioutil" - "golang.org/x/net/context" "golang.org/x/net/trace" @@ -901,76 +898,32 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } - p := &parser{r: stream} - pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) - if err == io.EOF { - // The entire stream is done (for unary RPC only). 
- return err - } - if err == io.ErrUnexpectedEOF { - err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), + } } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, inPayload, decomp) if err != nil { if st, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, st); e != nil { grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) - } } return err } if channelz.IsOn() { t.IncrMsgRecv() } - if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - return st.Err() - } - var inPayload *stats.InPayload - if sh != nil { - inPayload = &stats.InPayload{ - RecvTime: time.Now(), - } - } df := func(v interface{}) error { - if inPayload != nil { - inPayload.WireLength = len(req) - } - if pf == compressionMade { - var err error - if dc != nil { - req, err = dc.Do(bytes.NewReader(req)) - if err != nil { - return status.Errorf(codes.Internal, err.Error()) - } - } else { - tmp, _ := decomp.Decompress(bytes.NewReader(req)) - req, err = ioutil.ReadAll(tmp) - if err != nil { - return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } - } - } - if len(req) > s.opts.maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with - // java implementation. - return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize) - } - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } if inPayload != nil { inPayload.Payload = v - inPayload.Data = req - inPayload.Length = len(req) + inPayload.Data = d + inPayload.Length = len(d) sh.HandleRPC(stream.Context(), inPayload) } if trInfo != nil { @@ -1180,47 +1133,27 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } service := sm[:pos] method := sm[pos+1:] - srv, ok := s.m[service] - if !ok { - if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + + if srv, ok := s.m[service]; ok { + if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) return } - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) - trInfo.tr.SetError() + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return } - errDesc := fmt.Sprintf("unknown service %v", service) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } - return - } - // Unary RPC or Streaming RPC? 
- if md, ok := srv.md[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) - return - } - if sd, ok := srv.sd[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) - return - } - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) - trInfo.tr.SetError() } + // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) return } - errDesc := fmt.Sprintf("unknown method %v", method) + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("unknown service %v", service) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index e0d735265..a305fe0a4 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -229,6 +229,9 @@ type jsonSC struct { } func parseServiceConfig(js string) (ServiceConfig, error) { + if len(js) == 0 { + return ServiceConfig{}, fmt.Errorf("no JSON service config provided") + } var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 492b650d5..b71eb3112 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -816,11 +816,14 @@ func (a *csAttempt) finish(err error) { if a.done != nil { br := false + var tr metadata.MD if a.s != nil { br = a.s.BytesReceived() + tr = a.s.Trailer() } a.done(balancer.DoneInfo{ Err: err, + Trailer: tr, BytesSent: a.s != nil, BytesReceived: br, }) diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 2ce2e2d78..dcea42283 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
@@ -469,24 +468,6 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1861,51 +1842,14 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1915,46 +1859,85 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + 
mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue } + m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 802f22a63..9d7835bc2 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto -// DO NOT EDIT! /* Package resource is a generated protocol buffer package. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index b7508f033..81320c9c8 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto -// DO NOT EDIT! /* Package v1 is a generated protocol buffer package. 
@@ -1768,24 +1767,6 @@ func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -5043,51 +5024,14 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.MatchLabels == nil { m.MatchLabels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5097,41 +5041,80 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := 
int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.MatchLabels[mapkey] = mapvalue - } else { - var mapvalue string - m.MatchLabels[mapkey] = mapvalue } + m.MatchLabels[mapkey] = mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -6146,51 +6129,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6200,41 +6146,80 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 12: if wireType != 2 { @@ -6262,51 +6247,14 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6316,41 +6264,80 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { 
return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 13: if wireType != 2 { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 967e0f530..9b15989c8 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto -// DO NOT EDIT! /* Package runtime is a generated protocol buffer package. @@ -158,24 +157,6 @@ func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 5c9934c73..28a61d5fb 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto -// DO NOT EDIT! /* Package schema is a generated protocol buffer package. diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 5c2ac4f23..48dd7d9c5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto -// DO NOT EDIT! /* Package intstr is a generated protocol buffer package. @@ -81,24 +80,6 @@ func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) diff --git a/vendor/vendor.json b/vendor/vendor.json index 70fa95b16..3458395e0 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -21,104 +21,104 @@ { "checksumSHA1": "NkpNQgmrdzGr9HUDL8Bp5YAxt0k=", "path": "cloud.google.com/go/civil", - "revision": "92de86defea02b7a63301634831563b0342ca7cb", - "revisionTime": "2018-10-03T12:25:48Z" + "revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { "checksumSHA1": "j/0+O4eJ99Jrb9un/bwi5npLyOM=", "path": "cloud.google.com/go/compute/metadata", - "revision": "92de86defea02b7a63301634831563b0342ca7cb", - "revisionTime": "2018-10-03T12:25:48Z" + "revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { "checksumSHA1": "8ngwydk354oUgPBkDhoQbXdOAb4=", "path": "cloud.google.com/go/iam", - "revision": "92de86defea02b7a63301634831563b0342ca7cb", - "revisionTime": "2018-10-03T12:25:48Z" + "revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { "checksumSHA1": "vKDFB3PiL/TxkdcQFQeSaDuWx2k=", "path": "cloud.google.com/go/internal", - "revision": "92de86defea02b7a63301634831563b0342ca7cb", - "revisionTime": "2018-10-03T12:25:48Z" + "revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { "checksumSHA1": "WoCKqzHYdJLh49JjXVz4GVGG8/w=", "path": "cloud.google.com/go/internal/atomiccache", - "revision": "92de86defea02b7a63301634831563b0342ca7cb", - "revisionTime": "2018-10-03T12:25:48Z" + "revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { "checksumSHA1": "WjXSEFt9029Hy8oo9qSx619Vg2M=", "path": "cloud.google.com/go/internal/fields", - "revision": "92de86defea02b7a63301634831563b0342ca7cb", - "revisionTime": "2018-10-03T12:25:48Z" + 
"revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { "checksumSHA1": "wQ4uGuRwMb24vG16pPQDOOCPkFo=", "path": "cloud.google.com/go/internal/optional", - "revision": "92de86defea02b7a63301634831563b0342ca7cb", - "revisionTime": "2018-10-03T12:25:48Z" + "revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { "checksumSHA1": "lcbZjG55uLL4Syq+zH6S6CK0OsI=", "path": "cloud.google.com/go/internal/protostruct", - "revision": "92de86defea02b7a63301634831563b0342ca7cb", - "revisionTime": "2018-10-03T12:25:48Z" + "revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { "checksumSHA1": "Ekiae3zc5Z77a75tHBxHq8n/AGU=", "path": "cloud.google.com/go/internal/trace", - "revision": "92de86defea02b7a63301634831563b0342ca7cb", - "revisionTime": "2018-10-03T12:25:48Z" + "revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { "checksumSHA1": "oIsDazjda0HX8LUfgnW/USVYx/k=", "path": "cloud.google.com/go/internal/version", - "revision": "92de86defea02b7a63301634831563b0342ca7cb", - "revisionTime": "2018-10-03T12:25:48Z" + "revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { - "checksumSHA1": "ftS+HNUgZV1Z478PwMeVZIjk+S8=", + "checksumSHA1": "PlVe7eNWApFNGnmLn2NsX9WV31c=", "path": "cloud.google.com/go/spanner", - "revision": "e19e9a0629de2f16b8e1db5dd44f2c2b53313099", - "revisionTime": "2018-10-02T19:39:52Z" + "revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { "checksumSHA1": "LJ6MctI3jzGcYFNLKj2fFQkOScs=", "path": "cloud.google.com/go/storage", - "revision": "e19e9a0629de2f16b8e1db5dd44f2c2b53313099", - "revisionTime": "2018-10-02T19:39:52Z" + "revision": "5f0ffe7729373c98a5801de148565a893a0d2899", + "revisionTime": "2018-10-10T17:36:19Z" }, { "checksumSHA1": "giyBgcgqsXnR5yMNZ6msWxHonq0=", "path": "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute", - "revision": "6d20bdbae88c06c36d72eb512295417693bfdf4e", - "revisionTime": "2018-09-27T22:44:43Z" + "revision": "ef9744da754d0cf00d0cfeae7d1a83f2245a4b1c", + "revisionTime": "2018-10-15T17:46:20Z" }, { "checksumSHA1": "R/wi+9kJaTVXwJDbeqSVEmmdzbU=", "path": "github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac", - "revision": "6d20bdbae88c06c36d72eb512295417693bfdf4e", - "revisionTime": "2018-09-27T22:44:43Z" + "revision": "ef9744da754d0cf00d0cfeae7d1a83f2245a4b1c", + "revisionTime": "2018-10-15T17:46:20Z" }, { "checksumSHA1": "E7n1e1+L/fY7TVayjtwOXaMilD4=", "path": "github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization", - "revision": "6d20bdbae88c06c36d72eb512295417693bfdf4e", - "revisionTime": "2018-09-27T22:44:43Z" + "revision": "ef9744da754d0cf00d0cfeae7d1a83f2245a4b1c", + "revisionTime": "2018-10-15T17:46:20Z" }, { - "checksumSHA1": "pbHi9xc373inHdf5me73LGIFV1w=", + "checksumSHA1": "Fo0yAWjaIGDb4GNhqnLCgyaPzNI=", "path": "github.com/Azure/azure-sdk-for-go/storage", - "revision": "6d20bdbae88c06c36d72eb512295417693bfdf4e", - "revisionTime": "2018-09-27T22:44:43Z" + "revision": "ef9744da754d0cf00d0cfeae7d1a83f2245a4b1c", + "revisionTime": "2018-10-15T17:46:20Z" }, { - "checksumSHA1": "cAJGUx9EvLR1S1x0ohcW8MbVe1A=", + "checksumSHA1": "YkkwtYnJMafdyEkOUtE1YasMoqs=", "path": "github.com/Azure/azure-sdk-for-go/version", - "revision": "6d20bdbae88c06c36d72eb512295417693bfdf4e", - "revisionTime": 
"2018-09-27T22:44:43Z" + "revision": "ef9744da754d0cf00d0cfeae7d1a83f2245a4b1c", + "revisionTime": "2018-10-15T17:46:20Z" }, { "checksumSHA1": "9NFR6RG8H2fNyKHscGmuGLQhRm4=", @@ -135,56 +135,62 @@ { "checksumSHA1": "L4fDMZadX7zBb8jPrtZteOVT+SY=", "path": "github.com/Azure/go-autorest/autorest", - "revision": "87f168d7649781e529653290e223c9c84b789c00", - "revisionTime": "2018-09-29T02:23:26Z" + "revision": "4b7f49dc5db2e1e6d528524d269b4181981a7ebf", + "revisionTime": "2018-10-15T21:13:47Z" }, { - "checksumSHA1": "LM1DXwMxkHA5srchZO1MGkyqp1I=", + "checksumSHA1": "ua1k5OMVO+I13Pl0sp04vA9+440=", "path": "github.com/Azure/go-autorest/autorest/adal", - "revision": "87f168d7649781e529653290e223c9c84b789c00", - "revisionTime": "2018-09-29T02:23:26Z" + "revision": "4b7f49dc5db2e1e6d528524d269b4181981a7ebf", + "revisionTime": "2018-10-15T21:13:47Z" }, { - "checksumSHA1": "vetwXbWpF1wf61SjGa8UfM01kws=", + "checksumSHA1": "2MqWWawO0CazxJJRmkD4KaFwBTc=", "path": "github.com/Azure/go-autorest/autorest/azure", - "revision": "87f168d7649781e529653290e223c9c84b789c00", - "revisionTime": "2018-09-29T02:23:26Z" + "revision": "4b7f49dc5db2e1e6d528524d269b4181981a7ebf", + "revisionTime": "2018-10-15T21:13:47Z" }, { - "checksumSHA1": "k2IyOrvev4k4QyZzvYuRPy5wDHg=", + "checksumSHA1": "j5aBk8CqJ6uo1Uvm0gUp+vH6QDA=", "path": "github.com/Azure/go-autorest/autorest/azure/auth", - "revision": "87f168d7649781e529653290e223c9c84b789c00", - "revisionTime": "2018-09-29T02:23:26Z" + "revision": "4b7f49dc5db2e1e6d528524d269b4181981a7ebf", + "revisionTime": "2018-10-15T21:13:47Z" + }, + { + "checksumSHA1": "qvPx1wQDl9JT9vm45+bHHU92luw=", + "path": "github.com/Azure/go-autorest/autorest/azure/cli", + "revision": "4b7f49dc5db2e1e6d528524d269b4181981a7ebf", + "revisionTime": "2018-10-15T21:13:47Z" }, { "checksumSHA1": "9nXCi9qQsYjxCeajJKWttxgEt0I=", "path": "github.com/Azure/go-autorest/autorest/date", - "revision": "87f168d7649781e529653290e223c9c84b789c00", - "revisionTime": "2018-09-29T02:23:26Z" + "revision": "4b7f49dc5db2e1e6d528524d269b4181981a7ebf", + "revisionTime": "2018-10-15T21:13:47Z" }, { "checksumSHA1": "SbBb2GcJNm5GjuPKGL2777QywR4=", "path": "github.com/Azure/go-autorest/autorest/to", - "revision": "87f168d7649781e529653290e223c9c84b789c00", - "revisionTime": "2018-09-29T02:23:26Z" + "revision": "4b7f49dc5db2e1e6d528524d269b4181981a7ebf", + "revisionTime": "2018-10-15T21:13:47Z" }, { "checksumSHA1": "HjdLfAF3oA2In8F3FKh/Y+BPyXk=", "path": "github.com/Azure/go-autorest/autorest/validation", - "revision": "87f168d7649781e529653290e223c9c84b789c00", - "revisionTime": "2018-09-29T02:23:26Z" + "revision": "4b7f49dc5db2e1e6d528524d269b4181981a7ebf", + "revisionTime": "2018-10-15T21:13:47Z" }, { "checksumSHA1": "b2lrPJRxf+MEfmMafN40wepi5WM=", "path": "github.com/Azure/go-autorest/logger", - "revision": "87f168d7649781e529653290e223c9c84b789c00", - "revisionTime": "2018-09-29T02:23:26Z" + "revision": "4b7f49dc5db2e1e6d528524d269b4181981a7ebf", + "revisionTime": "2018-10-15T21:13:47Z" }, { - "checksumSHA1": "Ll0oJW9wVJYhsicZFGfWMepKtjA=", + "checksumSHA1": "xlHzrApXTEbHiMZ1vx7I2hg0rmI=", "path": "github.com/Azure/go-autorest/version", - "revision": "87f168d7649781e529653290e223c9c84b789c00", - "revisionTime": "2018-09-29T02:23:26Z" + "revision": "4b7f49dc5db2e1e6d528524d269b4181981a7ebf", + "revisionTime": "2018-10-15T21:13:47Z" }, { "checksumSHA1": "fPjkJSKxgZYvGX9g50J6frwwcyo=", @@ -279,98 +285,98 @@ { "checksumSHA1": "YCtIqzuVlzVA7jh1GQF6BWaom6E=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/sdk", - "revision": 
"9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": "2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { "checksumSHA1": "LsInbLqP985chlySVEfZG72nsqU=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth", - "revision": "9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": "2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { "checksumSHA1": "mir9zgkLR7muF4aPQtTV9T91B7E=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials", - "revision": "9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": "2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { "checksumSHA1": "Lbc1eCpbtMykOp4hEFoER5XU8Ds=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers", - "revision": "9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": "2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { "checksumSHA1": "YqjOfGGBOy3whJudMWLpUaHRT1I=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers", - "revision": "9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": "2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { "checksumSHA1": "EFYeU1Eewrnk2WWa/Qyih9ShLUI=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints", - "revision": "9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": "2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { "checksumSHA1": "skwRtjBE4ucpfqVPwnwtDMlBu2A=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors", - "revision": "9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": "2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { "checksumSHA1": "BmJNYsPlyEd7IJT7fSz3ZOF3lfA=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests", - "revision": "9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": "2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { "checksumSHA1": "28lS3r5yG+7xIXBXHH0B513k/D0=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses", - "revision": "9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": "2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { "checksumSHA1": "vY4yDjkvAE26q2Xn3tCPcfZhnoU=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils", - "revision": "9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": "2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { "checksumSHA1": "vT5hncrHvGakCmawRJI3JgtTf4s=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/services/ram", - "revision": "9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": "2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { "checksumSHA1": "N9A+9VmoMs38kijGWNVv0vsgi54=", "path": "github.com/aliyun/alibaba-cloud-sdk-go/services/sts", - "revision": "9669db6328e053fefc47bfe8ddf2e82625444fab", - "revisionTime": 
"2018-09-30T08:30:48Z" + "revision": "c5825d35a94a5a25d936ebe6bed5779255d02cf4", + "revisionTime": "2018-10-12T12:19:43Z" }, { - "checksumSHA1": "dOdNzoew2Ktp9THKJSKRFxAE2Yw=", + "checksumSHA1": "x5pRbco8m4cXGS3u/NnbzIZJ1Hg=", "path": "github.com/apple/foundationdb/bindings/go/src/fdb", - "revision": "02f409b235f737a8980aa7ddee86d8c6ebe602a3", - "revisionTime": "2018-10-03T15:34:13Z" + "revision": "c255feb76dedc9ba15e2064bd78465166822c2b5", + "revisionTime": "2018-10-15T20:09:47Z" }, { "checksumSHA1": "VxeCD1wWXix+y3N83t8PHmXDTIo=", "path": "github.com/apple/foundationdb/bindings/go/src/fdb/directory", - "revision": "02f409b235f737a8980aa7ddee86d8c6ebe602a3", - "revisionTime": "2018-10-03T15:34:13Z" + "revision": "c255feb76dedc9ba15e2064bd78465166822c2b5", + "revisionTime": "2018-10-15T20:09:47Z" }, { "checksumSHA1": "im/gTxCCpiNchCB6gNYZez+4pYI=", "path": "github.com/apple/foundationdb/bindings/go/src/fdb/subspace", - "revision": "02f409b235f737a8980aa7ddee86d8c6ebe602a3", - "revisionTime": "2018-10-03T15:34:13Z" + "revision": "c255feb76dedc9ba15e2064bd78465166822c2b5", + "revisionTime": "2018-10-15T20:09:47Z" }, { "checksumSHA1": "EkxFtJERI529/WjdfgoLGzQ/yJM=", "path": "github.com/apple/foundationdb/bindings/go/src/fdb/tuple", - "revision": "02f409b235f737a8980aa7ddee86d8c6ebe602a3", - "revisionTime": "2018-10-03T15:34:13Z" + "revision": "c255feb76dedc9ba15e2064bd78465166822c2b5", + "revisionTime": "2018-10-15T20:09:47Z" }, { "checksumSHA1": "DUX4pOK9NKSAzC6RRXniLviyByA=", @@ -415,250 +421,256 @@ "revisionTime": "2018-07-20T11:50:03Z" }, { - "checksumSHA1": "Kgu6SJIAMKg62XYHNrzWZRzVUpw=", + "checksumSHA1": "NTMpg2d3Yl053jP8gt+Qk7qq9UU=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "DtuTqKH29YnLjrIJkRYX0HQtXY0=", "path": "github.com/aws/aws-sdk-go/aws/arn", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "EwL79Cq6euk+EV/t/n2E+jzPNmU=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "uEJU4I6dTKaraQKvrljlYKUZwoc=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "vVSUnICaD9IaBQisCfw0n8zLwig=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": 
"7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "I87y3G8r14yKZQ5NlkupFUJ5jW0=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "JTilCBYWVAfhbKSnrxCNhE8IFns=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "1pENtl2K9hG7qoB7R6J7dAHa82g=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { - "checksumSHA1": "BJte7rLt3vZUYkd455WNUUyCdxY=", + "checksumSHA1": "KZylhHa5CQP8deDHphHMU2tUr3o=", "path": "github.com/aws/aws-sdk-go/aws/csm", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { - "checksumSHA1": "ga8NLl3uroITvUgioRFFgSnSBt0=", + "checksumSHA1": "7AmyyJXVkMdmy8dphC3Nalx5XkI=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "mYqgKOMSGvLmrt0CoBNbqdcTM3c=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "WGg5z1n2JGCf7YIwtwlhn30svbo=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { - "checksumSHA1": "Ia/AZ2fZp7J4lMO6fpkvfLU/HGY=", + "checksumSHA1": "+pDu3wk2/RDNjelVs3KE+03GqII=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { - "checksumSHA1": "zx1mZCdOwgbjBV3jMfb0kyDd//Q=", + "checksumSHA1": "Y7aklChDDsoRKhEIX/ACPnarw28=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "gQ1sGIVnPqvvxa9Ww2g/PGkk16M=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - 
"revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" + }, + { + "checksumSHA1": "QvKGojx+wCHTDfXQ1aoOYzH3Y88=", + "path": "github.com/aws/aws-sdk-go/internal/s3err", + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "wjxQlU1PYxrDRFoL1Vek8Wch7jk=", "path": "github.com/aws/aws-sdk-go/internal/sdkio", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "MYLldFRnsZh21TfCkgkXCT3maPU=", "path": "github.com/aws/aws-sdk-go/internal/sdkrand", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "tQVg7Sz2zv+KkhbiXxPH0mh9spg=", "path": "github.com/aws/aws-sdk-go/internal/sdkuri", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { - "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", + "checksumSHA1": "LjfJ5ydXdiSuQixC+HrmSZjW3NU=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { - "checksumSHA1": "ZX5QHZb0PrK4UF45um2kaAEiX+8=", + "checksumSHA1": "NHfa9brYkChSmKiBcKe+xMaJzlc=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "0cZnOaE1EcFUuiu4bdHV2k7slQg=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "stsUCJVnZ5yMrmzSExbjbYp5tZ8=", "path": "github.com/aws/aws-sdk-go/private/protocol/eventstream", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "bOQjEfKXaTqe7dZhDDER/wZUzQc=", "path": "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "tXRIRarT7qepHconxydtO7mXod4=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "v2c4B7IgTyjl7ShytqbTOqhCIoM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": 
"6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "lj56XJFI2OSp+hEOrFZ+eiEi/yM=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "+O6A945eTP9plLpkEMZB0lwBAcg=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "uRvmEPKcEdv7qc0Ep2zn0E3Xumc=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "ZZgzuZoMphxAf8wwz9QqpSQdBGc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "soXVJWQ/xvEB72Mo6FresaQIxLg=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { - "checksumSHA1": "42xYT0wxX/R9vu4C9+cHtBZkvxY=", + "checksumSHA1": "Ca+Lj+lYT1bFPmFqRFse3jtH1QA=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "0pEvMEbeoSBvYbVR5HwTpDcbIEo=", "path": "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { - "checksumSHA1": "TWuInGtqa8MFpVL63wj3sV3qWBo=", + "checksumSHA1": "Cr+HUNBOotDvCYRZOjQJl/vg0yg=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { - "checksumSHA1": "ae+jhUirSvN0IXPVU7X7xc+EbFE=", + "checksumSHA1": "xke6oymAAPvAuHxEm2eWp+mdaaw=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "+vRlKT3gC9YfgJllh87JaYa3V9c=", "path": "github.com/aws/aws-sdk-go/service/iam/iamiface", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { - "checksumSHA1": "JgKzClDzo4V5Wh9cEk7h2dHtAZE=", + "checksumSHA1": "yDbIw+lVcsmjyom0xI+8khZNy6o=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": 
"7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { - "checksumSHA1": "UhIVLDgQc19wjrPj8pP7Fu2UwWc=", + "checksumSHA1": "35a/vm5R/P68l/hQD55GqviO6bg=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "O1161KyKFmri353MlqGTxZv36fU=", "path": "github.com/aws/aws-sdk-go/service/sts/stsiface", - "revision": "7e0b4d7a9b07c2955d09e92c48e11ac30cc51402", - "revisionTime": "2018-10-02T21:15:42Z" + "revision": "6e42625fcba9345ab67b42b6744284e8f7e3e79d", + "revisionTime": "2018-10-15T21:27:28Z" }, { "checksumSHA1": "0rido7hYHQtfq3UJzVT5LClLAWc=", @@ -783,26 +795,14 @@ { "checksumSHA1": "q1SfUjeNZ2lMjAlzyJPlNOl9OQ0=", "path": "github.com/coreos/etcd/client", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" - }, - { - "checksumSHA1": "p44sdAgW/tNsJgF0x0ZUfH4ltJ8=", - "path": "github.com/coreos/etcd/clientv3", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" - }, - { - "checksumSHA1": "+jrv57/cB14bv2ZGVolu0tzyY9M=", - "path": "github.com/coreos/etcd/clientv3/concurrency", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "pop9HdnuvjAqCLhFZVmkrffixg4=", "path": "github.com/coreos/etcd/pkg/transport", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "3fbao10aFaGLkTcNIYanekXqO+g=", @@ -821,8 +821,8 @@ { "checksumSHA1": "3Yy0xwh3nuUl9+hzOWG+k33jbik=", "path": "github.com/coreos/go-systemd/journal", - "revision": "eee3db372b31153ca0b90702e165948699803fd0", - "revisionTime": "2018-08-28T14:03:53Z" + "revision": "c6f51f82210d9608a835763225a5e2a3c87583c6", + "revisionTime": "2018-10-12T12:30:02Z" }, { "checksumSHA1": "2nbRomDbuewPx0gvYTHCIwgtEbM=", @@ -831,16 +831,16 @@ "revisionTime": "2018-09-28T19:01:04Z" }, { - "checksumSHA1": "OcE/jbvEzkLl88hogGLj/RU68dc=", + "checksumSHA1": "0rXUgJC/GgF8XbLxmJiMgC4peQQ=", "path": "github.com/denisenkom/go-mssqldb", - "revision": "1eb28afdf9b6e56cf673badd47545f844fe81103", - "revisionTime": "2018-09-01T17:21:38Z" + "revision": "4e0d7dc8888fbb59764060e99b7b68e77a6f9698", + "revisionTime": "2018-10-14T14:49:52Z" }, { "checksumSHA1": "wu8t19t2rmyrrfDfdu9v7f/+iag=", "path": "github.com/denisenkom/go-mssqldb/internal/cp", - "revision": "1eb28afdf9b6e56cf673badd47545f844fe81103", - "revisionTime": "2018-09-01T17:21:38Z" + "revision": "4e0d7dc8888fbb59764060e99b7b68e77a6f9698", + "revisionTime": "2018-10-14T14:49:52Z" }, { "checksumSHA1": "blOHP3HPYU+IeV6yWCalQuuM+zE=", @@ -885,16 +885,16 @@ "revisionTime": "2018-02-23T16:03:09Z" }, { - "checksumSHA1": "HbOJxa+FsQ1TTsF+BkHAvzqqZv4=", + "checksumSHA1": "BxH9xJUqczhpL57gfKZe2/VlBHY=", "path": "github.com/fatih/color", - "revision": "2d684516a8861da43017284349b7e303e809ac21", - "revisionTime": "2018-05-16T10:03:07Z" + "revision": "3f9d52f7176a6927daacff70a3e8d1dc2025c53e", + "revisionTime": "2018-10-10T23:13:11Z" }, { - "checksumSHA1": 
"nwSshN6QJr3vSBdz+uPH4mJtK1g=", + "checksumSHA1": "Ps2Bsd1OjhSmAv4uVA2ZIZx7w7k=", "path": "github.com/fatih/structs", - "revision": "4966fc68f5b7593aafa6cbbba2d65ec6e1416047", - "revisionTime": "2018-09-14T09:55:22Z" + "revision": "878a968ab22548362a09bdb3322f98b00f470d46", + "revisionTime": "2018-10-10T23:17:57Z" }, { "checksumSHA1": "gltDeXUix5pTk06uFY9ed/ft3hE=", @@ -915,16 +915,16 @@ "revisionTime": "2018-08-13T16:29:53Z" }, { - "checksumSHA1": "1DHC++cTXWeJnn+ZFLMYcJ2g6/8=", + "checksumSHA1": "1MD5Dz5eVB6XZ24C3P8nlAD6ui0=", "path": "github.com/go-ini/ini", - "revision": "7b294651033cd7d9e7f0d9ffa1b75ed1e198e737", - "revisionTime": "2018-09-30T17:34:52Z" + "revision": "9c8236e659b76e87bf02044d06fde8683008ff3e", + "revisionTime": "2018-10-14T23:42:04Z" }, { - "checksumSHA1": "L8Mp7efUUkIGjlhp5iK34H06rl4=", + "checksumSHA1": "H5pC76pCW3fkir+2JFISc7Nep5s=", "path": "github.com/go-ldap/ldap", - "revision": "dc997b2033d4b4ab6de47b555eaf76888a81d33d", - "revisionTime": "2018-08-10T05:44:51Z" + "revision": "cf78c58932fcb3e1cf19a5f0d03bb77a5f4d2e88", + "revisionTime": "2018-10-15T21:54:33Z" }, { "checksumSHA1": "r89NVOyv2X4KagJNkB7a4rZvMg4=", @@ -939,52 +939,52 @@ "revisionTime": "2018-05-09T20:02:13Z" }, { - "checksumSHA1": "N3dRwyaLopbD/gxftYTHAqW4rbg=", + "checksumSHA1": "R6itMAIo8MvEkf0FYAeXtaBhcfc=", "path": "github.com/gocql/gocql", - "revision": "7ce14ecfedc6040abe54bd0a1dfaed2cf283e0a0", - "revisionTime": "2018-09-29T15:07:53Z" + "revision": "44e29ed5b8a4b4fff39d7ebaa19976c6f852075b", + "revisionTime": "2018-10-12T10:03:15Z" }, { "checksumSHA1": "7RlYIbPYgPkxDDCSEuE6bvYEEeU=", "path": "github.com/gocql/gocql/internal/lru", - "revision": "7ce14ecfedc6040abe54bd0a1dfaed2cf283e0a0", - "revisionTime": "2018-09-29T15:07:53Z" + "revision": "44e29ed5b8a4b4fff39d7ebaa19976c6f852075b", + "revisionTime": "2018-10-12T10:03:15Z" }, { "checksumSHA1": "puCAbQOdeajpTEFssPsgeskXB+8=", "path": "github.com/gocql/gocql/internal/murmur", - "revision": "7ce14ecfedc6040abe54bd0a1dfaed2cf283e0a0", - "revisionTime": "2018-09-29T15:07:53Z" + "revision": "44e29ed5b8a4b4fff39d7ebaa19976c6f852075b", + "revisionTime": "2018-10-12T10:03:15Z" }, { "checksumSHA1": "5GTGKm0C5aLdu9tynZQWQEQqRes=", "path": "github.com/gocql/gocql/internal/streams", - "revision": "7ce14ecfedc6040abe54bd0a1dfaed2cf283e0a0", - "revisionTime": "2018-09-29T15:07:53Z" + "revision": "44e29ed5b8a4b4fff39d7ebaa19976c6f852075b", + "revisionTime": "2018-10-12T10:03:15Z" }, { "checksumSHA1": "FhEC1RwKSm/8jvTI2MKlzDVz+Vs=", "path": "github.com/gogo/protobuf/gogoproto", - "revision": "61dbc136cf5d2f08d68a011382652244990a53a9", - "revisionTime": "2018-09-25T08:36:12Z" + "revision": "fd322a3c49630fe6d05737e2b7d9426e6680e28d", + "revisionTime": "2018-10-10T09:29:45Z" }, { "checksumSHA1": "47nJ3iu1bVvK9jPXwAinFYr4mBU=", "path": "github.com/gogo/protobuf/proto", - "revision": "61dbc136cf5d2f08d68a011382652244990a53a9", - "revisionTime": "2018-09-25T08:36:12Z" + "revision": "fd322a3c49630fe6d05737e2b7d9426e6680e28d", + "revisionTime": "2018-10-10T09:29:45Z" }, { "checksumSHA1": "4R7X2wRYYOxdXsoXOEcyBoCakb8=", "path": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", - "revision": "61dbc136cf5d2f08d68a011382652244990a53a9", - "revisionTime": "2018-09-25T08:36:12Z" + "revision": "fd322a3c49630fe6d05737e2b7d9426e6680e28d", + "revisionTime": "2018-10-10T09:29:45Z" }, { "checksumSHA1": "HPVQZu059/Rfw2bAWM538bVTcUc=", "path": "github.com/gogo/protobuf/sortkeys", - "revision": "61dbc136cf5d2f08d68a011382652244990a53a9", - "revisionTime": 
"2018-09-25T08:36:12Z" + "revision": "fd322a3c49630fe6d05737e2b7d9426e6680e28d", + "revisionTime": "2018-10-10T09:29:45Z" }, { "checksumSHA1": "HmbftipkadrLlCfzzVQ+iFHbl6g=", @@ -995,50 +995,50 @@ { "checksumSHA1": "GaJLoEuMGnP5ofXvuweAI4wx06U=", "path": "github.com/golang/protobuf/proto", - "revision": "7011d38ac0d201eeddff4a4085a657c3da322d75", - "revisionTime": "2018-09-28T22:12:48Z" + "revision": "ddf22928ea3c56eb4292a0adbbf5001b1e8e7d0d", + "revisionTime": "2018-10-05T18:17:28Z" }, { "checksumSHA1": "fXNL9CT3MIlkdLUCn83Aon7QVAU=", "path": "github.com/golang/protobuf/protoc-gen-go/descriptor", - "revision": "7011d38ac0d201eeddff4a4085a657c3da322d75", - "revisionTime": "2018-09-28T22:12:48Z" + "revision": "ddf22928ea3c56eb4292a0adbbf5001b1e8e7d0d", + "revisionTime": "2018-10-05T18:17:28Z" }, { "checksumSHA1": "tkJPssYejSjuAwE2tdEnoEIj93Q=", "path": "github.com/golang/protobuf/ptypes", - "revision": "7011d38ac0d201eeddff4a4085a657c3da322d75", - "revisionTime": "2018-09-28T22:12:48Z" + "revision": "ddf22928ea3c56eb4292a0adbbf5001b1e8e7d0d", + "revisionTime": "2018-10-05T18:17:28Z" }, { "checksumSHA1": "UvYEjI10BRTlBOd8fZvQrJbLpC4=", "path": "github.com/golang/protobuf/ptypes/any", - "revision": "7011d38ac0d201eeddff4a4085a657c3da322d75", - "revisionTime": "2018-09-28T22:12:48Z" + "revision": "ddf22928ea3c56eb4292a0adbbf5001b1e8e7d0d", + "revisionTime": "2018-10-05T18:17:28Z" }, { "checksumSHA1": "GKo6mbuEFhhg/SzB4UpvEB64rDA=", "path": "github.com/golang/protobuf/ptypes/duration", - "revision": "7011d38ac0d201eeddff4a4085a657c3da322d75", - "revisionTime": "2018-09-28T22:12:48Z" + "revision": "ddf22928ea3c56eb4292a0adbbf5001b1e8e7d0d", + "revisionTime": "2018-10-05T18:17:28Z" }, { "checksumSHA1": "wApifY9WlmhQrnTQOi6JOiLchts=", "path": "github.com/golang/protobuf/ptypes/empty", - "revision": "7011d38ac0d201eeddff4a4085a657c3da322d75", - "revisionTime": "2018-09-28T22:12:48Z" + "revision": "ddf22928ea3c56eb4292a0adbbf5001b1e8e7d0d", + "revisionTime": "2018-10-05T18:17:28Z" }, { "checksumSHA1": "FVux1MnlZpNEY4Ot/Vw5nX+6N88=", "path": "github.com/golang/protobuf/ptypes/struct", - "revision": "7011d38ac0d201eeddff4a4085a657c3da322d75", - "revisionTime": "2018-09-28T22:12:48Z" + "revision": "ddf22928ea3c56eb4292a0adbbf5001b1e8e7d0d", + "revisionTime": "2018-10-05T18:17:28Z" }, { "checksumSHA1": "T42CEYmGiqIXTe36UJ5jLPO33lY=", "path": "github.com/golang/protobuf/ptypes/timestamp", - "revision": "7011d38ac0d201eeddff4a4085a657c3da322d75", - "revisionTime": "2018-09-28T22:12:48Z" + "revision": "ddf22928ea3c56eb4292a0adbbf5001b1e8e7d0d", + "revisionTime": "2018-10-05T18:17:28Z" }, { "checksumSHA1": "h1d2lPZf6j2dW/mIqVnd1RdykDo=", @@ -1047,10 +1047,10 @@ "revisionTime": "2018-05-18T05:39:59Z" }, { - "checksumSHA1": "EEiZDKkF989T9AtcT8USV7XYpk0=", + "checksumSHA1": "lorBQlh3HnsHpNN91mPGXgje1Ow=", "path": "github.com/google/go-github/github", - "revision": "d9d09bed114a1683fcf81386d2e11f517e7f8fe3", - "revisionTime": "2018-10-03T00:43:15Z" + "revision": "aa7423eb79707e88352b2af2ff5fa5a4493f88c6", + "revisionTime": "2018-10-14T18:33:19Z" }, { "checksumSHA1": "p3IB18uJRs4dL2K5yx24MrLYE9A=", @@ -1095,22 +1095,22 @@ "revisionTime": "2016-01-25T11:53:50Z" }, { - "checksumSHA1": "SB7wK1wwrVUhTK4Ozu1oXpYJkTI=", + "checksumSHA1": "PJDKLws64a9Lb00kp9qgyhAD81s=", "path": "github.com/hashicorp/consul/api", - "revision": "9da8ad8f3d0e64606f4d2e123f6b4cdaaa332a06", - "revisionTime": "2018-10-03T08:54:07Z" + "revision": "34e95314827d336e4893aa23ee651b67d320c3ee", + "revisionTime": "2018-10-11T20:31:27Z" }, { 
"checksumSHA1": "L/RlKwIgOVgm516yOHXfT4HcGAk=", "path": "github.com/hashicorp/consul/lib", - "revision": "9da8ad8f3d0e64606f4d2e123f6b4cdaaa332a06", - "revisionTime": "2018-10-03T08:54:07Z" + "revision": "34e95314827d336e4893aa23ee651b67d320c3ee", + "revisionTime": "2018-10-11T20:31:27Z" }, { - "checksumSHA1": "xLs0iTDsNeqBV01gN9Kx7Hvxod4=", + "checksumSHA1": "xx1WR46goRPHHhFk75gzyEA/Opw=", "path": "github.com/hashicorp/consul/version", - "revision": "9da8ad8f3d0e64606f4d2e123f6b4cdaaa332a06", - "revisionTime": "2018-10-03T08:54:07Z" + "revision": "34e95314827d336e4893aa23ee651b67d320c3ee", + "revisionTime": "2018-10-11T20:31:27Z" }, { "checksumSHA1": "ByRdQMv2yl16W6Tp9gUW1nNmpuI=", @@ -1161,10 +1161,10 @@ "revisionTime": "2018-08-24T00:40:42Z" }, { - "checksumSHA1": "TM75ZrTZtNHIDvAMf18mItUc87U=", + "checksumSHA1": "xKTQS1nUz2xCWwMI8FTLB6mcjGs=", "path": "github.com/hashicorp/go-plugin", - "revision": "1faddcf740b61468a23dacc67369c28ec96d7fc7", - "revisionTime": "2018-10-02T19:58:11Z" + "revision": "314501b665e0b2cc71bbd829783179fc38840a85", + "revisionTime": "2018-10-04T02:44:35Z" }, { "checksumSHA1": "/yKfFSspjuDzyOe/bBslrPzyfYM=", @@ -1283,56 +1283,56 @@ { "checksumSHA1": "JfDgoMtev66pAMpiMEV3H4fNsnU=", "path": "github.com/hashicorp/nomad/acl", - "revision": "95d9286ad17f745bf861aa9634cbd2a0a7f40cc1", - "revisionTime": "2018-09-26T21:53:15Z" + "revision": "7b1753bd9a48b824927d129e88e6cf07a188800d", + "revisionTime": "2018-10-15T20:37:49Z" }, { - "checksumSHA1": "2xVllybkthBRP5oaU5LB/X8CkCE=", + "checksumSHA1": "FNl0nzjl20FfYSU3Bc+14ghXDLU=", "path": "github.com/hashicorp/nomad/api", - "revision": "95d9286ad17f745bf861aa9634cbd2a0a7f40cc1", - "revisionTime": "2018-09-26T21:53:15Z" + "revision": "7b1753bd9a48b824927d129e88e6cf07a188800d", + "revisionTime": "2018-10-15T20:37:49Z" }, { "checksumSHA1": "fqswK1Rf5F7cRNG+UHgY/gQFC78=", "path": "github.com/hashicorp/nomad/api/contexts", - "revision": "95d9286ad17f745bf861aa9634cbd2a0a7f40cc1", - "revisionTime": "2018-09-26T21:53:15Z" + "revision": "7b1753bd9a48b824927d129e88e6cf07a188800d", + "revisionTime": "2018-10-15T20:37:49Z" }, { - "checksumSHA1": "kc17FtLJc0ZNuYc1bdAoiVSkChc=", + "checksumSHA1": "MTJpXZeMoJPxJDs5GoIbmnq3uXk=", "path": "github.com/hashicorp/nomad/helper", - "revision": "95d9286ad17f745bf861aa9634cbd2a0a7f40cc1", - "revisionTime": "2018-09-26T21:53:15Z" + "revision": "7b1753bd9a48b824927d129e88e6cf07a188800d", + "revisionTime": "2018-10-15T20:37:49Z" }, { "checksumSHA1": "qB1zM2M1IuhWi7L8uNcnOiCrog0=", "path": "github.com/hashicorp/nomad/helper/args", - "revision": "95d9286ad17f745bf861aa9634cbd2a0a7f40cc1", - "revisionTime": "2018-09-26T21:53:15Z" + "revision": "7b1753bd9a48b824927d129e88e6cf07a188800d", + "revisionTime": "2018-10-15T20:37:49Z" }, { "checksumSHA1": "KtK2/d4wBHjMqdR/SAa6cSDmxQ0=", "path": "github.com/hashicorp/nomad/helper/flatmap", - "revision": "95d9286ad17f745bf861aa9634cbd2a0a7f40cc1", - "revisionTime": "2018-09-26T21:53:15Z" + "revision": "7b1753bd9a48b824927d129e88e6cf07a188800d", + "revisionTime": "2018-10-15T20:37:49Z" }, { "checksumSHA1": "mSCo/iZUEOSpeX5NsGZZzFMJqto=", "path": "github.com/hashicorp/nomad/helper/uuid", - "revision": "95d9286ad17f745bf861aa9634cbd2a0a7f40cc1", - "revisionTime": "2018-09-26T21:53:15Z" + "revision": "7b1753bd9a48b824927d129e88e6cf07a188800d", + "revisionTime": "2018-10-15T20:37:49Z" }, { "checksumSHA1": "9EzgE4XLE3Jl8vQh3ml+PWs51Vg=", "path": "github.com/hashicorp/nomad/lib/kheap", - "revision": "95d9286ad17f745bf861aa9634cbd2a0a7f40cc1", - "revisionTime": 
"2018-09-26T21:53:15Z" + "revision": "7b1753bd9a48b824927d129e88e6cf07a188800d", + "revisionTime": "2018-10-15T20:37:49Z" }, { - "checksumSHA1": "ikFYvnCl+vyXD6Q7JIaLPQeNaZM=", + "checksumSHA1": "UWR5DiE1Zh4u+0+U4t6WNAL49mI=", "path": "github.com/hashicorp/nomad/nomad/structs", - "revision": "95d9286ad17f745bf861aa9634cbd2a0a7f40cc1", - "revisionTime": "2018-09-26T21:53:15Z" + "revision": "7b1753bd9a48b824927d129e88e6cf07a188800d", + "revisionTime": "2018-10-15T20:37:49Z" }, { "checksumSHA1": "+IrfZCO4rLVEOv+Q0aG4WI70SIE=", @@ -1353,16 +1353,16 @@ "revisionTime": "2018-09-07T13:02:40Z" }, { - "checksumSHA1": "pqkqaBRFKL2P/64xpuxj/3J/+sQ=", + "checksumSHA1": "NRGHY73VI2kEgy+FSDOuSsixS4k=", "path": "github.com/hashicorp/vault-plugin-auth-alicloud", - "revision": "9d43fd14d9fe03a23a2434fb1866587f48fbbbfc", - "revisionTime": "2018-09-10T17:20:49Z" + "revision": "1c2bb0abe14ed980276707b1d572dd280da69d86", + "revisionTime": "2018-10-03T22:48:59Z" }, { "checksumSHA1": "xdrSQoX7B7Hr4iWm9T2+5wHVpHQ=", "path": "github.com/hashicorp/vault-plugin-auth-alicloud/tools", - "revision": "9d43fd14d9fe03a23a2434fb1866587f48fbbbfc", - "revisionTime": "2018-09-10T17:20:49Z" + "revision": "1c2bb0abe14ed980276707b1d572dd280da69d86", + "revisionTime": "2018-10-03T22:48:59Z" }, { "checksumSHA1": "ojr0r/jmutGEhftDXiHthCCwpIA=", @@ -1377,16 +1377,16 @@ "revisionTime": "2018-08-16T20:11:31Z" }, { - "checksumSHA1": "DezgN3BHtzu7NO6tiTHaWc3YTkg=", + "checksumSHA1": "/Jn1UXwpf4RVqNhxyUT275WMQzQ=", "path": "github.com/hashicorp/vault-plugin-auth-gcp/plugin", - "revision": "e07498ed4e6674913b3fcdb1cc49cd6bcd2478fd", - "revisionTime": "2018-09-28T18:30:15Z" + "revision": "d57adfb30a2e65659bdb83e632a6d4e6690e2e86", + "revisionTime": "2018-10-12T20:41:23Z" }, { - "checksumSHA1": "q1jmXl9yBvTAWCiF4siIJZ65tEA=", + "checksumSHA1": "nfHZ5lzZ2BUM97WnQ7acdnSEPQo=", "path": "github.com/hashicorp/vault-plugin-auth-jwt", - "revision": "4ff96c88f3c943f278f43000df5eadbd3a1acf7a", - "revisionTime": "2018-09-18T20:12:21Z" + "revision": "bf8970c9734c5d1e9fbab23255456c8272ff354a", + "revisionTime": "2018-10-15T15:58:27Z" }, { "checksumSHA1": "hrJZzU9iG2ixRu2hOdPgN7wa48c=", @@ -1413,22 +1413,22 @@ "revisionTime": "2018-08-20T22:27:10Z" }, { - "checksumSHA1": "quBu8KU8dNidtutgiZ4zunCKAkA=", + "checksumSHA1": "VLXyxS5dEoiWTSFmpMJIz+Pwtmw=", "path": "github.com/hashicorp/vault-plugin-secrets-alicloud", - "revision": "ec1a12d5d9c78af15afc469cb6fe5727ffb2e8ca", - "revisionTime": "2018-09-17T21:26:40Z" + "revision": "4272d944e495635b30fcabf70dd71a9355b798a9", + "revisionTime": "2018-10-03T22:47:18Z" }, { "checksumSHA1": "dqduixICi6NeyLNRCDdw62t1LFU=", "path": "github.com/hashicorp/vault-plugin-secrets-alicloud/clients", - "revision": "ec1a12d5d9c78af15afc469cb6fe5727ffb2e8ca", - "revisionTime": "2018-09-17T21:26:40Z" + "revision": "4272d944e495635b30fcabf70dd71a9355b798a9", + "revisionTime": "2018-10-03T22:47:18Z" }, { - "checksumSHA1": "fTT9z8zhrp0abUWgB326BeEWFbI=", + "checksumSHA1": "6BinFSaXH5g1SSgNSOZRIPbmWkc=", "path": "github.com/hashicorp/vault-plugin-secrets-azure", - "revision": "88fc7b7a5fa189bcae6ca482ba9b7f696e08fe64", - "revisionTime": "2018-10-01T15:42:37Z" + "revision": "01e3797517d6b30e909825fb7c08a7cc1d97809c", + "revisionTime": "2018-10-04T17:44:28Z" }, { "checksumSHA1": "tFP1EEyVlomSSx46NHDZWGPzUz0=", @@ -1457,44 +1457,44 @@ { "checksumSHA1": "ldkAQ1CpiAaQ9sti0qIch+UyRsI=", "path": "github.com/hashicorp/yamux", - "revision": "7221087c3d281fda5f794e28c2ea4c6e4d5c4558", - "revisionTime": "2018-09-17T20:50:41Z" + "revision": 
"2f1d1f20f75d5404f53b9edf6b53ed5505508675", + "revisionTime": "2018-10-12T17:50:58Z" }, { - "checksumSHA1": "mTOX95mjIovKITjQKPP+EkYjsvk=", + "checksumSHA1": "FHpNxGUqXNu02lBFaei16tXFhU0=", "path": "github.com/jackc/pgx", - "revision": "0b00887bf8e110d6e02e9e82c066cc616bc0ed71", - "revisionTime": "2018-09-22T20:34:18Z" + "revision": "20eaa7963b9d714bfaf678dcb2448fbd1e3671b9", + "revisionTime": "2018-10-10T12:56:47Z" }, { "checksumSHA1": "rsDmBQ/jGFUN2t6G4c3ohkKGKdQ=", "path": "github.com/jackc/pgx/chunkreader", - "revision": "0b00887bf8e110d6e02e9e82c066cc616bc0ed71", - "revisionTime": "2018-09-22T20:34:18Z" + "revision": "20eaa7963b9d714bfaf678dcb2448fbd1e3671b9", + "revisionTime": "2018-10-10T12:56:47Z" }, { "checksumSHA1": "wArf/u0cmHsLafG2tOcjVUUXQBw=", "path": "github.com/jackc/pgx/internal/sanitize", - "revision": "0b00887bf8e110d6e02e9e82c066cc616bc0ed71", - "revisionTime": "2018-09-22T20:34:18Z" + "revision": "20eaa7963b9d714bfaf678dcb2448fbd1e3671b9", + "revisionTime": "2018-10-10T12:56:47Z" }, { "checksumSHA1": "OCtOmvS8BLHQAyQXJ/Ub8QxKz0s=", "path": "github.com/jackc/pgx/pgio", - "revision": "0b00887bf8e110d6e02e9e82c066cc616bc0ed71", - "revisionTime": "2018-09-22T20:34:18Z" + "revision": "20eaa7963b9d714bfaf678dcb2448fbd1e3671b9", + "revisionTime": "2018-10-10T12:56:47Z" }, { "checksumSHA1": "O2maVT2McrV62SKrIXqZh7gHEoM=", "path": "github.com/jackc/pgx/pgproto3", - "revision": "0b00887bf8e110d6e02e9e82c066cc616bc0ed71", - "revisionTime": "2018-09-22T20:34:18Z" + "revision": "20eaa7963b9d714bfaf678dcb2448fbd1e3671b9", + "revisionTime": "2018-10-10T12:56:47Z" }, { "checksumSHA1": "fFUF5XbI8Gx63ZQPn8gFmRnK+is=", "path": "github.com/jackc/pgx/pgtype", - "revision": "0b00887bf8e110d6e02e9e82c066cc616bc0ed71", - "revisionTime": "2018-09-22T20:34:18Z" + "revision": "20eaa7963b9d714bfaf678dcb2448fbd1e3671b9", + "revisionTime": "2018-10-10T12:56:47Z" }, { "checksumSHA1": "cIinEjB62s8j5cpY1u7sxtg4akg=", @@ -1617,10 +1617,10 @@ "revisionTime": "2018-09-20T17:11:16Z" }, { - "checksumSHA1": "cl9bdp4vvusDqC44P6NOtMK5tIU=", + "checksumSHA1": "r6ZMyP/HsdeToXIxvgP3ntL6Bds=", "path": "github.com/konsorten/go-windows-terminal-sequences", - "revision": "b729f2633dfe35f4d1d8a32385f6685610ce1cb5", - "revisionTime": "2018-04-02T22:36:58Z" + "revision": "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242", + "revisionTime": "2018-10-04T22:41:46Z" }, { "checksumSHA1": "3ohk4dFYrERZ6WTdKkIwnTA0HSI=", @@ -1677,10 +1677,10 @@ "revisionTime": "2018-08-14T19:25:59Z" }, { - "checksumSHA1": "KycBRsg27SKMCVnRgChcQOqviPA=", + "checksumSHA1": "dh1x2lCtVSPhvbQtcjjUlg6FMa0=", "path": "github.com/miekg/dns", - "revision": "ba6747e8a94115e9dc7738afb87850687611df1b", - "revisionTime": "2018-09-29T16:16:31Z" + "revision": "17c1bc6792fdf1918200e9a675cdf2e3c9d585cd", + "revisionTime": "2018-10-15T07:12:31Z" }, { "checksumSHA1": "fgd40uACm+f6nmSUC+R9A9NtwGY=", @@ -1713,10 +1713,10 @@ "revisionTime": "2018-08-28T14:53:49Z" }, { - "checksumSHA1": "+nNkT7mZEiQ3t1FSj7yNVu2TLdQ=", + "checksumSHA1": "J+g0oZePWp2zSIISD2dZZKTxmgg=", "path": "github.com/mitchellh/mapstructure", - "revision": "fe40af7a9c397fa3ddba203c38a5042c5d0475ad", - "revisionTime": "2018-10-01T14:12:12Z" + "revision": "3536a929edddb9a5b34bd6861dc4a9647cb459fe", + "revisionTime": "2018-10-05T04:51:35Z" }, { "checksumSHA1": "nxuST3bjBv5uDVPzrX9wdruOwv0=", @@ -1737,10 +1737,10 @@ "revisionTime": "2018-07-18T01:23:57Z" }, { - "checksumSHA1": "MU3gp7dhfZY+4gE9dEHiGyThBcg=", + "checksumSHA1": "l76EUB2FN0MXqh9v6ammV+JrruE=", "path": "github.com/ncw/swift", - "revision": 
"7854abff473c41fdde529ebe4cf7db6334dd61ce", - "revisionTime": "2018-09-07T11:40:03Z" + "revision": "6f907bcc698fc1d24fa918255b18c6c5668b8b2b", + "revisionTime": "2018-10-09T04:15:14Z" }, { "checksumSHA1": "nf3UoPNBIut7BL9nWE8Fw2X2j+Q=", @@ -1755,22 +1755,22 @@ "revisionTime": "2018-04-30T19:00:53Z" }, { - "checksumSHA1": "ZGlIwSRjdLYCUII7JLE++N4w7Xc=", + "checksumSHA1": "Yd/Bw+xjDW+66s4mr4yaHFY/EeE=", "path": "github.com/opencontainers/image-spec/specs-go", - "revision": "7b1e489870acb042978a3935d2fb76f8a79aff81", - "revisionTime": "2018-09-18T08:04:42Z" + "revision": "b6e51fa50549ee0bd5188494912a7f4c382cb0d4", + "revisionTime": "2018-10-11T18:26:54Z" }, { "checksumSHA1": "jdbXRRzeu0njLE9/nCEZG+Yg/Jk=", "path": "github.com/opencontainers/image-spec/specs-go/v1", - "revision": "7b1e489870acb042978a3935d2fb76f8a79aff81", - "revisionTime": "2018-09-18T08:04:42Z" + "revision": "b6e51fa50549ee0bd5188494912a7f4c382cb0d4", + "revisionTime": "2018-10-11T18:26:54Z" }, { "checksumSHA1": "XtLpcP6ca9SQG218re7E7UcOj3Y=", "path": "github.com/opencontainers/runc/libcontainer/user", - "revision": "2abd837c8c25b0102ac4ce14f17bc0bc7ddffba7", - "revisionTime": "2018-09-25T20:35:16Z" + "revision": "398f670bcba4dad3aa4c37f8fd1c6d6f9e5a6e15", + "revisionTime": "2018-10-13T07:32:37Z" }, { "checksumSHA1": "wJWRH5ORhyIO29LxvA/Sug1skF0=", @@ -1991,22 +1991,22 @@ "revisionTime": "2018-09-18T08:52:46Z" }, { - "checksumSHA1": "85AmRAEmy9EqHBUhwkBBVhLkCVU=", + "checksumSHA1": "imNaq3HPXbaiLyDDkbGcSNs42Xw=", "path": "github.com/pierrec/lz4", - "revision": "1f6e18d34f6790fc0afea6f13a5fe3d9ab1770af", - "revisionTime": "2018-09-11T17:58:58Z" + "revision": "635575b42742856941dbc767b44905bb9ba083f6", + "revisionTime": "2018-10-05T16:47:09Z" }, { "checksumSHA1": "YzBjaYp2pbrwPhT6XHY0CBSh71A=", "path": "github.com/pierrec/lz4/internal/xxh32", - "revision": "1f6e18d34f6790fc0afea6f13a5fe3d9ab1770af", - "revisionTime": "2018-09-11T17:58:58Z" + "revision": "635575b42742856941dbc767b44905bb9ba083f6", + "revisionTime": "2018-10-05T16:47:09Z" }, { - "checksumSHA1": "18YrywDvb67HU8xYF5vqKMgelx0=", + "checksumSHA1": "DTy0iJ2w5C+FDsN9EnzfhNmvS+o=", "path": "github.com/pkg/errors", - "revision": "c059e472caf75dbe73903f6521a20abac245b17f", - "revisionTime": "2018-09-11T06:21:13Z" + "revision": "2233dee583dcf88f3c8b22cb7a33f05a499800d8", + "revisionTime": "2018-10-08T04:53:15Z" }, { "checksumSHA1": "eU6C18oR9hAHGe/MBMYmMjBQ8r8=", @@ -2065,14 +2065,14 @@ { "checksumSHA1": "frS661rlSEZWE9CezHhnFioQK/I=", "path": "github.com/prometheus/client_golang/prometheus", - "revision": "0a8115f42e037a6e327f9a269a26ff6603fb8472", - "revisionTime": "2018-10-01T17:40:01Z" + "revision": "1cafe34db7fdec6022e17e00e1c1ea501022f3e4", + "revisionTime": "2018-10-15T14:52:39Z" }, { "checksumSHA1": "UBqhkyjCz47+S19MVTigxJ2VjVQ=", "path": "github.com/prometheus/client_golang/prometheus/internal", - "revision": "0a8115f42e037a6e327f9a269a26ff6603fb8472", - "revisionTime": "2018-10-01T17:40:01Z" + "revision": "1cafe34db7fdec6022e17e00e1c1ea501022f3e4", + "revisionTime": "2018-10-15T14:52:39Z" }, { "checksumSHA1": "V8xkqgmP66sq2ZW4QO5wi9a4oZE=", @@ -2081,46 +2081,46 @@ "revisionTime": "2018-07-12T10:51:10Z" }, { - "checksumSHA1": "Q0mjhUEjAklUQvPkrOChWGLpvRY=", + "checksumSHA1": "hGf3xT6gRaJh2zAEbWj9YnV+K+0=", "path": "github.com/prometheus/common/expfmt", - "revision": "c7de2306084e37d54b8be01f3541a8464345e9a5", - "revisionTime": "2018-08-01T06:44:54Z" + "revision": "bcb74de08d37a417cb6789eec1d6c810040f0470", + "revisionTime": "2018-10-15T12:42:27Z" }, { 
"checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=", "path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - "revision": "c7de2306084e37d54b8be01f3541a8464345e9a5", - "revisionTime": "2018-08-01T06:44:54Z" + "revision": "bcb74de08d37a417cb6789eec1d6c810040f0470", + "revisionTime": "2018-10-15T12:42:27Z" }, { "checksumSHA1": "EXTRY7DL9gFW8c341Dk6LDXCBn8=", "path": "github.com/prometheus/common/model", - "revision": "c7de2306084e37d54b8be01f3541a8464345e9a5", - "revisionTime": "2018-08-01T06:44:54Z" + "revision": "bcb74de08d37a417cb6789eec1d6c810040f0470", + "revisionTime": "2018-10-15T12:42:27Z" }, { "checksumSHA1": "4zOdjJcskuocAzI+i6rcRzYjSlI=", "path": "github.com/prometheus/procfs", - "revision": "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2", - "revisionTime": "2018-09-20T06:50:04Z" + "revision": "185b4288413d2a0dd0806f78c90dde719829e5ae", + "revisionTime": "2018-10-05T14:02:18Z" }, { - "checksumSHA1": "lv9rIcjbVEGo8AT1UCUZXhXrfQc=", + "checksumSHA1": "8E1IbrgtLBee7J404VKPyoI+qsk=", "path": "github.com/prometheus/procfs/internal/util", - "revision": "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2", - "revisionTime": "2018-09-20T06:50:04Z" + "revision": "185b4288413d2a0dd0806f78c90dde719829e5ae", + "revisionTime": "2018-10-05T14:02:18Z" }, { "checksumSHA1": "HSP5hVT0CNMRa8+Xtz4z2Ic5U0E=", "path": "github.com/prometheus/procfs/nfs", - "revision": "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2", - "revisionTime": "2018-09-20T06:50:04Z" + "revision": "185b4288413d2a0dd0806f78c90dde719829e5ae", + "revisionTime": "2018-10-05T14:02:18Z" }, { "checksumSHA1": "yItvTQLUVqm/ArLEbvEhqG0T5a0=", "path": "github.com/prometheus/procfs/xfs", - "revision": "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2", - "revisionTime": "2018-09-20T06:50:04Z" + "revision": "185b4288413d2a0dd0806f78c90dde719829e5ae", + "revisionTime": "2018-10-05T14:02:18Z" }, { "checksumSHA1": "M57Rrfc8Z966p+IBtQ91QOcUtcg=", @@ -2155,10 +2155,10 @@ "revisionTime": "2017-03-13T16:33:22Z" }, { - "checksumSHA1": "cfWqNEVqrlTckIp6feLbp9bwUbM=", + "checksumSHA1": "IvPH2QFjJjXIxr2l3nTDKA3fOuU=", "path": "github.com/sirupsen/logrus", - "revision": "1ed61965b9e594bf37539680d7f63eccd060314f", - "revisionTime": "2018-09-30T20:58:21Z" + "revision": "458213699411cdceb6fd839a3178be49c01fba54", + "revisionTime": "2018-10-10T20:06:18Z" }, { "checksumSHA1": "2xcr/mhxBFlDjpxe/Mc2Wb4RGR8=", @@ -2173,208 +2173,214 @@ "revisionTime": "2018-06-15T12:55:16Z" }, { - "checksumSHA1": "1DS/Ji9N9XzFauTiFIa33Ek1+hw=", + "checksumSHA1": "2itAnNmxls3SdlIGdm22djyp9IY=", "path": "github.com/ugorji/go/codec", - "revision": "99ea80c8b19ad777f970258ca09c5c2d5826f0fd", - "revisionTime": "2018-09-27T12:51:28Z" + "revision": "8333dd4495169d0d17214592e805b1ce5cbcdf86", + "revisionTime": "2018-10-12T06:40:53Z" }, { "checksumSHA1": "8nCoO1ACxWgrtwUl84Se7YIWVMA=", "path": "go.etcd.io/etcd/auth/authpb", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { - "checksumSHA1": "RI+zaQ8erxISmjqBXOTDpBkm7kc=", + "checksumSHA1": "dOTPc9AnUrucXTh6dFhcj6Xyim8=", "path": "go.etcd.io/etcd/clientv3", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "EXaUZbFolMaWfGUJUKXymQo63Fc=", "path": "go.etcd.io/etcd/clientv3/balancer", - "revision": 
"eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "g6eRx00F7Xl/0v8OCxgiC57ARs0=", "path": "go.etcd.io/etcd/clientv3/balancer/picker", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "zsQzOE0KolNLwLKQfahcy3WZwjk=", "path": "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { - "checksumSHA1": "ZrqOBeBTSsupblYMeANSpd3xyhQ=", + "checksumSHA1": "NFsezSuEcheJXAlnehOL431Q8us=", "path": "go.etcd.io/etcd/clientv3/concurrency", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "SDdAjoQeeV8tEDv05QZ7/Yv79CI=", "path": "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "Jk0gGFdOHgsqxhoj9atENsLkwD0=", "path": "go.etcd.io/etcd/etcdserver/etcdserverpb", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "C/ndQTz6TCtOsL29UYTxuax9/rw=", "path": "go.etcd.io/etcd/mvcc/mvccpb", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "38iFzK4Eq3kQ3t2xlTRQvypOeRI=", "path": "go.etcd.io/etcd/pkg/logutil", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "Q4ZZnZeTbMHrXKZxTd6EQjSf4uY=", "path": "go.etcd.io/etcd/pkg/pathutil", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "jF9R5QbAMut5PvZMIy9+zDPf6NU=", "path": "go.etcd.io/etcd/pkg/srv", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "Xf7TwBAdBTSSeTlEzqFH1Deglbk=", "path": "go.etcd.io/etcd/pkg/systemd", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "yl9eTlZ74Ixe7WxHQaO6rWJq+tU=", "path": "go.etcd.io/etcd/pkg/tlsutil", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "YQD4i+Li76ogXqgKS62MJVf1agE=", 
"path": "go.etcd.io/etcd/pkg/transport", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "VL9HsNKbbCb1Ntj9VcFCfiTmjmM=", "path": "go.etcd.io/etcd/pkg/types", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { - "checksumSHA1": "HxbfyMEdKQHL1I1308SwwQ44kVk=", + "checksumSHA1": "b6/18Kp2W5oWnbgqIq6Ks3FKOBk=", "path": "go.etcd.io/etcd/raft", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "0ZI+YcuXXszApIhI+HcsmIcSVgg=", "path": "go.etcd.io/etcd/raft/raftpb", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { "checksumSHA1": "Vyt9fFWEV2/sagq9KmkVbPn+LFM=", "path": "go.etcd.io/etcd/version", - "revision": "eca5f03cea2ec6d275a4b86ca1aa764b9d6aaa84", - "revisionTime": "2018-10-03T14:42:16Z" + "revision": "7a759c18d294698f537f8be91927354818a71e51", + "revisionTime": "2018-10-15T17:54:34Z" }, { - "checksumSHA1": "g6zbGRmSsrnc/Sfm8mRlT9UMwh0=", + "checksumSHA1": "FYNpZ/C+tTLe/JMqvIPybkoBh2Y=", "path": "go.opencensus.io", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" + }, + { + "checksumSHA1": "TA3C6+5PM7V2zbsnLMp13Efy/BA=", + "path": "go.opencensus.io/exemplar", + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { "checksumSHA1": "BhwWPIG9k2gelU3zEOkhKdedctk=", "path": "go.opencensus.io/internal", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { "checksumSHA1": "Vcwr4P/uIN4haoJPglU7liURepM=", "path": "go.opencensus.io/internal/tagencoding", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { "checksumSHA1": "OpsEM9xBeU/TfVFGEsjpCSEhpEc=", "path": "go.opencensus.io/plugin/ocgrpc", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { - "checksumSHA1": "giWeu8gmoVs746+Yo4+GFX0mrcs=", + "checksumSHA1": "kxVcsHl3DWhTdSZkterTpRFQRIs=", "path": "go.opencensus.io/plugin/ochttp", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { "checksumSHA1": "0OVZlXVUMGzf8ddlnjg2yMZI4ao=", "path": "go.opencensus.io/plugin/ochttp/propagation/b3", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { - "checksumSHA1": "GS+XFf77/UflWmme7TgzbZaecUs=", 
+ "checksumSHA1": "9Qm9NNFLaZ8KM3pv4lIBSsD4q3A=", "path": "go.opencensus.io/stats", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { - "checksumSHA1": "R0Glq4iKrvXHiHzbvt+5+Aqo6sY=", + "checksumSHA1": "SEHKoV2p561oIgFTqzQ67a/XU7I=", "path": "go.opencensus.io/stats/internal", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { - "checksumSHA1": "rn1C8hwUXhliMZTCH0h6wmnn8Y0=", + "checksumSHA1": "KVJAD8BjQ045mOPz/mAWbPXMlIU=", "path": "go.opencensus.io/stats/view", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { - "checksumSHA1": "VncCVu1IOAwNHrCGhgYzbZGmI3w=", + "checksumSHA1": "/xIgvCFhYpV43FT16tNBTbm7MeY=", "path": "go.opencensus.io/tag", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { - "checksumSHA1": "F7lN2nYz35ELTEQJA8qBP67b05Y=", + "checksumSHA1": "LFehOdQ0p2mKk0rpV1FaQTq4UwU=", "path": "go.opencensus.io/trace", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { "checksumSHA1": "0P3BycP6CFnFNRCnF4dTlMEJgEI=", "path": "go.opencensus.io/trace/internal", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { "checksumSHA1": "FHJParRi8f1GHO7Cx+lk3bMWBq0=", "path": "go.opencensus.io/trace/propagation", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { - "checksumSHA1": "Hv8+FGg2p+wtvj0OKRLu5Lf7mHk=", + "checksumSHA1": "UHbxxaMqpEPsubh8kPwzSlyEwqI=", "path": "go.opencensus.io/trace/tracestate", - "revision": "d5b2c1264a6e63d82d7b9527b34c0b36206627cb", - "revisionTime": "2018-09-27T22:28:52Z" + "revision": "1eb9a13c7dd02141e065a665f6bf5c99a090a16a", + "revisionTime": "2018-10-15T18:34:46Z" }, { "checksumSHA1": "Aj1YXCXqTITJWE1NQwoLETGf/Mc=", @@ -2427,308 +2433,308 @@ { "checksumSHA1": "oCH3J96RWvO8W4xjix47PModpio=", "path": "golang.org/x/crypto/bcrypt", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "ejjxT0+wDWWncfh0Rt3lSH4IbXQ=", "path": "golang.org/x/crypto/blake2b", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "oVPHWesOmZ02vLq2fglGvf+AMgk=", "path": "golang.org/x/crypto/blowfish", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": 
"9TPZ7plxFmlYtMEv2LLXRCEQg7c=", "path": "golang.org/x/crypto/chacha20poly1305", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "VrW/nowBxVcqQhIrqDJNxr5NWu0=", "path": "golang.org/x/crypto/cryptobyte", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "YEoV2AiZZPDuF7pMVzDt7buS9gc=", "path": "golang.org/x/crypto/cryptobyte/asn1", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "IQkUIOnvlf0tYloFx9mLaXSvXWQ=", "path": "golang.org/x/crypto/curve25519", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "2LpxYGSf068307b7bhAuVjvzLLc=", "path": "golang.org/x/crypto/ed25519", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "0JTAFXPkankmWcZGQJGScLDiaN8=", "path": "golang.org/x/crypto/ed25519/internal/edwards25519", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "4D8hxMIaSDEW5pCQk22Xj4DcDh4=", "path": "golang.org/x/crypto/hkdf", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "fhxj9uzosD3dQefNF5JuGJzGZwg=", "path": "golang.org/x/crypto/internal/chacha20", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "/U7f2gaH6DnEmLguVLDbipU6kXU=", "path": "golang.org/x/crypto/internal/subtle", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "MCeXr2RNeiG1XG6V+er1OR0qyeo=", "path": "golang.org/x/crypto/md4", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "1MGpGDQqnUoRpv7VEcQrXOBydXE=", "path": "golang.org/x/crypto/pbkdf2", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "PJY7uCr3UnX4/Mf/RoWnbieSZ8o=", "path": "golang.org/x/crypto/pkcs12", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": 
"p0GC51McIdA7JygoP223twJ1s0E=", "path": "golang.org/x/crypto/pkcs12/internal/rc2", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "vKbPb9fpjCdzuoOvajOJnYfHG2g=", "path": "golang.org/x/crypto/poly1305", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { - "checksumSHA1": "Hzj2xdOMAllJUdjY7WKiMKhUiTc=", + "checksumSHA1": "kFJZIGt0lpBufqlhuwRERsD1w6g=", "path": "golang.org/x/crypto/ssh", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "R9VBzgWGaphXv2/b4DLeMAbq9Xg=", "path": "golang.org/x/crypto/ssh/agent", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { "checksumSHA1": "BGm8lKZmvJbf/YOJLeL1rw2WVjA=", "path": "golang.org/x/crypto/ssh/terminal", - "revision": "e3636079e1a4c1f337f212cc5cd2aca108f6c900", - "revisionTime": "2018-09-26T22:24:25Z" + "revision": "0c41d7ab0a0ee717d4590a44bcb987dfd9e183eb", + "revisionTime": "2018-10-15T00:23:17Z" }, { - "checksumSHA1": "NjyXtXsaf0ulRJn6HQSP1FqGL4A=", + "checksumSHA1": "Pa6ivEz5fid3ECb1hptNbC+USfA=", "path": "golang.org/x/net/bpf", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "GtamqiJoL7PGHsN454AoffBFMa8=", "path": "golang.org/x/net/context", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=", "path": "golang.org/x/net/context/ctxhttp", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "pCY4YtdNKVBYRbNvODjx8hj0hIs=", "path": "golang.org/x/net/http/httpguts", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "N0asyGBBR6/7qIdCpdb2OxAyv4U=", "path": "golang.org/x/net/http2", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "KZniwnfpWkaTPhUQDUTvgex/7y0=", "path": "golang.org/x/net/http2/hpack", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "RcrB7tgYS/GMW4QrwVdMOTNqIU8=", "path": "golang.org/x/net/idna", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": 
"2018-10-11T05:27:23Z" }, { "checksumSHA1": "8oJoT8rfokzpkJ19eNhRs2JgRxI=", "path": "golang.org/x/net/internal/iana", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "YsXlbexuTtUXHyhSv927ILOkf6A=", "path": "golang.org/x/net/internal/socket", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "UxahDzW2v4mf/+aFxruuupaoIwo=", "path": "golang.org/x/net/internal/timeseries", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "K4XNY0c60IPBpv5sO6aiCuB8o/0=", "path": "golang.org/x/net/ipv4", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "hDszlYGI8m9puPsEIDr59ccTiF8=", "path": "golang.org/x/net/ipv6", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "6ckrK99wkirarIfFNX4+AHWBEHM=", "path": "golang.org/x/net/trace", - "revision": "f5e5bdd778241bfefa8627f7124c39cd6ad8d74f", - "revisionTime": "2018-10-01T16:28:04Z" + "revision": "49bb7cea24b1df9410e1712aa6433dae904ff66a", + "revisionTime": "2018-10-11T05:27:23Z" }, { "checksumSHA1": "j0z/2h06wsvTkGiLaZ5XFLbMKfo=", "path": "golang.org/x/oauth2", - "revision": "d2e6202438beef2727060aa7cabdd924d92ebfd9", - "revisionTime": "2018-08-21T21:02:52Z" + "revision": "c57b0facaced709681d9f90397429b9430a74754", + "revisionTime": "2018-10-03T18:30:08Z" }, { "checksumSHA1": "z7mSaGccufg15ki2YPd+M5PlsUc=", "path": "golang.org/x/oauth2/google", - "revision": "d2e6202438beef2727060aa7cabdd924d92ebfd9", - "revisionTime": "2018-08-21T21:02:52Z" + "revision": "c57b0facaced709681d9f90397429b9430a74754", + "revisionTime": "2018-10-03T18:30:08Z" }, { - "checksumSHA1": "6M+WzHfPmZjznMhi8CzsRYWjJ9U=", + "checksumSHA1": "wmIDyFF0NcgupcX+tBZbi9Z+R0A=", "path": "golang.org/x/oauth2/internal", - "revision": "d2e6202438beef2727060aa7cabdd924d92ebfd9", - "revisionTime": "2018-08-21T21:02:52Z" + "revision": "c57b0facaced709681d9f90397429b9430a74754", + "revisionTime": "2018-10-03T18:30:08Z" }, { "checksumSHA1": "huVltYnXdRFDJLgp/ZP9IALzG7g=", "path": "golang.org/x/oauth2/jws", - "revision": "d2e6202438beef2727060aa7cabdd924d92ebfd9", - "revisionTime": "2018-08-21T21:02:52Z" + "revision": "c57b0facaced709681d9f90397429b9430a74754", + "revisionTime": "2018-10-03T18:30:08Z" }, { "checksumSHA1": "QPndO4ODVdEBILRhJ6869UDAoHc=", "path": "golang.org/x/oauth2/jwt", - "revision": "d2e6202438beef2727060aa7cabdd924d92ebfd9", - "revisionTime": "2018-08-21T21:02:52Z" + "revision": "c57b0facaced709681d9f90397429b9430a74754", + "revisionTime": "2018-10-03T18:30:08Z" }, { "checksumSHA1": "REkmyB368pIiip76LiqMLspgCRk=", "path": "golang.org/x/sys/cpu", - "revision": "af653ce8b74f808d092db8ca9741fbb63d2a469d", - "revisionTime": "2018-10-03T14:28:42Z" + "revision": "fa43e7bc11baaae89f3f902b2b4d832b68234844", + "revisionTime": "2018-10-11T14:35:51Z" }, { - "checksumSHA1": 
"+q5VeihKKc5NphlJ02thPICdeo0=", + "checksumSHA1": "SiJNkx+YGtq3Gtr6Ldu6OW83O+U=", "path": "golang.org/x/sys/unix", - "revision": "af653ce8b74f808d092db8ca9741fbb63d2a469d", - "revisionTime": "2018-10-03T14:28:42Z" + "revision": "fa43e7bc11baaae89f3f902b2b4d832b68234844", + "revisionTime": "2018-10-11T14:35:51Z" }, { "checksumSHA1": "Y7nctMxT58lRM78VtElPerhcnEs=", "path": "golang.org/x/sys/windows", - "revision": "af653ce8b74f808d092db8ca9741fbb63d2a469d", - "revisionTime": "2018-10-03T14:28:42Z" + "revision": "fa43e7bc11baaae89f3f902b2b4d832b68234844", + "revisionTime": "2018-10-11T14:35:51Z" }, { "checksumSHA1": "tqqo7DEeFCclb58XbN44WwdpWww=", "path": "golang.org/x/text/encoding", - "revision": "905a57155faa8230500121607930ebb9dd8e139c", - "revisionTime": "2018-09-08T17:02:15Z" + "revision": "4d1c5fb19474adfe9562c9847ba425e7da817e81", + "revisionTime": "2018-09-21T09:56:34Z" }, { "checksumSHA1": "zeHyHebIZl1tGuwGllIhjfci+wI=", "path": "golang.org/x/text/encoding/internal", - "revision": "905a57155faa8230500121607930ebb9dd8e139c", - "revisionTime": "2018-09-08T17:02:15Z" + "revision": "4d1c5fb19474adfe9562c9847ba425e7da817e81", + "revisionTime": "2018-09-21T09:56:34Z" }, { "checksumSHA1": "+bFlIgTuq1Rf8QVtiEGWeCJEnpY=", "path": "golang.org/x/text/encoding/internal/identifier", - "revision": "905a57155faa8230500121607930ebb9dd8e139c", - "revisionTime": "2018-09-08T17:02:15Z" + "revision": "4d1c5fb19474adfe9562c9847ba425e7da817e81", + "revisionTime": "2018-09-21T09:56:34Z" }, { "checksumSHA1": "bAJTZJ3IGJdNmN/PSlRMRxWtxec=", "path": "golang.org/x/text/encoding/unicode", - "revision": "905a57155faa8230500121607930ebb9dd8e139c", - "revisionTime": "2018-09-08T17:02:15Z" + "revision": "4d1c5fb19474adfe9562c9847ba425e7da817e81", + "revisionTime": "2018-09-21T09:56:34Z" }, { "checksumSHA1": "Qk7dljcrEK1BJkAEZguxAbG9dSo=", "path": "golang.org/x/text/internal/utf8internal", - "revision": "905a57155faa8230500121607930ebb9dd8e139c", - "revisionTime": "2018-09-08T17:02:15Z" + "revision": "4d1c5fb19474adfe9562c9847ba425e7da817e81", + "revisionTime": "2018-09-21T09:56:34Z" }, { "checksumSHA1": "IV4MN7KGBSocu/5NR3le3sxup4Y=", "path": "golang.org/x/text/runes", - "revision": "905a57155faa8230500121607930ebb9dd8e139c", - "revisionTime": "2018-09-08T17:02:15Z" + "revision": "4d1c5fb19474adfe9562c9847ba425e7da817e81", + "revisionTime": "2018-09-21T09:56:34Z" }, { "checksumSHA1": "CbpjEkkOeh0fdM/V8xKDdI0AA88=", "path": "golang.org/x/text/secure/bidirule", - "revision": "905a57155faa8230500121607930ebb9dd8e139c", - "revisionTime": "2018-09-08T17:02:15Z" + "revision": "4d1c5fb19474adfe9562c9847ba425e7da817e81", + "revisionTime": "2018-09-21T09:56:34Z" }, { "checksumSHA1": "ziMb9+ANGRJSSIuxYdRbA+cDRBQ=", "path": "golang.org/x/text/transform", - "revision": "905a57155faa8230500121607930ebb9dd8e139c", - "revisionTime": "2018-09-08T17:02:15Z" + "revision": "4d1c5fb19474adfe9562c9847ba425e7da817e81", + "revisionTime": "2018-09-21T09:56:34Z" }, { "checksumSHA1": "Qw4qdlZHCnBurAPPrSt+EKPIngM=", "path": "golang.org/x/text/unicode/bidi", - "revision": "905a57155faa8230500121607930ebb9dd8e139c", - "revisionTime": "2018-09-08T17:02:15Z" + "revision": "4d1c5fb19474adfe9562c9847ba425e7da817e81", + "revisionTime": "2018-09-21T09:56:34Z" }, { "checksumSHA1": "XJr6+rzzxASewSbC/SCStyGlmuw=", "path": "golang.org/x/text/unicode/norm", - "revision": "905a57155faa8230500121607930ebb9dd8e139c", - "revisionTime": "2018-09-08T17:02:15Z" + "revision": "4d1c5fb19474adfe9562c9847ba425e7da817e81", + "revisionTime": "2018-09-21T09:56:34Z" 
}, { "checksumSHA1": "HoCvrd3hEhsFeBOdEw7cbcfyk50=", @@ -2737,88 +2743,88 @@ "revisionTime": "2018-04-12T16:56:04Z" }, { - "checksumSHA1": "QkzppExyctFz029dvXzKzh6UHgU=", + "checksumSHA1": "gmT0tI5vdUnvc9Nase6RV0q+lMM=", "path": "google.golang.org/api/compute/v1", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "sdBIpvPJTg/6SjFgFVcRhjlkz0s=", "path": "google.golang.org/api/gensupport", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "vN2q4J0jDKFQRGfFQ15cOSILz5s=", "path": "google.golang.org/api/googleapi", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "1K0JxrUfDqAB3MyRiU1LKjfHyf4=", "path": "google.golang.org/api/googleapi/internal/uritemplates", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "Mr2fXhMRzlQCgANFm91s536pG7E=", "path": "google.golang.org/api/googleapi/transport", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { - "checksumSHA1": "INsH0xknP2SUmV25kixoWttV6Ts=", + "checksumSHA1": "1qOzh2razY/WraqaAt9ZJGxFa/0=", "path": "google.golang.org/api/iam/v1", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "Udn/2xRvKr6bjxdRE9x3Xr7I3+g=", "path": "google.golang.org/api/internal", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "zh9AcT6oNvhnOqb7w7njY48TkvI=", "path": "google.golang.org/api/iterator", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "eWpZHp+uWAazxPugLLYfYP8rRKg=", "path": "google.golang.org/api/oauth2/v2", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "UbkxTZQanhUNhfpC0TN8vYFIsho=", "path": "google.golang.org/api/option", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "uAQGaBzKQeNiJpWP5rc7nAYTR04=", "path": "google.golang.org/api/storage/v1", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { - "checksumSHA1": "5liLu0bBeBRE6tzLr3Al7usDJ+U=", + "checksumSHA1": 
"HhOh8QNToR6IhjDP/wlAh1wT7aA=", "path": "google.golang.org/api/transport/grpc", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "qROxv/rFbX8MnEjpfvxEIoJh+UA=", "path": "google.golang.org/api/transport/http", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "sJcKCvjPtoysqyelsB2CQzC5oQI=", "path": "google.golang.org/api/transport/http/internal/propagation", - "revision": "f5c49d98d21cc639f7ba8c7e4b127e88ee2af7ed", - "revisionTime": "2018-10-03T00:07:58Z" + "revision": "625cd1887957946515db468ce519bb71fa31fc7f", + "revisionTime": "2018-10-12T22:54:34Z" }, { "checksumSHA1": "fEcQGfCu8agHVWKVfVfDLAIKOf0=", @@ -2899,214 +2905,214 @@ "revisionTime": "2018-09-18T20:26:59Z" }, { - "checksumSHA1": "JUkKqVGWtx2JqAO9CX+vst3tAwA=", + "checksumSHA1": "vZ5PYWH+FgRxI2yMzIOtdonWH9w=", "path": "google.golang.org/genproto/googleapis/api/annotations", - "revision": "c7e5094acea1ca1b899e2259d80a6b0f882f81f8", - "revisionTime": "2018-09-28T22:33:49Z" + "revision": "af9cb2a35e7f169ec875002c1829c9b315cddc04", + "revisionTime": "2018-10-04T00:54:41Z" }, { - "checksumSHA1": "QqTLNfg/l81YFvtlFSja1k+pbLY=", + "checksumSHA1": "D2/OkmqUSm74ngHV/7GVwNGgNjE=", "path": "google.golang.org/genproto/googleapis/iam/v1", - "revision": "c7e5094acea1ca1b899e2259d80a6b0f882f81f8", - "revisionTime": "2018-09-28T22:33:49Z" + "revision": "af9cb2a35e7f169ec875002c1829c9b315cddc04", + "revisionTime": "2018-10-04T00:54:41Z" }, { - "checksumSHA1": "Pbu93QLSWFuaYv9UsXzczPKF5ek=", + "checksumSHA1": "YlTC3MG+u3IHAr1iHJDsGjKAshU=", "path": "google.golang.org/genproto/googleapis/rpc/code", - "revision": "c7e5094acea1ca1b899e2259d80a6b0f882f81f8", - "revisionTime": "2018-09-28T22:33:49Z" + "revision": "af9cb2a35e7f169ec875002c1829c9b315cddc04", + "revisionTime": "2018-10-04T00:54:41Z" }, { - "checksumSHA1": "vSYht5ceTKyCTCyA3D/n2rQ9aek=", + "checksumSHA1": "CNGv/r61wb6XyWWsCKNnBhq6Ab8=", "path": "google.golang.org/genproto/googleapis/rpc/errdetails", - "revision": "c7e5094acea1ca1b899e2259d80a6b0f882f81f8", - "revisionTime": "2018-09-28T22:33:49Z" + "revision": "af9cb2a35e7f169ec875002c1829c9b315cddc04", + "revisionTime": "2018-10-04T00:54:41Z" }, { - "checksumSHA1": "vQIg9/iv/oEKsIDz42NOrvSG/Z4=", + "checksumSHA1": "MgYFT27I9gfAtSVBpGVqkCYOj3U=", "path": "google.golang.org/genproto/googleapis/rpc/status", - "revision": "c7e5094acea1ca1b899e2259d80a6b0f882f81f8", - "revisionTime": "2018-09-28T22:33:49Z" + "revision": "af9cb2a35e7f169ec875002c1829c9b315cddc04", + "revisionTime": "2018-10-04T00:54:41Z" }, { - "checksumSHA1": "gd9r8O+6jKsJOg9voXKuKKcGuOM=", + "checksumSHA1": "YWYZseFIYe2DR9AQaaAP5TELIsI=", "path": "google.golang.org/genproto/googleapis/spanner/v1", - "revision": "c7e5094acea1ca1b899e2259d80a6b0f882f81f8", - "revisionTime": "2018-09-28T22:33:49Z" + "revision": "af9cb2a35e7f169ec875002c1829c9b315cddc04", + "revisionTime": "2018-10-04T00:54:41Z" }, { - "checksumSHA1": "wnDe3e3IOnmWptx5J+yoOoEYcME=", + "checksumSHA1": "5cU6eHFzsIKIyj/oKC28aVBEyMs=", "path": "google.golang.org/grpc", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { - 
"checksumSHA1": "E0pC3ZRxNttVoj7fv6XC8PLNAp8=", + "checksumSHA1": "9KEKKMRAdFnz2sMBXbb33ZLS8Oo=", "path": "google.golang.org/grpc/balancer", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "lw+L836hLeH8+//le+C+ycddCCU=", "path": "google.golang.org/grpc/balancer/base", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "DJ1AtOk4Pu7bqtUMob95Hw8HPNw=", "path": "google.golang.org/grpc/balancer/roundrobin", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "R3tuACGAPyK4lr+oSNt1saUzC0M=", "path": "google.golang.org/grpc/codes", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "XH2WYcDNwVO47zYShREJjcYXm0Y=", "path": "google.golang.org/grpc/connectivity", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "5r6NIQY1c3NjwLtxUOo/BcUOqFo=", "path": "google.golang.org/grpc/credentials", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "QbufP1o0bXrtd5XecqdRCK/Vl0M=", "path": "google.golang.org/grpc/credentials/oauth", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "cfLb+pzWB+Glwp82rgfcEST1mv8=", "path": "google.golang.org/grpc/encoding", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "LKKkn7EYA+Do9Qwb2/SUKLFNxoo=", "path": "google.golang.org/grpc/encoding/proto", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "ZPPSFisPDz2ANO4FBZIft+fRxyk=", "path": "google.golang.org/grpc/grpclog", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { - "checksumSHA1": "5f4iDmBsp267R75UdXD6s01l+hk=", + "checksumSHA1": "1w2nqVXgpqKg3mHXsllP4t15eO8=", "path": "google.golang.org/grpc/health", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "KfgIKMqGJ8FdFbWlGDsnmrCY7eE=", "path": "google.golang.org/grpc/health/grpc_health_v1", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": 
"2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "LCor0tzo+6c0KVpfkpY7P65JFGo=", "path": "google.golang.org/grpc/internal", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "uDJA7QK2iGnEwbd9TPqkLaM+xuU=", "path": "google.golang.org/grpc/internal/backoff", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { - "checksumSHA1": "KCnkjqO49gNTv2JmnBdJqxQ7JT0=", + "checksumSHA1": "V6eyqZJfYh+cX+I/AxPVjkQLjTM=", "path": "google.golang.org/grpc/internal/channelz", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "5dFUCEaPjKwza9kwKqgljp8ckU4=", "path": "google.golang.org/grpc/internal/envconfig", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "70gndc/uHwyAl3D45zqp7vyHWlo=", "path": "google.golang.org/grpc/internal/grpcrand", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { - "checksumSHA1": "Knc8CAFB/mRN3o3WsalpBrIYsdU=", + "checksumSHA1": "2ktnBLlL5W/PpaYOTsAmPPETG20=", "path": "google.golang.org/grpc/internal/transport", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "350+v+N+AuknxomqjND19nR969g=", "path": "google.golang.org/grpc/keepalive", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "OjIAi5AzqlQ7kLtdAyjvdgMf6hc=", "path": "google.golang.org/grpc/metadata", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "VvGBoawND0urmYDy11FT+U1IHtU=", "path": "google.golang.org/grpc/naming", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "n5EgDdBqFMa2KQFhtl+FF/4gIFo=", "path": "google.golang.org/grpc/peer", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "GEq6wwE1qWLmkaM02SjxBmmnHDo=", "path": "google.golang.org/grpc/resolver", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "grHAHa6Fi3WBsXJpmlEOlRbWWVg=", 
"path": "google.golang.org/grpc/resolver/dns", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "zs9M4xE8Lyg4wvuYvR00XoBxmuw=", "path": "google.golang.org/grpc/resolver/passthrough", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "YclPgme2gT3S0hTkHVdE1zAxJdo=", "path": "google.golang.org/grpc/stats", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "hFyBO5vgsMamKhUOSyPCqROk1vo=", "path": "google.golang.org/grpc/status", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "qvArRhlrww5WvRmbyMF2mUfbJew=", "path": "google.golang.org/grpc/tap", - "revision": "431e4028c50657d346e072d44889191b4a717d38", - "revisionTime": "2018-10-03T01:26:40Z" + "revision": "1da8e51941b9a2c8f4bc3271acc30393c29e9cc0", + "revisionTime": "2018-10-15T21:28:12Z" }, { "checksumSHA1": "xsaHqy6/sonLV6xIxTNh4FfkWbU=", @@ -3193,154 +3199,154 @@ "revisionTime": "2018-03-28T19:50:20Z" }, { - "checksumSHA1": "hC8w4Dj+ThhnfQaw4+vzztonBTY=", + "checksumSHA1": "izopgFdSaf2cSxUY8i//YxpvDjk=", "path": "k8s.io/api/authentication/v1", - "revision": "a191abe0b71e00ce4cde58af8002aa4c1a8bb068", - "revisionTime": "2018-09-25T13:13:23Z" + "revision": "bbf5c193d86c33256702fc781833463a7bca7849", + "revisionTime": "2018-10-14T04:47:08Z" }, { "checksumSHA1": "eD3XRvVzKmBUr4mFMD9hAMMuLSE=", "path": "k8s.io/apimachinery/pkg/api/errors", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { - "checksumSHA1": "fIjxQX7kAy+JvcmSKJ6pcYQt17o=", + "checksumSHA1": "3HmyxPj391ek2E2x9rL9zMbeCKA=", "path": "k8s.io/apimachinery/pkg/api/resource", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { - "checksumSHA1": "HMeyyfulw+kOjgT5RNnimUNHm54=", + "checksumSHA1": "d/GUWW4B/VQX1aJGtv5pOx23jl0=", "path": "k8s.io/apimachinery/pkg/apis/meta/v1", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "I6+J//YuSZdxncAuc/kar8G1jkY=", "path": "k8s.io/apimachinery/pkg/conversion", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "lZgVHIUDKFlQKJBYQVWqED5sDnE=", "path": "k8s.io/apimachinery/pkg/conversion/queryparams", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "zgYf1VhDcfCpouwRgF5Zodz+g6M=", "path": 
"k8s.io/apimachinery/pkg/fields", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "rQgW7h6N5U40wFE9/zgmjwOrf8w=", "path": "k8s.io/apimachinery/pkg/labels", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { - "checksumSHA1": "UGZdS4vnY1JjxoonKgMqRf/Ljbs=", + "checksumSHA1": "JIpwe6Bv1V3ZtLBRdAYWmbYvL7E=", "path": "k8s.io/apimachinery/pkg/runtime", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { - "checksumSHA1": "/Kc4utHTK2ej1EEbo/WWTKjGGw0=", + "checksumSHA1": "+smcRTuloHxJVUatJ13qZ4NoUOk=", "path": "k8s.io/apimachinery/pkg/runtime/schema", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "p9Wv7xurZXAW0jYL/SLNPbiUjaA=", "path": "k8s.io/apimachinery/pkg/selection", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "SwEleXXE22RGlg0t7wirGDuisO4=", "path": "k8s.io/apimachinery/pkg/types", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "hHYW7OWXFMbRVotl2830pa4kO+Y=", "path": "k8s.io/apimachinery/pkg/util/errors", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { - "checksumSHA1": "EtANF+MRp4PfGxCMQHa1SoOtfno=", + "checksumSHA1": "oTqOscZlw40TtJEHP0OWjTeIszA=", "path": "k8s.io/apimachinery/pkg/util/intstr", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "NaIzEbjlhEMikewzRXivi2r0xyM=", "path": "k8s.io/apimachinery/pkg/util/json", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "y1dxj1ipJgt2PRXXSXDmXrSka6A=", "path": "k8s.io/apimachinery/pkg/util/naming", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "8M1X3xMlH0bDsm18RJZULuzWPWw=", "path": "k8s.io/apimachinery/pkg/util/net", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "keWEyQHGKGkDo2SARgphbGgN608=", "path": "k8s.io/apimachinery/pkg/util/runtime", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + 
"revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "ozEwMzA48zwMyQ50SwNKSM852U4=", "path": "k8s.io/apimachinery/pkg/util/sets", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "xE6oX9Tv1pEhOSa/U9nMIaaNDPs=", "path": "k8s.io/apimachinery/pkg/util/validation", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "T+DDCd+Y07tEOHiPNt2zzXFq6Tw=", "path": "k8s.io/apimachinery/pkg/util/validation/field", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "W0veWY2Vj1W0w8rC0vPtl5Ey8nU=", "path": "k8s.io/apimachinery/pkg/watch", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "9sFA+EjKrjpmK4OofQH0p0Rowfg=", "path": "k8s.io/apimachinery/third_party/forked/golang/reflect", - "revision": "ed5594dcf47bc331577d2ffbcd24c4b1d20d9a6d", - "revisionTime": "2018-10-03T11:43:59Z" + "revision": "60666be32c5de527b69dabe8e4400b4f0aa897de", + "revisionTime": "2018-10-15T20:53:01Z" }, { "checksumSHA1": "mDGMF5YdxQj+yJfYKEOhZ+IwRQQ=", "path": "layeh.com/radius", - "revision": "86e590152a3b227910515879c6d980c324b4c737", - "revisionTime": "2018-09-27T12:16:27Z" + "revision": "3e49d211563650cdc0a7848a2e315d1737cac01c", + "revisionTime": "2018-10-13T12:38:17Z" }, { "checksumSHA1": "PHUjek3Jt4v+AlPyMJdFlVBa40k=", "path": "layeh.com/radius/rfc2865", - "revision": "86e590152a3b227910515879c6d980c324b4c737", - "revisionTime": "2018-09-27T12:16:27Z" + "revision": "3e49d211563650cdc0a7848a2e315d1737cac01c", + "revisionTime": "2018-10-13T12:38:17Z" } ], "rootPath": "github.com/hashicorp/vault" diff --git a/website/source/guides/operations/replication.html.md b/website/source/guides/operations/replication.html.md index 438500b32..de7b5d82a 100644 --- a/website/source/guides/operations/replication.html.md +++ b/website/source/guides/operations/replication.html.md @@ -77,7 +77,16 @@ remove it from rotation (e.g. if using Consul for service discovery), but if a standby does not attempt taking over it will throw errors. We hope to make this workflow better in a future update. -### Dev-Mode Root Tokens +### Secondary Tokens + +On a production system, after a secondary is activated, the enabled +auth methods should be used to get tokens with appropriate policies, +as policies and auth method configuration are replicated. + +The generate-root command can also be used to generate a root token local to +the secondary cluster. After the secondary is activated, it will need to use the unseal or recovery keys from the Primary when generating a new root token or performing other commands that require unseal or recovery keys. + +#### Dev-Mode Root Tokens To ease development and testing, when both the primary and secondary are running in development mode, the initial root token created by the primary @@ -86,13 +95,6 @@ populated into the secondary upon activation. 
This allows a developer to keep a consistent `~/.vault-token` file or `VAULT_TOKEN` environment variable when working with both clusters. -On a production system, after a secondary is activated, the enabled -auth methods should be used to get tokens with appropriate policies, -as policies and auth method configuration are replicated. - -The generate-root command can also be used to generate a root token local to -the secondary cluster. - ## Managing Vault Performance Replication Vault’s performance replication model is intended to allow horizontally scaling Vault’s