Update Deps (#5454)

This commit is contained in:
Brian Kassouf 2018-10-03 09:55:26 -07:00 committed by GitHub
parent 2e27e96441
commit 9307ba4b0b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1141 changed files with 135505 additions and 41677 deletions

View File

@ -69,9 +69,12 @@ func validDatabaseName(db string) error {
// client is safe to use concurrently, except for its Close method.
type Client struct {
// rr must be accessed through atomic operations.
rr uint32
conns []*grpc.ClientConn
clients []sppb.SpannerClient
rr uint32
// TODO(deklerk): we should not keep multiple ClientConns / SpannerClients. Instead, we should
// have a single ClientConn that has many connections: https://github.com/googleapis/google-api-go-client/blob/003c13302b3ea5ae44344459ba080364bd46155f/internal/pool.go
conns []*grpc.ClientConn
clients []sppb.SpannerClient
database string
// Metadata to be sent with each request.
md metadata.MD
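
The comment above requires rr to be accessed atomically. As a rough illustration (not necessarily the package's actual code), a package-internal helper for round-robin selection over the cached gRPC clients could look like this; the name nextClient is hypothetical and assumes the sync/atomic import:

func (c *Client) nextClient() sppb.SpannerClient {
	// Atomically advance the counter; wrap-around on overflow is harmless
	// for round-robin selection.
	i := atomic.AddUint32(&c.rr, 1) % uint32(len(c.clients))
	return c.clients[i]
}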

View File

@ -19,6 +19,8 @@ package spanner
import (
"fmt"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
"golang.org/x/net/context"
)
@ -57,3 +59,25 @@ func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format str
}
trace.FromContext(ctx).Annotatef(attrs, format, args...)
}
const statsPrefix = "cloud.google.com/go/spanner/"
func recordStat(ctx context.Context, m *stats.Int64Measure, n int64) {
stats.Record(ctx, m.M(n))
}
var (
// OpenSessionCount is a measure of the number of sessions currently opened.
// It is EXPERIMENTAL and subject to change or removal without notice.
OpenSessionCount = stats.Int64(statsPrefix+"open_session_count", "Number of sessions currently opened",
stats.UnitDimensionless)
// OpenSessionCountView is a view of the last value of OpenSessionCount.
// It is EXPERIMENTAL and subject to change or removal without notice.
OpenSessionCountView = &view.View{
Name: OpenSessionCount.Name(),
Description: OpenSessionCount.Description(),
Measure: OpenSessionCount,
Aggregation: view.LastValue(),
}
)
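
For reference, a minimal sketch of how a caller outside this package might subscribe to the exported (and experimental) view; exporter setup is omitted and assumed to be configured elsewhere:

import (
	"log"

	"cloud.google.com/go/spanner"
	"go.opencensus.io/stats/view"
)

func registerSpannerViews() {
	// Register the view so a configured OpenCensus exporter can report the
	// open-session gauge recorded by recordStat above.
	if err := view.Register(spanner.OpenSessionCountView); err != nil {
		log.Fatalf("failed to register view: %v", err)
	}
}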

View File

@ -29,3 +29,11 @@ func traceEndSpan(context.Context, error) {
func tracePrintf(context.Context, map[string]interface{}, string, ...interface{}) {
}
type dummy struct{}
// Not supported below Go 1.8.
var OpenSessionCount dummy
func recordStat(context.Context, dummy, int64) {
}

View File

@ -290,8 +290,8 @@ type resumableStreamDecoder struct {
// beginning at the restartToken if non-nil.
func newResumableStreamDecoder(ctx context.Context, rpc func(ct context.Context, restartToken []byte) (streamingReceiver, error)) *resumableStreamDecoder {
return &resumableStreamDecoder{
ctx: ctx,
rpc: rpc,
ctx: ctx,
rpc: rpc,
maxBytesBetweenResumeTokens: atomic.LoadInt32(&maxBytesBetweenResumeTokens),
backoff: defaultBackoff,
}

View File

@ -71,20 +71,21 @@ func isErrorRST(err error) bool {
return false
}
// isErrorUnexpectedEOF returns true if error is generated by gRPC layer
// receiving io.EOF unexpectedly.
// isErrorUnexpectedEOF returns true if error is generated by gRPC layer receiving io.EOF unexpectedly.
func isErrorUnexpectedEOF(err error) bool {
if err == nil {
return false
}
// Unexpected EOF is a transport layer issue that could be recovered by
// retries. The most likely scenario is a flaky RecvMsg() call due to
// network issues.
// For grpc version >= 1.14.0, the error code is Internal.
// (https://github.com/grpc/grpc-go/releases/tag/v1.14.0)
if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "unexpected EOF") {
return true
}
// For grpc version < 1.14.0, the error code is Unknown.
if ErrCode(err) == codes.Unknown && strings.Contains(ErrDesc(err), "unexpected EOF") {
// Unexpected EOF is a transport layer issue that
// could be recovered by retries. The most likely
// scenario is a flaky RecvMsg() call due to network
// issues.
// TODO: once gRPC is able to categorize
// this as retryable error, we should stop parsing the
// error message here.
return true
}
return false

View File

@ -451,6 +451,7 @@ func (p *sessionPool) createSession(ctx context.Context) (*session, error) {
if !done {
// Session creation failed, give budget back.
p.numOpened--
recordStat(ctx, OpenSessionCount, int64(p.numOpened))
}
p.createReqs--
// Notify other waiters blocking on session creation.
@ -551,6 +552,7 @@ func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) {
}
// Take budget before the actual session creation.
p.numOpened++
recordStat(ctx, OpenSessionCount, int64(p.numOpened))
p.createReqs++
p.mu.Unlock()
if s, err = p.createSession(ctx); err != nil {
@ -613,6 +615,7 @@ func (p *sessionPool) takeWriteSession(ctx context.Context) (*sessionHandle, err
// Take budget before the actual session creation.
p.numOpened++
recordStat(ctx, OpenSessionCount, int64(p.numOpened))
p.createReqs++
p.mu.Unlock()
if s, err = p.createSession(ctx); err != nil {
@ -673,6 +676,7 @@ func (p *sessionPool) remove(s *session, isExpire bool) bool {
if s.invalidate() {
// Decrease the number of opened sessions.
p.numOpened--
recordStat(context.Background(), OpenSessionCount, int64(p.numOpened))
// Broadcast that a session has been destroyed.
close(p.mayGetSession)
p.mayGetSession = make(chan struct{})
@ -966,6 +970,7 @@ func (hc *healthChecker) maintainer() {
break
}
p.numOpened++
recordStat(ctx, OpenSessionCount, int64(p.numOpened))
p.createReqs++
shouldPrepareWrite := p.shouldPrepareWrite()
p.mu.Unlock()

View File

@ -169,6 +169,7 @@ func (t *txReadOnly) QueryWithStats(ctx context.Context, statement Statement) *R
// AnalyzeQuery returns the query plan for statement.
func (t *txReadOnly) AnalyzeQuery(ctx context.Context, statement Statement) (*sppb.QueryPlan, error) {
iter := t.query(ctx, statement, sppb.ExecuteSqlRequest_PLAN)
defer iter.Stop()
for {
_, err := iter.Next()
if err == iterator.Done {

View File

@ -48,10 +48,21 @@ const (
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)
// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
// ACLRule represents a grant for a role to an entity (user, group or team) for a
// Google Cloud Storage object or bucket.
type ACLRule struct {
Entity ACLEntity
Role ACLRole
Entity ACLEntity
EntityID string
Role ACLRole
Domain string
Email string
ProjectTeam *ProjectTeam
}
// ProjectTeam is the project team associated with the entity, if any.
type ProjectTeam struct {
ProjectNumber string
Team string
}
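
A rough usage sketch of the richer rule fields added above, assuming an existing *storage.Client named client and a background ctx; bucket and object names are placeholders:

rules, err := client.Bucket("my-bucket").Object("my-object").ACL().List(ctx)
if err != nil {
	// TODO: Handle error.
}
for _, r := range rules {
	fmt.Println(r.Entity, r.Role, r.Email, r.Domain)
	if r.ProjectTeam != nil {
		fmt.Println("  project team:", r.ProjectTeam.ProjectNumber, r.ProjectTeam.Team)
	}
}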
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
@ -77,7 +88,7 @@ func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
return a.bucketDelete(ctx, entity)
}
// Set sets the permission level for the given entity.
// Set sets the role for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
defer func() { trace.EndSpan(ctx, err) }()
@ -117,7 +128,7 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
if err != nil {
return nil, err
}
return toACLRules(acls.Items), nil
return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
@ -140,12 +151,7 @@ func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
if err != nil {
return nil, err
}
r := make([]ACLRule, len(acls.Items))
for i, v := range acls.Items {
r[i].Entity = ACLEntity(v.Entity)
r[i].Role = ACLRole(v.Role)
}
return r, nil
return toBucketACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
@ -167,15 +173,11 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
}
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error {
return runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
a.configureCall(req, ctx)
return req.Do()
})
if err != nil {
return err
}
return nil
}
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
@ -190,7 +192,7 @@ func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
if err != nil {
return nil, err
}
return toACLRules(acls.Items), nil
return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
@ -225,9 +227,7 @@ func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
})
}
func (a *ACLHandle) configureCall(call interface {
Header() http.Header
}, ctx context.Context) {
func (a *ACLHandle) configureCall(call interface{ Header() http.Header }, ctx context.Context) {
vc := reflect.ValueOf(call)
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
if a.userProject != "" {
@ -236,10 +236,100 @@ func (a *ACLHandle) configureCall(call interface {
setClientHeader(call.Header())
}
func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
r := make([]ACLRule, 0, len(items))
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
rs = append(rs, toObjectACLRule(item))
}
return rs
}
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
rs = append(rs, toBucketACLRule(item))
}
return rs
}
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toObjectProjectTeam(a.ProjectTeam),
}
}
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toBucketProjectTeam(a.ProjectTeam),
}
}
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.ObjectAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
}
return r
}
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.BucketAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
}
return r
}
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
return &raw.BucketAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
return &raw.ObjectAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}

View File

@ -82,6 +82,12 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
}
req := b.c.raw.Buckets.Insert(projectID, bkt)
setClientHeader(req.Header())
if attrs != nil && attrs.PredefinedACL != "" {
req.PredefinedAcl(attrs.PredefinedACL)
}
if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
}
return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
}
@ -188,6 +194,12 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
if err != nil {
return nil, err
}
if uattrs.PredefinedACL != "" {
req.PredefinedAcl(uattrs.PredefinedACL)
}
if uattrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
}
// TODO(jba): retry iff metagen is set?
rb, err := req.Context(ctx).Do()
if err != nil {
@ -223,6 +235,24 @@ type BucketAttrs struct {
// apply to new objects when no object ACL is provided.
DefaultObjectACL []ACLRule
// DefaultEventBasedHold is the default value for event-based hold on
// newly created objects in this bucket. It defaults to false.
DefaultEventBasedHold bool
// If not empty, applies a predefined set of access controls. It should be set
// only when creating a bucket.
// It is always empty for BucketAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
// for valid values.
PredefinedACL string
// If not empty, applies a predefined set of default object access controls.
// It should be set only when creating a bucket.
// It is always empty for BucketAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
// for valid values.
PredefinedDefaultObjectACL string
// Location is the location of the bucket. It defaults to "US".
Location string
@ -272,6 +302,12 @@ type BucketAttrs struct {
// The encryption configuration used by default for newly inserted objects.
Encryption *BucketEncryption
// The logging configuration.
Logging *BucketLogging
// The website configuration.
Website *BucketWebsite
}
// Lifecycle is the lifecycle configuration for objects in the bucket.
@ -302,6 +338,11 @@ type RetentionPolicy struct {
// EffectiveTime is the time from which the policy was enforced and
// effective. This field is read-only.
EffectiveTime time.Time
// IsLocked describes whether the bucket is locked. Once locked, an
// object retention policy cannot be modified.
// This field is read-only.
IsLocked bool
}
const (
@ -389,6 +430,34 @@ type LifecycleCondition struct {
NumNewerVersions int64
}
// BucketLogging holds the bucket's logging configuration, which defines the
// destination bucket and optional name prefix for the current bucket's
// logs.
type BucketLogging struct {
// The destination bucket where the current bucket's logs
// should be placed.
LogBucket string
// A prefix for log object names.
LogObjectPrefix string
}
// Website holds the bucket's website configuration, controlling how the
// service behaves when accessing bucket contents as a web site. See
// https://cloud.google.com/storage/docs/static-website for more information.
type BucketWebsite struct {
// If the requested object path is missing, the service will ensure the path has
// a trailing '/', append this suffix, and attempt to retrieve the resulting
// object. This allows the creation of index.html objects to represent directory
// pages.
MainPageSuffix string
// If the requested object path is missing, and any mainPageSuffix object is
// missing, if applicable, the service will return the named object from this
// bucket as the content for a 404 Not Found result.
NotFoundPage string
}
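
As a hedged sketch of creating a bucket with the new attributes (predefined ACLs, logging, and website configuration), assuming an existing *storage.Client named client; all names and ACL values are placeholders chosen from the documented predefined sets:

attrs := &storage.BucketAttrs{
	PredefinedACL:              "publicRead",
	PredefinedDefaultObjectACL: "publicRead",
	Logging:                    &storage.BucketLogging{LogBucket: "my-log-bucket", LogObjectPrefix: "access"},
	Website:                    &storage.BucketWebsite{MainPageSuffix: "index.html", NotFoundPage: "404.html"},
}
if err := client.Bucket("my-bucket").Create(ctx, "my-project", attrs); err != nil {
	// TODO: Handle error.
}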
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if b == nil {
return nil, nil
@ -397,52 +466,29 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if err != nil {
return nil, err
}
bucket := &BucketAttrs{
Name: b.Name,
Location: b.Location,
MetaGeneration: b.Metageneration,
StorageClass: b.StorageClass,
Created: convertTime(b.TimeCreated),
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
Labels: b.Labels,
RequesterPays: b.Billing != nil && b.Billing.RequesterPays,
Lifecycle: toLifecycle(b.Lifecycle),
RetentionPolicy: rp,
CORS: toCORS(b.Cors),
Encryption: toBucketEncryption(b.Encryption),
}
acl := make([]ACLRule, len(b.Acl))
for i, rule := range b.Acl {
acl[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
bucket.ACL = acl
objACL := make([]ACLRule, len(b.DefaultObjectAcl))
for i, rule := range b.DefaultObjectAcl {
objACL[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
bucket.DefaultObjectACL = objACL
return bucket, nil
return &BucketAttrs{
Name: b.Name,
Location: b.Location,
MetaGeneration: b.Metageneration,
DefaultEventBasedHold: b.DefaultEventBasedHold,
StorageClass: b.StorageClass,
Created: convertTime(b.TimeCreated),
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
ACL: toBucketACLRules(b.Acl),
DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl),
Labels: b.Labels,
RequesterPays: b.Billing != nil && b.Billing.RequesterPays,
Lifecycle: toLifecycle(b.Lifecycle),
RetentionPolicy: rp,
CORS: toCORS(b.Cors),
Encryption: toBucketEncryption(b.Encryption),
Logging: toBucketLogging(b.Logging),
Website: toBucketWebsite(b.Website),
}, nil
}
// toRawBucket copies the editable attribute from b to the raw library's Bucket type.
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
var acl []*raw.BucketAccessControl
if len(b.ACL) > 0 {
acl = make([]*raw.BucketAccessControl, len(b.ACL))
for i, rule := range b.ACL {
acl[i] = &raw.BucketAccessControl{
Entity: string(rule.Entity),
Role: string(rule.Role),
}
}
}
dACL := toRawObjectACL(b.DefaultObjectACL)
// Copy label map.
var labels map[string]string
if len(b.Labels) > 0 {
@ -464,10 +510,10 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
}
return &raw.Bucket{
Name: b.Name,
DefaultObjectAcl: dACL,
Location: b.Location,
StorageClass: b.StorageClass,
Acl: acl,
Acl: toRawBucketACL(b.ACL),
DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL),
Versioning: v,
Labels: labels,
Billing: bb,
@ -475,6 +521,8 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
Cors: toRawCORS(b.CORS),
Encryption: b.Encryption.toRawBucketEncryption(),
Logging: b.Logging.toRawBucketLogging(),
Website: b.Website.toRawBucketWebsite(),
}
}
@ -516,6 +564,10 @@ type BucketAttrsToUpdate struct {
// If set, updates whether the bucket is a Requester Pays bucket.
RequesterPays optional.Bool
// DefaultEventBasedHold is the default value for event-based hold on
// newly created objects in this bucket.
DefaultEventBasedHold optional.Bool
// If set, updates the retention policy of the bucket. Using
// RetentionPolicy.RetentionPeriod = 0 will delete the existing policy.
//
@ -536,6 +588,20 @@ type BucketAttrsToUpdate struct {
// If set, replaces the lifecycle configuration of the bucket.
Lifecycle *Lifecycle
// If set, replaces the logging configuration of the bucket.
Logging *BucketLogging
// If set, replaces the website configuration of the bucket.
Website *BucketWebsite
// If not empty, applies a predefined set of access controls.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
PredefinedACL string
// If not empty, applies a predefined set of default object access controls.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
PredefinedDefaultObjectACL string
setLabels map[string]string
deleteLabels map[string]bool
}
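
A minimal sketch of a bucket update that exercises the new fields, assuming an existing *storage.Client named client; the values are placeholders:

uattrs := storage.BucketAttrsToUpdate{
	DefaultEventBasedHold: true,
	Logging:               &storage.BucketLogging{LogBucket: "my-log-bucket"},
	PredefinedACL:         "projectPrivate",
}
if _, err := client.Bucket("my-bucket").Update(ctx, uattrs); err != nil {
	// TODO: Handle error.
}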
@ -564,6 +630,10 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
rb.Cors = toRawCORS(ua.CORS)
rb.ForceSendFields = append(rb.ForceSendFields, "Cors")
}
if ua.DefaultEventBasedHold != nil {
rb.DefaultEventBasedHold = optional.ToBool(ua.DefaultEventBasedHold)
rb.ForceSendFields = append(rb.ForceSendFields, "DefaultEventBasedHold")
}
if ua.RetentionPolicy != nil {
if ua.RetentionPolicy.RetentionPeriod == 0 {
rb.NullFields = append(rb.NullFields, "RetentionPolicy")
@ -595,6 +665,32 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
if ua.Lifecycle != nil {
rb.Lifecycle = toRawLifecycle(*ua.Lifecycle)
}
if ua.Logging != nil {
if *ua.Logging == (BucketLogging{}) {
rb.NullFields = append(rb.NullFields, "Logging")
rb.Logging = nil
} else {
rb.Logging = ua.Logging.toRawBucketLogging()
}
}
if ua.Website != nil {
if *ua.Website == (BucketWebsite{}) {
rb.NullFields = append(rb.NullFields, "Website")
rb.Website = nil
} else {
rb.Website = ua.Website.toRawBucketWebsite()
}
}
if ua.PredefinedACL != "" {
// Clear ACL or the call will fail.
rb.Acl = nil
rb.ForceSendFields = append(rb.ForceSendFields, "Acl")
}
if ua.PredefinedDefaultObjectACL != "" {
// Clear ACLs or the call will fail.
rb.DefaultObjectAcl = nil
rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl")
}
if ua.setLabels != nil || ua.deleteLabels != nil {
rb.Labels = map[string]string{}
for k, v := range ua.setLabels {
@ -721,6 +817,7 @@ func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error)
return &RetentionPolicy{
RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second,
EffectiveTime: t,
IsLocked: rp.IsLocked,
}, nil
}
@ -836,6 +933,46 @@ func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption {
return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName}
}
func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging {
if b == nil {
return nil
}
return &raw.BucketLogging{
LogBucket: b.LogBucket,
LogObjectPrefix: b.LogObjectPrefix,
}
}
func toBucketLogging(b *raw.BucketLogging) *BucketLogging {
if b == nil {
return nil
}
return &BucketLogging{
LogBucket: b.LogBucket,
LogObjectPrefix: b.LogObjectPrefix,
}
}
func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite {
if w == nil {
return nil
}
return &raw.BucketWebsite{
MainPageSuffix: w.MainPageSuffix,
NotFoundPage: w.NotFoundPage,
}
}
func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite {
if w == nil {
return nil
}
return &BucketWebsite{
MainPageSuffix: w.MainPageSuffix,
NotFoundPage: w.NotFoundPage,
}
}
// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {

View File

@ -115,6 +115,9 @@ func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.Rewr
if c.DestinationKMSKeyName != "" {
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
@ -209,6 +212,9 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
if c.dst.userProject != "" {
call.UserProject(c.dst.userProject)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
return nil, err
}
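
A hedged sketch of a server-side copy that applies a predefined ACL to the destination, as wired up above; bucket/object names and the ACL value are placeholders:

src := client.Bucket("my-bucket").Object("src")
dst := client.Bucket("my-bucket").Object("dst")
copier := dst.CopierFrom(src)
// PredefinedACL comes from the embedded ObjectAttrs and is sent as
// destinationPredefinedAcl on the rewrite call.
copier.PredefinedACL = "bucketOwnerFullControl"
if _, err := copier.Run(ctx); err != nil {
	// TODO: Handle error.
}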

View File

@ -22,9 +22,11 @@ https://cloud.google.com/storage/docs.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
All of the methods of this package use exponential backoff to retry calls
that fail with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff.
All of the methods of this package use exponential backoff to retry calls that fail
with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
indefinitely unless the controlling context is canceled or the client is closed. See
context.WithTimeout and context.WithCancel.
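
To bound the total time spent on a call, including all retries, derive a context with a deadline (a sketch; the 30-second value is arbitrary):

	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	attrs, err := client.Bucket("my-bucket").Attrs(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = attrs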
Creating a Client
@ -161,5 +163,14 @@ SignedURL for details.
// TODO: Handle error.
}
fmt.Println(url)
Errors
Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example:
if e, ok := err.(*googleapi.Error); ok {
if e.Code == 409 { ... }
}
*/
package storage // import "cloud.google.com/go/storage"

View File

@ -24,6 +24,8 @@ func shouldRetry(err error) bool {
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}

View File

@ -34,6 +34,8 @@ func shouldRetry(err error) bool {
// Unfortunately the error type is unexported, so we resort to string
// matching.
return strings.Contains(e.Error(), "REFUSED_STREAM")
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}
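
A small sketch of an error value that the retry predicate above would treat as retryable via its Temporary method; the type is hypothetical and exists only for illustration:

type tempErr struct{ msg string }

func (e tempErr) Error() string   { return e.msg }
func (e tempErr) Temporary() bool { return true }

// shouldRetry(tempErr{"transient network blip"}) reports true via the
// case interface{ Temporary() bool } branch shown above.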

View File

@ -25,6 +25,7 @@ import (
"reflect"
"strconv"
"strings"
"time"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
@ -33,6 +34,39 @@ import (
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
// ReaderObjectAttrs are attributes about the object being read. These are populated
// during the New call. This struct only holds a subset of object attributes: to
// get the full set of attributes, use ObjectHandle.Attrs.
//
// Each field is read-only.
type ReaderObjectAttrs struct {
// Size is the length of the object's content.
Size int64
// ContentType is the MIME type of the object's content.
ContentType string
// ContentEncoding is the encoding of the object's content.
ContentEncoding string
// CacheControl specifies whether and for how long browser and Internet
// caches are allowed to cache your objects.
CacheControl string
// LastModified is the time that the object was last modified.
LastModified time.Time
// Generation is the generation number of the object's content.
Generation int64
// Metageneration is the version of the metadata for this object at
// this generation. This field is used for preconditions and for
// detecting changes in metadata. A metageneration number is only
// meaningful in the context of a particular generation of a
// particular object.
Metageneration int64
}
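
A hedged sketch of reading an object and inspecting the attributes now surfaced on the Reader, assuming an existing *storage.Client named client; names are placeholders:

r, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx)
if err != nil {
	// TODO: Handle error.
}
defer r.Close()
fmt.Println(r.Attrs.ContentType, r.Attrs.Size, r.Attrs.LastModified)
if _, err := io.Copy(os.Stdout, r); err != nil {
	// TODO: Handle error.
}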
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
@ -61,10 +95,9 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
}
}
u := &url.URL{
Scheme: "https",
Host: "storage.googleapis.com",
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
RawQuery: conditionsQuery(o.gen, o.conds),
Scheme: "https",
Host: "storage.googleapis.com",
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
}
verb := "GET"
if length == 0 {
@ -85,6 +118,8 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
return nil, err
}
gen := o.gen
// Define a function that initiates a Read with offset and length, assuming we
// have already read seen bytes.
reopen := func(seen int64) (*http.Response, error) {
@ -95,6 +130,8 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
// The end character isn't affected by how many bytes we've seen.
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
}
// We wait to assign conditions here because the generation number can change in between reopen() runs.
req.URL.RawQuery = conditionsQuery(gen, o.conds)
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
@ -118,6 +155,15 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
// If a generation hasn't been specified, and this is the first response we get, let's record the
// generation. In future requests we'll use this generation as a precondition to avoid data races.
if gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
if err != nil {
return err
}
gen = gen64
}
return nil
})
if err != nil {
@ -168,16 +214,39 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
body.Close()
body = emptyBody
}
var metaGen int64
if res.Header.Get("X-Goog-Generation") != "" {
metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
if err != nil {
return nil, err
}
}
var lm time.Time
if res.Header.Get("Last-Modified") != "" {
lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
return nil, err
}
}
attrs := ReaderObjectAttrs{
Size: size,
ContentType: res.Header.Get("Content-Type"),
ContentEncoding: res.Header.Get("Content-Encoding"),
CacheControl: res.Header.Get("Cache-Control"),
LastModified: lm,
Generation: gen,
Metageneration: metaGen,
}
return &Reader{
body: body,
size: size,
remain: remain,
contentType: res.Header.Get("Content-Type"),
contentEncoding: res.Header.Get("Content-Encoding"),
cacheControl: res.Header.Get("Cache-Control"),
wantCRC: crc,
checkCRC: checkCRC,
reopen: reopen,
Attrs: attrs,
body: body,
size: size,
remain: remain,
wantCRC: crc,
checkCRC: checkCRC,
reopen: reopen,
}, nil
}
@ -210,11 +279,9 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
Attrs ReaderObjectAttrs
body io.ReadCloser
seen, remain, size int64
contentType string
contentEncoding string
cacheControl string
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
@ -278,8 +345,10 @@ func shouldRetryRead(err error) bool {
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
//
// Deprecated: use Reader.Attrs.Size.
func (r *Reader) Size() int64 {
return r.size
return r.Attrs.Size
}
// Remain returns the number of bytes left to read, or -1 if unknown.
@ -288,16 +357,29 @@ func (r *Reader) Remain() int64 {
}
// ContentType returns the content type of the object.
//
// Deprecated: use Reader.Attrs.ContentType.
func (r *Reader) ContentType() string {
return r.contentType
return r.Attrs.ContentType
}
// ContentEncoding returns the content encoding of the object.
//
// Deprecated: use Reader.Attrs.ContentEncoding.
func (r *Reader) ContentEncoding() string {
return r.contentEncoding
return r.Attrs.ContentEncoding
}
// CacheControl returns the cache control of the object.
//
// Deprecated: use Reader.Attrs.CacheControl.
func (r *Reader) CacheControl() string {
return r.cacheControl
return r.Attrs.CacheControl
}
// LastModified returns the value of the Last-Modified header.
//
// Deprecated: use Reader.Attrs.LastModified.
func (r *Reader) LastModified() (time.Time, error) {
return r.Attrs.LastModified, nil
}

View File

@ -112,8 +112,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
//
// Close need not be called at program exit.
func (c *Client) Close() error {
// Set fields to nil so that subsequent uses
// will panic.
// Set fields to nil so that subsequent uses will panic.
c.hc = nil
c.raw = nil
return nil
@ -442,6 +441,14 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
attrs.CacheControl = optional.ToString(uattrs.CacheControl)
forceSendFields = append(forceSendFields, "CacheControl")
}
if uattrs.EventBasedHold != nil {
attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold)
forceSendFields = append(forceSendFields, "EventBasedHold")
}
if uattrs.TemporaryHold != nil {
attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold)
forceSendFields = append(forceSendFields, "TemporaryHold")
}
if uattrs.Metadata != nil {
attrs.Metadata = uattrs.Metadata
if len(attrs.Metadata) == 0 {
@ -467,6 +474,9 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
if o.userProject != "" {
call.UserProject(o.userProject)
}
if uattrs.PredefinedACL != "" {
call.PredefinedAcl(uattrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err
}
@ -482,6 +492,16 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
return newObject(obj), nil
}
// BucketName returns the name of the bucket.
func (o *ObjectHandle) BucketName() string {
return o.bucket
}
// ObjectName returns the name of the object.
func (o *ObjectHandle) ObjectName() string {
return o.object
}
// ObjectAttrsToUpdate is used to update the attributes of an object.
// Only fields set to non-nil values will be updated.
// Set a field to its zero value to delete it.
@ -494,6 +514,8 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
// Metadata: map[string]string{},
// }
type ObjectAttrsToUpdate struct {
EventBasedHold optional.Bool
TemporaryHold optional.Bool
ContentType optional.String
ContentLanguage optional.String
ContentEncoding optional.String
@ -501,6 +523,10 @@ type ObjectAttrsToUpdate struct {
CacheControl optional.String
Metadata map[string]string // set to map[string]string{} to delete
ACL []ACLRule
// If not empty, applies a predefined set of access controls. ACL must be nil.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/patch.
PredefinedACL string
}
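
A minimal sketch of placing a temporary hold and applying a predefined ACL in one update, assuming an existing *storage.Client named client; names and the ACL value are placeholders (ACL is left nil, as the comment above requires):

uattrs := storage.ObjectAttrsToUpdate{
	TemporaryHold: true,
	PredefinedACL: "projectPrivate",
}
if _, err := client.Bucket("my-bucket").Object("my-object").Update(ctx, uattrs); err != nil {
	// TODO: Handle error.
}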
// Delete deletes the single specified object.
@ -549,7 +575,8 @@ func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
// attribute is specified, the content type will be automatically sniffed
// using net/http.DetectContentType.
//
// It is the caller's responsibility to call Close when writing is done.
// It is the caller's responsibility to call Close when writing is done. To
// stop writing without saving the data, cancel the context.
func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
return &Writer{
ctx: ctx,
@ -595,34 +622,26 @@ func parseKey(key []byte) (*rsa.PrivateKey, error) {
return parsed, nil
}
func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
var acl []*raw.ObjectAccessControl
if len(oldACL) > 0 {
acl = make([]*raw.ObjectAccessControl, len(oldACL))
for i, rule := range oldACL {
acl[i] = &raw.ObjectAccessControl{
Entity: string(rule.Entity),
Role: string(rule.Role),
}
}
}
return acl
}
// toRawObject copies the editable attributes from o to the raw library's Object type.
func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
acl := toRawObjectACL(o.ACL)
var ret string
if !o.RetentionExpirationTime.IsZero() {
ret = o.RetentionExpirationTime.Format(time.RFC3339)
}
return &raw.Object{
Bucket: bucket,
Name: o.Name,
ContentType: o.ContentType,
ContentEncoding: o.ContentEncoding,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
ContentDisposition: o.ContentDisposition,
StorageClass: o.StorageClass,
Acl: acl,
Metadata: o.Metadata,
Bucket: bucket,
Name: o.Name,
EventBasedHold: o.EventBasedHold,
TemporaryHold: o.TemporaryHold,
RetentionExpirationTime: ret,
ContentType: o.ContentType,
ContentEncoding: o.ContentEncoding,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
ContentDisposition: o.ContentDisposition,
StorageClass: o.StorageClass,
Acl: toRawObjectACL(o.ACL),
Metadata: o.Metadata,
}
}
@ -646,9 +665,32 @@ type ObjectAttrs struct {
// headers when serving the object data.
CacheControl string
// EventBasedHold specifies whether an object is under event-based hold. New
// objects created in a bucket whose DefaultEventBasedHold is set will
// default to that value.
EventBasedHold bool
// TemporaryHold specifies whether an object is under temporary hold. While
// this flag is set to true, the object is protected against deletion and
// overwrites.
TemporaryHold bool
// RetentionExpirationTime is a server-determined value that specifies the
// earliest time that the object's retention period expires.
// This is a read-only field.
RetentionExpirationTime time.Time
// ACL is the list of access control rules for the object.
ACL []ACLRule
// If not empty, applies a predefined set of access controls. It should be set
// only when writing, copying or composing an object. When copying or composing,
// it acts as the destinationPredefinedAcl parameter.
// PredefinedACL is always empty for ObjectAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/insert
// for valid values.
PredefinedACL string
// Owner is the owner of the object. This field is read-only.
//
// If non-zero, it is in the form of "user-<userId>".
@ -751,13 +793,6 @@ func newObject(o *raw.Object) *ObjectAttrs {
if o == nil {
return nil
}
acl := make([]ACLRule, len(o.Acl))
for i, rule := range o.Acl {
acl[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
owner := ""
if o.Owner != nil {
owner = o.Owner.Entity
@ -769,28 +804,31 @@ func newObject(o *raw.Object) *ObjectAttrs {
sha256 = o.CustomerEncryption.KeySha256
}
return &ObjectAttrs{
Bucket: o.Bucket,
Name: o.Name,
ContentType: o.ContentType,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
ACL: acl,
Owner: owner,
ContentEncoding: o.ContentEncoding,
ContentDisposition: o.ContentDisposition,
Size: int64(o.Size),
MD5: md5,
CRC32C: crc32c,
MediaLink: o.MediaLink,
Metadata: o.Metadata,
Generation: o.Generation,
Metageneration: o.Metageneration,
StorageClass: o.StorageClass,
CustomerKeySHA256: sha256,
KMSKeyName: o.KmsKeyName,
Created: convertTime(o.TimeCreated),
Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated),
Bucket: o.Bucket,
Name: o.Name,
ContentType: o.ContentType,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
EventBasedHold: o.EventBasedHold,
TemporaryHold: o.TemporaryHold,
RetentionExpirationTime: convertTime(o.RetentionExpirationTime),
ACL: toObjectACLRules(o.Acl),
Owner: owner,
ContentEncoding: o.ContentEncoding,
ContentDisposition: o.ContentDisposition,
Size: int64(o.Size),
MD5: md5,
CRC32C: crc32c,
MediaLink: o.MediaLink,
Metadata: o.Metadata,
Generation: o.Generation,
Metageneration: o.Metageneration,
StorageClass: o.StorageClass,
CustomerKeySHA256: sha256,
KMSKeyName: o.KmsKeyName,
Created: convertTime(o.TimeCreated),
Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated),
}
}
@ -1073,4 +1111,12 @@ func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) erro
return nil
}
// TODO(jbd): Add storage.objects.watch.
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
r := c.raw.Projects.ServiceAccount.Get(projectID)
res, err := r.Context(ctx).Do()
if err != nil {
return "", err
}
return res.EmailAddress, nil
}
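
A hedged usage sketch for the new method, assuming an existing *storage.Client named client; the project ID is a placeholder:

email, err := client.ServiceAccount(ctx, "my-project")
if err != nil {
	// TODO: Handle error.
}
fmt.Println("GCS service account:", email)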

File diff suppressed because one or more lines are too long

View File

@ -95,6 +95,8 @@ func (w *Writer) open() error {
w.pw = pw
w.opened = true
go w.monitorCancel()
if w.ChunkSize < 0 {
return errors.New("storage: Writer.ChunkSize must be non-negative")
}
@ -125,6 +127,9 @@ func (w *Writer) open() error {
if attrs.KMSKeyName != "" {
call.KmsKeyName(attrs.KMSKeyName)
}
if attrs.PredefinedACL != "" {
call.PredefinedAcl(attrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.mu.Lock()
w.err = err
@ -185,7 +190,19 @@ func (w *Writer) Write(p []byte) (n int, err error) {
return 0, err
}
}
return w.pw.Write(p)
n, err = w.pw.Write(p)
if err != nil {
w.mu.Lock()
werr := w.err
w.mu.Unlock()
// Preserve the existing behavior that, when the context is canceled, Write returns
// context.Canceled instead of "io: read/write on closed pipe". This hides the
// pipe implementation detail from users and makes Write behave like an RPC.
if werr == context.Canceled || werr == context.DeadlineExceeded {
return n, werr
}
}
return n, err
}
// Close completes the write operation and flushes any buffered data.
@ -197,15 +214,35 @@ func (w *Writer) Close() error {
return err
}
}
// Closing either the read or write causes the entire pipe to close.
if err := w.pw.Close(); err != nil {
return err
}
<-w.donec
w.mu.Lock()
defer w.mu.Unlock()
return w.err
}
// monitorCancel is intended to be used as a background goroutine. It monitors
// the context, and when it observes that the context has been canceled, it manually
// closes things that do not take a context.
func (w *Writer) monitorCancel() {
select {
case <-w.ctx.Done():
w.mu.Lock()
werr := w.ctx.Err()
w.err = werr
w.mu.Unlock()
// Closing either the read or write causes the entire pipe to close.
w.CloseWithError(werr)
case <-w.donec:
}
}
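
A hedged sketch of abandoning an upload by canceling the controlling context, which is the behavior monitorCancel above enables; client, bucket, and object names are placeholders:

ctx, cancel := context.WithCancel(ctx)
w := client.Bucket("my-bucket").Object("my-object").NewWriter(ctx)
if _, err := w.Write([]byte("partial data")); err != nil {
	// TODO: Handle error.
}
cancel() // abandon the upload; no object is created
if err := w.Close(); err != nil {
	// err is typically context.Canceled, surfaced via monitorCancel.
}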
// CloseWithError aborts the write operation with the provided error.
// CloseWithError always returns nil.
//

View File

@ -40,9 +40,10 @@ func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string)
}
// CreateOrUpdate create or update an availability set.
//
// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set.
// parameters is parameters supplied to the Create Availability Set operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// availabilitySetName - the name of the availability set.
// parameters - parameters supplied to the Create Availability Set operation.
func (client AvailabilitySetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySet) (result AvailabilitySet, err error) {
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, availabilitySetName, parameters)
if err != nil {
@ -109,8 +110,9 @@ func (client AvailabilitySetsClient) CreateOrUpdateResponder(resp *http.Response
}
// Delete delete an availability set.
//
// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set.
// Parameters:
// resourceGroupName - the name of the resource group.
// availabilitySetName - the name of the availability set.
func (client AvailabilitySetsClient) Delete(ctx context.Context, resourceGroupName string, availabilitySetName string) (result OperationStatusResponse, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, availabilitySetName)
if err != nil {
@ -175,8 +177,9 @@ func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (resul
}
// Get retrieves information about an availability set.
//
// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set.
// Parameters:
// resourceGroupName - the name of the resource group.
// availabilitySetName - the name of the availability set.
func (client AvailabilitySetsClient) Get(ctx context.Context, resourceGroupName string, availabilitySetName string) (result AvailabilitySet, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, availabilitySetName)
if err != nil {
@ -241,9 +244,10 @@ func (client AvailabilitySetsClient) GetResponder(resp *http.Response) (result A
}
// List lists all availability sets in a resource group.
//
// resourceGroupName is the name of the resource group.
func (client AvailabilitySetsClient) List(ctx context.Context, resourceGroupName string) (result AvailabilitySetListResult, err error) {
// Parameters:
// resourceGroupName - the name of the resource group.
func (client AvailabilitySetsClient) List(ctx context.Context, resourceGroupName string) (result AvailabilitySetListResultPage, err error) {
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", nil, "Failure preparing request")
@ -252,12 +256,12 @@ func (client AvailabilitySetsClient) List(ctx context.Context, resourceGroupName
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
result.aslr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure sending request")
return
}
result, err = client.ListResponder(resp)
result.aslr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure responding to request")
}
@ -305,10 +309,38 @@ func (client AvailabilitySetsClient) ListResponder(resp *http.Response) (result
return
}
// listNextResults retrieves the next set of results, if any.
func (client AvailabilitySetsClient) listNextResults(lastResults AvailabilitySetListResult) (result AvailabilitySetListResult, err error) {
req, err := lastResults.availabilitySetListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client AvailabilitySetsClient) ListComplete(ctx context.Context, resourceGroupName string) (result AvailabilitySetListResultIterator, err error) {
result.page, err = client.List(ctx, resourceGroupName)
return
}
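
A hedged sketch of walking every availability set in a resource group with the new iterator; client construction and the resource group name are omitted as assumptions:

iter, err := client.ListComplete(ctx, "my-resource-group")
if err != nil {
	// handle error
}
for iter.NotDone() {
	as := iter.Value()
	if as.Name != nil {
		fmt.Println(*as.Name)
	}
	if err := iter.Next(); err != nil {
		// handle error
		break
	}
}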
// ListAvailableSizes lists all available virtual machine sizes that can be used to create a new virtual machine in an
// existing availability set.
//
// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set.
// Parameters:
// resourceGroupName - the name of the resource group.
// availabilitySetName - the name of the availability set.
func (client AvailabilitySetsClient) ListAvailableSizes(ctx context.Context, resourceGroupName string, availabilitySetName string) (result VirtualMachineSizeListResult, err error) {
req, err := client.ListAvailableSizesPreparer(ctx, resourceGroupName, availabilitySetName)
if err != nil {
@ -372,10 +404,101 @@ func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Resp
return
}
// ListBySubscription lists all availability sets in a subscription.
func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context) (result AvailabilitySetListResultPage, err error) {
result.fn = client.listBySubscriptionNextResults
req, err := client.ListBySubscriptionPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListBySubscription", nil, "Failure preparing request")
return
}
resp, err := client.ListBySubscriptionSender(req)
if err != nil {
result.aslr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListBySubscription", resp, "Failure sending request")
return
}
result.aslr, err = client.ListBySubscriptionResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListBySubscription", resp, "Failure responding to request")
}
return
}
// ListBySubscriptionPreparer prepares the ListBySubscription request.
func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
// http.Response Body if it receives an error.
func (client AvailabilitySetsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
// closes the http.Response Body.
func (client AvailabilitySetsClient) ListBySubscriptionResponder(resp *http.Response) (result AvailabilitySetListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listBySubscriptionNextResults retrieves the next set of results, if any.
func (client AvailabilitySetsClient) listBySubscriptionNextResults(lastResults AvailabilitySetListResult) (result AvailabilitySetListResult, err error) {
req, err := lastResults.availabilitySetListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListBySubscriptionSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request")
}
result, err = client.ListBySubscriptionResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request")
}
return
}
// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
func (client AvailabilitySetsClient) ListBySubscriptionComplete(ctx context.Context) (result AvailabilitySetListResultIterator, err error) {
result.page, err = client.ListBySubscription(ctx)
return
}
// Update update an availability set.
//
// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set.
// parameters is parameters supplied to the Update Availability Set operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// availabilitySetName - the name of the availability set.
// parameters - parameters supplied to the Update Availability Set operation.
func (client AvailabilitySetsClient) Update(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySetUpdate) (result AvailabilitySet, err error) {
req, err := client.UpdatePreparer(ctx, resourceGroupName, availabilitySetName, parameters)
if err != nil {

View File

@ -1,4 +1,4 @@
// Package compute implements the Azure ARM Compute service API version .
// Package compute implements the Azure ARM Compute service API version 2017-12-01.
//
// Compute Client
package compute

View File

@ -1,471 +0,0 @@
package compute
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"net/http"
)
// ContainerServicesClient is the compute Client
type ContainerServicesClient struct {
BaseClient
}
// NewContainerServicesClient creates an instance of the ContainerServicesClient client.
func NewContainerServicesClient(subscriptionID string) ContainerServicesClient {
return NewContainerServicesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewContainerServicesClientWithBaseURI creates an instance of the ContainerServicesClient client.
func NewContainerServicesClientWithBaseURI(baseURI string, subscriptionID string) ContainerServicesClient {
return ContainerServicesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates a container service with the specified configuration of orchestrator, masters, and
// agents.
//
// resourceGroupName is the name of the resource group. containerServiceName is the name of the container service
// in the specified subscription and resource group. parameters is parameters supplied to the Create or Update a
// Container Service operation.
func (client ContainerServicesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (result ContainerServicesCreateOrUpdateFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.ContainerServiceProperties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.CustomProfile", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.CustomProfile.Orchestrator", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile.ClientID", Name: validation.Null, Rule: true, Chain: nil},
{Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile.Secret", Name: validation.Null, Rule: true, Chain: nil},
}},
{Target: "parameters.ContainerServiceProperties.MasterProfile", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.MasterProfile.DNSPrefix", Name: validation.Null, Rule: true, Chain: nil}}},
{Target: "parameters.ContainerServiceProperties.AgentPoolProfiles", Name: validation.Null, Rule: true, Chain: nil},
{Target: "parameters.ContainerServiceProperties.WindowsProfile", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminUsername", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$`, Chain: nil}}},
{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminPassword", Name: validation.Null, Rule: true, Chain: nil},
}},
{Target: "parameters.ContainerServiceProperties.LinuxProfile", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.AdminUsername", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-z][a-z0-9_-]*$`, Chain: nil}}},
{Target: "parameters.ContainerServiceProperties.LinuxProfile.SSH", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.SSH.PublicKeys", Name: validation.Null, Rule: true, Chain: nil}}},
}},
{Target: "parameters.ContainerServiceProperties.DiagnosticsProfile", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.DiagnosticsProfile.VMDiagnostics", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.DiagnosticsProfile.VMDiagnostics.Enabled", Name: validation.Null, Rule: true, Chain: nil}}},
}},
}}}}}); err != nil {
return result, validation.NewError("compute.ContainerServicesClient", "CreateOrUpdate", err.Error())
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, containerServiceName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client ContainerServicesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerServiceName": autorest.Encode("path", containerServiceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-01-31"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (future ContainerServicesCreateOrUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted))
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client ContainerServicesClient) CreateOrUpdateResponder(resp *http.Response) (result ContainerService, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
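The CreateOrUpdate path above is a long-running operation: the Sender wraps the initial response in a ContainerServicesCreateOrUpdateFuture that the caller still has to wait on. A hedged, illustrative caller-side sketch follows; it is not part of the generated SDK, assumes `compute`, `context` and `github.com/Azure/go-autorest/autorest/to` are imported, and the subscription ID, resource names, DNS prefixes and SSH key are placeholders. Field and type names not referenced in the validation chain above are assumptions based on this package's models.
// Illustrative sketch (not generated code): create a container service and
// block until the operation reaches a terminal state.
csClient := compute.NewContainerServicesClient("<subscription-id>") // placeholder
// csClient.Authorizer = ...                                        // auth setup omitted
ctx := context.Background()
parameters := compute.ContainerService{
	Location: to.StringPtr("westus"),
	ContainerServiceProperties: &compute.ContainerServiceProperties{
		// MasterProfile, AgentPoolProfiles and LinuxProfile are the fields the
		// validation chain in CreateOrUpdate above requires to be non-nil.
		MasterProfile: &compute.ContainerServiceMasterProfile{
			DNSPrefix: to.StringPtr("example-master"), // placeholder
		},
		AgentPoolProfiles: &[]compute.ContainerServiceAgentPoolProfile{{
			Name:      to.StringPtr("agentpool"),
			DNSPrefix: to.StringPtr("example-agents"),
			// Count, VMSize, etc. are set per the ContainerServiceAgentPoolProfile model.
		}},
		LinuxProfile: &compute.ContainerServiceLinuxProfile{
			AdminUsername: to.StringPtr("azureuser"), // must match ^[a-z][a-z0-9_-]*$
			SSH: &compute.ContainerServiceSSHConfiguration{
				PublicKeys: &[]compute.ContainerServiceSSHPublicKey{{
					KeyData: to.StringPtr("ssh-rsa AAAA... placeholder"),
				}},
			},
		},
	},
}
future, err := csClient.CreateOrUpdate(ctx, "my-rg", "my-acs", parameters)
if err != nil {
	// handle error
}
// WaitForCompletion and the generated Result method are assumed from this
// vendored go-autorest / models.go vintage.
if err = future.WaitForCompletion(ctx, csClient.Client); err != nil {
	// handle error
}
cs, err := future.Result(csClient)
_ = cs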
// Delete deletes the specified container service in the specified subscription and resource group. The operation does
// not delete other resources created as part of creating a container service, including storage accounts, VMs, and
// availability sets. All the other resources created with the container service are part of the same resource group
// and can be deleted individually.
//
// resourceGroupName is the name of the resource group. containerServiceName is the name of the container service
// in the specified subscription and resource group.
func (client ContainerServicesClient) Delete(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerServicesDeleteFuture, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, containerServiceName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", result.Response(), "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client ContainerServicesClient) DeletePreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerServiceName": autorest.Encode("path", containerServiceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-01-31"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ContainerServicesClient) DeleteSender(req *http.Request) (future ContainerServicesDeleteFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client ContainerServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets the properties of the specified container service in the specified subscription and resource group. The
// operation returns the properties including state, orchestrator, number of masters and agents, and FQDNs of masters
// and agents.
//
// resourceGroupName is the name of the resource group. containerServiceName is the name of the container service
// in the specified subscription and resource group.
func (client ContainerServicesClient) Get(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerService, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, containerServiceName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client ContainerServicesClient) GetPreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"containerServiceName": autorest.Encode("path", containerServiceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-01-31"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ContainerServicesClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client ContainerServicesClient) GetResponder(resp *http.Response) (result ContainerService, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List gets a list of container services in the specified subscription. The operation returns properties of each
// container service including state, orchestrator, number of masters and agents, and FQDNs of masters and agents.
func (client ContainerServicesClient) List(ctx context.Context) (result ContainerServiceListResultPage, err error) {
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.cslr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure sending request")
return
}
result.cslr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client ContainerServicesClient) ListPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-01-31"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/containerServices", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ContainerServicesClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client ContainerServicesClient) ListResponder(resp *http.Response) (result ContainerServiceListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client ContainerServicesClient) listNextResults(lastResults ContainerServiceListResult) (result ContainerServiceListResult, err error) {
req, err := lastResults.containerServiceListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client ContainerServicesClient) ListComplete(ctx context.Context) (result ContainerServiceListResultIterator, err error) {
result.page, err = client.List(ctx)
return
}
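ListComplete returns an iterator that transparently follows the nextLink via listNextResults above. A minimal sketch of consuming it, assuming the iterator's NotDone/Value/Next methods generated in this package's models.go and the csClient/ctx values from the earlier sketch:
// Illustrative sketch: walk every container service in the subscription.
iter, err := csClient.ListComplete(ctx)
if err != nil {
	// handle error
}
for iter.NotDone() {
	cs := iter.Value() // compute.ContainerService
	fmt.Println(*cs.Name)
	if err = iter.Next(); err != nil {
		break // paging error
	}
}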
// ListByResourceGroup gets a list of container services in the specified subscription and resource group. The
// operation returns properties of each container service including state, orchestrator, number of masters and agents,
// and FQDNs of masters and agents.
//
// resourceGroupName is the name of the resource group.
func (client ContainerServicesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ContainerServiceListResultPage, err error) {
result.fn = client.listByResourceGroupNextResults
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.cslr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result.cslr, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure responding to request")
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client ContainerServicesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-01-31"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client ContainerServicesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client ContainerServicesClient) ListByResourceGroupResponder(resp *http.Response) (result ContainerServiceListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
func (client ContainerServicesClient) listByResourceGroupNextResults(lastResults ContainerServiceListResult) (result ContainerServiceListResult, err error) {
req, err := lastResults.containerServiceListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client ContainerServicesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ContainerServiceListResultIterator, err error) {
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
return
}
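As an alternative to the iterator, List and ListByResourceGroup return a page type whose Next method drives listNextResults/listByResourceGroupNextResults. A sketch of page-at-a-time iteration, assuming the page's NotDone/Values/Next methods from models.go:
// Illustrative sketch: page-at-a-time listing; Values returns only the
// items of the current page.
page, err := csClient.ListByResourceGroup(ctx, "my-rg")
if err != nil {
	// handle error
}
for page.NotDone() {
	for _, cs := range page.Values() {
		fmt.Println(*cs.Name)
	}
	if err = page.Next(); err != nil {
		break // paging error
	}
}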

View File

@ -1,678 +0,0 @@
package compute
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"net/http"
)
// DisksClient is the compute Client
type DisksClient struct {
BaseClient
}
// NewDisksClient creates an instance of the DisksClient client.
func NewDisksClient(subscriptionID string) DisksClient {
return NewDisksClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewDisksClientWithBaseURI creates an instance of the DisksClient client.
func NewDisksClientWithBaseURI(baseURI string, subscriptionID string) DisksClient {
return DisksClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates a disk.
//
// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being
// created. The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z,
// 0-9 and _. The maximum name length is 80 characters. disk is disk object supplied in the body of the Put disk
// operation.
func (client DisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk Disk) (result DisksCreateOrUpdateFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: disk,
Constraints: []validation.Constraint{{Target: "disk.DiskProperties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}},
}},
{Target: "disk.DiskProperties.EncryptionSettings", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
{Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil},
}},
{Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
{Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil},
}},
}},
}}}}}); err != nil {
return result, validation.NewError("compute.DisksClient", "CreateOrUpdate", err.Error())
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, diskName, disk)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
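The validation chain above only enforces that DiskProperties.CreationData is present (plus key sub-fields when ImageReference or EncryptionSettings are supplied). A hedged sketch of the smallest Disk body that passes it; CreateOption, DiskSizeGB and Location are assumptions based on this package's models, not part of the constraint chain, and the client setup mirrors the earlier sketch:
// Illustrative sketch (not generated code): minimal disk creation.
disksClient := compute.NewDisksClient("<subscription-id>") // placeholder
// disksClient.Authorizer = ...                             // auth setup omitted
disk := compute.Disk{
	Location: to.StringPtr("westus"),
	DiskProperties: &compute.DiskProperties{
		CreationData: &compute.CreationData{
			CreateOption: compute.Empty, // assumed DiskCreateOption constant in this package
		},
		DiskSizeGB: to.Int32Ptr(32),
	},
}
diskFuture, err := disksClient.CreateOrUpdate(ctx, "my-rg", "my-disk", disk)
if err != nil {
	// handle error
}
if err = diskFuture.WaitForCompletion(ctx, disksClient.Client); err != nil {
	// handle error
}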
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, diskName string, disk Disk) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
autorest.WithJSON(disk),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) CreateOrUpdateSender(req *http.Request) (future DisksCreateOrUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client DisksClient) CreateOrUpdateResponder(resp *http.Response) (result Disk, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes a disk.
//
// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being
// created. The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z,
// 0-9 and _. The maximum name length is 80 characters.
func (client DisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (result DisksDeleteFuture, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, diskName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", result.Response(), "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) DeleteSender(req *http.Request) (future DisksDeleteFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client DisksClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Get gets information about a disk.
//
// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being
// created. The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z,
// 0-9 and _. The maximum name length is 80 characters.
func (client DisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result Disk, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, diskName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client DisksClient) GetResponder(resp *http.Response) (result Disk, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GrantAccess grants access to a disk.
//
// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being
// created. The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z,
// 0-9 and _. The maximum name length is 80 characters. grantAccessData is access data object supplied in the body
// of the get disk access operation.
func (client DisksClient) GrantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData) (result DisksGrantAccessFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: grantAccessData,
Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("compute.DisksClient", "GrantAccess", err.Error())
}
req, err := client.GrantAccessPreparer(ctx, resourceGroupName, diskName, grantAccessData)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", nil, "Failure preparing request")
return
}
result, err = client.GrantAccessSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", result.Response(), "Failure sending request")
return
}
return
}
// GrantAccessPreparer prepares the GrantAccess request.
func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess", pathParameters),
autorest.WithJSON(grantAccessData),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GrantAccessSender sends the GrantAccess request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) GrantAccessSender(req *http.Request) (future DisksGrantAccessFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
return
}
// GrantAccessResponder handles the response to the GrantAccess request. The method always
// closes the http.Response Body.
func (client DisksClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
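GrantAccess is another long-running operation whose result is an AccessURI, i.e. a time-limited SAS URL for the disk; RevokeAccess (further down in this file) invalidates it. A hedged sketch of the round trip, where DurationInSeconds is the only field the validation above requires and the access-level field is left as an assumption:
// Illustrative sketch: export a read SAS for a managed disk, then revoke it.
grant := compute.GrantAccessData{
	DurationInSeconds: to.Int32Ptr(3600),
	// Access: compute.Read, // assumed AccessLevel constant, set per the model
}
grantFuture, err := disksClient.GrantAccess(ctx, "my-rg", "my-disk", grant)
if err != nil {
	// handle error
}
if err = grantFuture.WaitForCompletion(ctx, disksClient.Client); err != nil {
	// handle error
}
access, err := grantFuture.Result(disksClient) // compute.AccessURI carrying the SAS
_ = access
// ... copy or download the blob behind the SAS ...
revokeFuture, err := disksClient.RevokeAccess(ctx, "my-rg", "my-disk")
if err != nil {
	// handle error
}
_ = revokeFuture // wait on it the same way once the SAS is no longer needed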
// List lists all the disks under a subscription.
func (client DisksClient) List(ctx context.Context) (result DiskListPage, err error) {
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.dl.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure sending request")
return
}
result.dl, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client DisksClient) ListResponder(resp *http.Response) (result DiskList, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client DisksClient) listNextResults(lastResults DiskList) (result DiskList, err error) {
req, err := lastResults.diskListPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client DisksClient) ListComplete(ctx context.Context) (result DiskListIterator, err error) {
result.page, err = client.List(ctx)
return
}
// ListByResourceGroup lists all the disks under a resource group.
//
// resourceGroupName is the name of the resource group.
func (client DisksClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DiskListPage, err error) {
result.fn = client.listByResourceGroupNextResults
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.dl.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result.dl, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure responding to request")
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client DisksClient) ListByResourceGroupResponder(resp *http.Response) (result DiskList, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
func (client DisksClient) listByResourceGroupNextResults(lastResults DiskList) (result DiskList, err error) {
req, err := lastResults.diskListPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client DisksClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result DiskListIterator, err error) {
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
return
}
// RevokeAccess revokes access to a disk.
//
// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being
// created. The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z,
// 0-9 and _. The maximum name length is 80 characters.
func (client DisksClient) RevokeAccess(ctx context.Context, resourceGroupName string, diskName string) (result DisksRevokeAccessFuture, err error) {
req, err := client.RevokeAccessPreparer(ctx, resourceGroupName, diskName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", nil, "Failure preparing request")
return
}
result, err = client.RevokeAccessSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", result.Response(), "Failure sending request")
return
}
return
}
// RevokeAccessPreparer prepares the RevokeAccess request.
func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// RevokeAccessSender sends the RevokeAccess request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) RevokeAccessSender(req *http.Request) (future DisksRevokeAccessFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
return
}
// RevokeAccessResponder handles the response to the RevokeAccess request. The method always
// closes the http.Response Body.
func (client DisksClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Update updates (patches) a disk.
//
// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being
// created. The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z,
// 0-9 and _. The maximum name length is 80 characters. disk is disk object supplied in the body of the Patch disk
// operation.
func (client DisksClient) Update(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate) (result DisksUpdateFuture, err error) {
req, err := client.UpdatePreparer(ctx, resourceGroupName, diskName, disk)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", nil, "Failure preparing request")
return
}
result, err = client.UpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", result.Response(), "Failure sending request")
return
}
return
}
// UpdatePreparer prepares the Update request.
func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate) (*http.Request, error) {
pathParameters := map[string]interface{}{
"diskName": autorest.Encode("path", diskName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters),
autorest.WithJSON(disk),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client DisksClient) UpdateSender(req *http.Request) (future DisksUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
return
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client DisksClient) UpdateResponder(resp *http.Response) (result Disk, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}

View File

@ -21,7 +21,6 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"net/http"
)
@ -41,19 +40,11 @@ func NewImagesClientWithBaseURI(baseURI string, subscriptionID string) ImagesCli
}
// CreateOrUpdate create or update an image.
//
// resourceGroupName is the name of the resource group. imageName is the name of the image. parameters is
// parameters supplied to the Create Image operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// imageName - the name of the image.
// parameters - parameters supplied to the Create Image operation.
func (client ImagesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, imageName string, parameters Image) (result ImagesCreateOrUpdateFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.ImageProperties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.ImageProperties.StorageProfile", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.ImageProperties.StorageProfile.OsDisk", Name: validation.Null, Rule: true, Chain: nil}}},
}}}}}); err != nil {
return result, validation.NewError("compute.ImagesClient", "CreateOrUpdate", err.Error())
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, imageName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ImagesClient", "CreateOrUpdate", nil, "Failure preparing request")
@ -95,15 +86,17 @@ func (client ImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceG
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (future ImagesCreateOrUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
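The hunk above replaces the old polling setup (azure.NewFuture plus future.Done against a decorated sender) with a synchronous initial send followed by azure.NewFutureFromResponse. Reassembled without the diff interleaving, the new Sender body shown in that hunk reads as follows; this is a reconstruction of what the hunk adds, not additional behaviour:
// Reconstruction of the new CreateOrUpdateSender: the initial PUT is sent
// synchronously, its status code is validated, and the future is built from
// that first response rather than being polled inside the Sender.
func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (future ImagesCreateOrUpdateFuture, err error) {
	var resp *http.Response
	resp, err = autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if err != nil {
		return
	}
	err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
	if err != nil {
		return
	}
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}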
@ -121,8 +114,9 @@ func (client ImagesClient) CreateOrUpdateResponder(resp *http.Response) (result
}
// Delete deletes an Image.
//
// resourceGroupName is the name of the resource group. imageName is the name of the image.
// Parameters:
// resourceGroupName - the name of the resource group.
// imageName - the name of the image.
func (client ImagesClient) Delete(ctx context.Context, resourceGroupName string, imageName string) (result ImagesDeleteFuture, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, imageName)
if err != nil {
@ -163,15 +157,17 @@ func (client ImagesClient) DeletePreparer(ctx context.Context, resourceGroupName
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ImagesClient) DeleteSender(req *http.Request) (future ImagesDeleteFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -189,9 +185,10 @@ func (client ImagesClient) DeleteResponder(resp *http.Response) (result Operatio
}
// Get gets an image.
//
// resourceGroupName is the name of the resource group. imageName is the name of the image. expand is the expand
// expression to apply on the operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// imageName - the name of the image.
// expand - the expand expression to apply on the operation.
func (client ImagesClient) Get(ctx context.Context, resourceGroupName string, imageName string, expand string) (result Image, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, imageName, expand)
if err != nil {
@ -350,8 +347,8 @@ func (client ImagesClient) ListComplete(ctx context.Context) (result ImageListRe
}
// ListByResourceGroup gets the list of images under a resource group.
//
// resourceGroupName is the name of the resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
func (client ImagesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ImageListResultPage, err error) {
result.fn = client.listByResourceGroupNextResults
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
@ -443,9 +440,10 @@ func (client ImagesClient) ListByResourceGroupComplete(ctx context.Context, reso
}
// Update update an image.
//
// resourceGroupName is the name of the resource group. imageName is the name of the image. parameters is
// parameters supplied to the Update Image operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// imageName - the name of the image.
// parameters - parameters supplied to the Update Image operation.
func (client ImagesClient) Update(ctx context.Context, resourceGroupName string, imageName string, parameters ImageUpdate) (result ImagesUpdateFuture, err error) {
req, err := client.UpdatePreparer(ctx, resourceGroupName, imageName, parameters)
if err != nil {
@ -488,15 +486,17 @@ func (client ImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client ImagesClient) UpdateSender(req *http.Request) (future ImagesUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}

View File

@ -42,9 +42,9 @@ func NewLogAnalyticsClientWithBaseURI(baseURI string, subscriptionID string) Log
// ExportRequestRateByInterval export logs that show Api requests made by this subscription in the given time window to
// show throttling activities.
//
// parameters is parameters supplied to the LogAnalytics getRequestRateByInterval Api. location is the location
// upon which virtual-machine-sizes is queried.
// Parameters:
// parameters - parameters supplied to the LogAnalytics getRequestRateByInterval Api.
// location - the location upon which virtual-machine-sizes is queried.
func (client LogAnalyticsClient) ExportRequestRateByInterval(ctx context.Context, parameters RequestRateByIntervalInput, location string) (result LogAnalyticsExportRequestRateByIntervalFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: location,
@ -92,15 +92,17 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context
// ExportRequestRateByIntervalSender sends the ExportRequestRateByInterval request. The method will close the
// http.Response Body if it receives an error.
func (client LogAnalyticsClient) ExportRequestRateByIntervalSender(req *http.Request) (future LogAnalyticsExportRequestRateByIntervalFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -119,9 +121,9 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalResponder(resp *http
// ExportThrottledRequests export logs that show total throttled Api requests for this subscription in the given time
// window.
//
// parameters is parameters supplied to the LogAnalytics getThrottledRequests Api. location is the location upon
// which virtual-machine-sizes is queried.
// Parameters:
// parameters - parameters supplied to the LogAnalytics getThrottledRequests Api.
// location - the location upon which virtual-machine-sizes is queried.
func (client LogAnalyticsClient) ExportThrottledRequests(ctx context.Context, parameters ThrottledRequestsInput, location string) (result LogAnalyticsExportThrottledRequestsFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: location,
@ -169,15 +171,17 @@ func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Con
// ExportThrottledRequestsSender sends the ExportThrottledRequests request. The method will close the
// http.Response Body if it receives an error.
func (client LogAnalyticsClient) ExportThrottledRequestsSender(req *http.Request) (future LogAnalyticsExportThrottledRequestsFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}

File diff suppressed because one or more lines are too long

View File

@ -1,130 +0,0 @@
package compute
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"net/http"
)
// ResourceSkusClient is the compute Client
type ResourceSkusClient struct {
BaseClient
}
// NewResourceSkusClient creates an instance of the ResourceSkusClient client.
func NewResourceSkusClient(subscriptionID string) ResourceSkusClient {
return NewResourceSkusClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewResourceSkusClientWithBaseURI creates an instance of the ResourceSkusClient client.
func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) ResourceSkusClient {
return ResourceSkusClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// List gets the list of Microsoft.Compute SKUs available for your Subscription.
func (client ResourceSkusClient) List(ctx context.Context) (result ResourceSkusResultPage, err error) {
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.rsr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure sending request")
return
}
result.rsr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client ResourceSkusClient) ListPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-09-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client ResourceSkusClient) ListResponder(resp *http.Response) (result ResourceSkusResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client ResourceSkusClient) listNextResults(lastResults ResourceSkusResult) (result ResourceSkusResult, err error) {
req, err := lastResults.resourceSkusResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client ResourceSkusClient) ListComplete(ctx context.Context) (result ResourceSkusResultIterator, err error) {
result.page, err = client.List(ctx)
return
}
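The paged List APIs above pair a ...ResultPage with a ...Iterator so callers never touch the nextLink plumbing directly. A rough usage sketch for this (now removed) ResourceSkusClient, with subscriptionID and authorizer as placeholders and imports (context, fmt, the compute package) omitted:

skusClient := compute.NewResourceSkusClient(subscriptionID)
skusClient.Authorizer = authorizer // credential setup not shown
iter, err := skusClient.ListComplete(context.Background())
if err != nil {
	return err
}
for iter.NotDone() {
	sku := iter.Value()
	if sku.Name != nil {
		fmt.Println(*sku.Name)
	}
	// Next advances within the current page and fetches the next page when required.
	if err := iter.Next(); err != nil {
		return err
	}
}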

View File

@ -1,678 +0,0 @@
package compute
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"net/http"
)
// SnapshotsClient is the compute Client
type SnapshotsClient struct {
BaseClient
}
// NewSnapshotsClient creates an instance of the SnapshotsClient client.
func NewSnapshotsClient(subscriptionID string) SnapshotsClient {
return NewSnapshotsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewSnapshotsClientWithBaseURI creates an instance of the SnapshotsClient client.
func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) SnapshotsClient {
return SnapshotsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates a snapshot.
//
// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being
// created. The name can't be changed after the snapshot is created. Supported characters for the name are a-z,
// A-Z, 0-9 and _. The max name length is 80 characters. snapshot is snapshot object supplied in the body of the
// Put disk operation.
func (client SnapshotsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot) (result SnapshotsCreateOrUpdateFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: snapshot,
Constraints: []validation.Constraint{{Target: "snapshot.DiskProperties", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}},
}},
{Target: "snapshot.DiskProperties.EncryptionSettings", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil},
}},
{Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil},
{Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil},
}},
}},
}}}}}); err != nil {
return result, validation.NewError("compute.SnapshotsClient", "CreateOrUpdate", err.Error())
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, snapshotName, snapshot)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
autorest.WithJSON(snapshot),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (future SnapshotsCreateOrUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (result Snapshot, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes a snapshot.
//
// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being
// created. The name can't be changed after the snapshot is created. Supported characters for the name are a-z,
// A-Z, 0-9 and _. The max name length is 80 characters.
func (client SnapshotsClient) Delete(ctx context.Context, resourceGroupName string, snapshotName string) (result SnapshotsDeleteFuture, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, snapshotName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", result.Response(), "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) DeleteSender(req *http.Request) (future SnapshotsDeleteFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Get gets information about a snapshot.
//
// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being
// created. The name can't be changed after the snapshot is created. Supported characters for the name are a-z,
// A-Z, 0-9 and _. The max name length is 80 characters.
func (client SnapshotsClient) Get(ctx context.Context, resourceGroupName string, snapshotName string) (result Snapshot, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, snapshotName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) GetResponder(resp *http.Response) (result Snapshot, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GrantAccess grants access to a snapshot.
//
// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being
// created. The name can't be changed after the snapshot is created. Supported characters for the name are a-z,
// A-Z, 0-9 and _. The max name length is 80 characters. grantAccessData is access data object supplied in the body
// of the get snapshot access operation.
func (client SnapshotsClient) GrantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData) (result SnapshotsGrantAccessFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: grantAccessData,
Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("compute.SnapshotsClient", "GrantAccess", err.Error())
}
req, err := client.GrantAccessPreparer(ctx, resourceGroupName, snapshotName, grantAccessData)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", nil, "Failure preparing request")
return
}
result, err = client.GrantAccessSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", result.Response(), "Failure sending request")
return
}
return
}
// GrantAccessPreparer prepares the GrantAccess request.
func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess", pathParameters),
autorest.WithJSON(grantAccessData),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GrantAccessSender sends the GrantAccess request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) GrantAccessSender(req *http.Request) (future SnapshotsGrantAccessFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
return
}
// GrantAccessResponder handles the response to the GrantAccess request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List lists snapshots under a subscription.
func (client SnapshotsClient) List(ctx context.Context) (result SnapshotListPage, err error) {
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.sl.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure sending request")
return
}
result.sl, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) ListResponder(resp *http.Response) (result SnapshotList, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client SnapshotsClient) listNextResults(lastResults SnapshotList) (result SnapshotList, err error) {
req, err := lastResults.snapshotListPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client SnapshotsClient) ListComplete(ctx context.Context) (result SnapshotListIterator, err error) {
result.page, err = client.List(ctx)
return
}
// ListByResourceGroup lists snapshots under a resource group.
//
// resourceGroupName is the name of the resource group.
func (client SnapshotsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result SnapshotListPage, err error) {
result.fn = client.listByResourceGroupNextResults
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.sl.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result.sl, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to request")
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) ListByResourceGroupResponder(resp *http.Response) (result SnapshotList, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
func (client SnapshotsClient) listByResourceGroupNextResults(lastResults SnapshotList) (result SnapshotList, err error) {
req, err := lastResults.snapshotListPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client SnapshotsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result SnapshotListIterator, err error) {
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
return
}
// RevokeAccess revokes access to a snapshot.
//
// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being
// created. The name can't be changed after the snapshot is created. Supported characters for the name are a-z,
// A-Z, 0-9 and _. The max name length is 80 characters.
func (client SnapshotsClient) RevokeAccess(ctx context.Context, resourceGroupName string, snapshotName string) (result SnapshotsRevokeAccessFuture, err error) {
req, err := client.RevokeAccessPreparer(ctx, resourceGroupName, snapshotName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", nil, "Failure preparing request")
return
}
result, err = client.RevokeAccessSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", result.Response(), "Failure sending request")
return
}
return
}
// RevokeAccessPreparer prepares the RevokeAccess request.
func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// RevokeAccessSender sends the RevokeAccess request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (future SnapshotsRevokeAccessFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
return
}
// RevokeAccessResponder handles the response to the RevokeAccess request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Update updates (patches) a snapshot.
//
// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being
// created. The name can't be changed after the snapshot is created. Supported characters for the name are a-z,
// A-Z, 0-9 and _. The max name length is 80 characters. snapshot is snapshot object supplied in the body of the
// Patch snapshot operation.
func (client SnapshotsClient) Update(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate) (result SnapshotsUpdateFuture, err error) {
req, err := client.UpdatePreparer(ctx, resourceGroupName, snapshotName, snapshot)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", nil, "Failure preparing request")
return
}
result, err = client.UpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", result.Response(), "Failure sending request")
return
}
return
}
// UpdatePreparer prepares the Update request.
func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"snapshotName": autorest.Encode("path", snapshotName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
autorest.WithJSON(snapshot),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client SnapshotsClient) UpdateSender(req *http.Request) (future SnapshotsUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
return
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client SnapshotsClient) UpdateResponder(resp *http.Response) (result Snapshot, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
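The nested validation.Constraint chains in CreateOrUpdate above encode what the service expects of the request body: if DiskProperties is set, CreationData is mandatory, and any encryption settings must name both a source vault and a key or secret URL. A hedged sketch of a snapshot body that satisfies those constraints, using the to.StringPtr helper from go-autorest/autorest/to; location, diskID, and the snapshotsClient wiring are placeholders:

snap := compute.Snapshot{
	Location: to.StringPtr(location),
	DiskProperties: &compute.DiskProperties{
		CreationData: &compute.CreationData{
			CreateOption:     compute.Copy,         // snapshot an existing managed disk
			SourceResourceID: to.StringPtr(diskID), // ARM ID of the source disk
		},
	},
}
future, err := snapshotsClient.CreateOrUpdate(ctx, resourceGroupName, snapshotName, snap)

Omitting CreationData would be rejected locally by validation.Validate before any request is sent.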

View File

@ -42,8 +42,8 @@ func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClien
// List gets, for the specified location, the current compute resource usage information as well as the limits for
// compute resources under the subscription.
//
// location is the location for which resource usage is queried.
// Parameters:
// location - the location for which resource usage is queried.
func (client UsageClient) List(ctx context.Context, location string) (result ListUsagesResultPage, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: location,

View File

@ -41,8 +41,8 @@ func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscript
}
// Get gets a virtual machine extension image.
//
// location is the name of a supported Azure region.
// Parameters:
// location - the name of a supported Azure region.
func (client VirtualMachineExtensionImagesClient) Get(ctx context.Context, location string, publisherName string, typeParameter string, version string) (result VirtualMachineExtensionImage, err error) {
req, err := client.GetPreparer(ctx, location, publisherName, typeParameter, version)
if err != nil {
@ -109,8 +109,8 @@ func (client VirtualMachineExtensionImagesClient) GetResponder(resp *http.Respon
}
// ListTypes gets a list of virtual machine extension image types.
//
// location is the name of a supported Azure region.
// Parameters:
// location - the name of a supported Azure region.
func (client VirtualMachineExtensionImagesClient) ListTypes(ctx context.Context, location string, publisherName string) (result ListVirtualMachineExtensionImage, err error) {
req, err := client.ListTypesPreparer(ctx, location, publisherName)
if err != nil {
@ -175,8 +175,9 @@ func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http.
}
// ListVersions gets a list of virtual machine extension image versions.
//
// location is the name of a supported Azure region. filter is the filter to apply on the operation.
// Parameters:
// location - the name of a supported Azure region.
// filter - the filter to apply on the operation.
func (client VirtualMachineExtensionImagesClient) ListVersions(ctx context.Context, location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (result ListVirtualMachineExtensionImage, err error) {
req, err := client.ListVersionsPreparer(ctx, location, publisherName, typeParameter, filter, top, orderby)
if err != nil {

View File

@ -40,10 +40,11 @@ func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID
}
// CreateOrUpdate the operation to create or update the extension.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine where the
// extension should be created or updated. VMExtensionName is the name of the virtual machine extension.
// extensionParameters is parameters supplied to the Create Virtual Machine Extension operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine where the extension should be created or updated.
// VMExtensionName - the name of the virtual machine extension.
// extensionParameters - parameters supplied to the Create Virtual Machine Extension operation.
func (client VirtualMachineExtensionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtension) (result VirtualMachineExtensionsCreateOrUpdateFuture, err error) {
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMName, VMExtensionName, extensionParameters)
if err != nil {
@ -87,15 +88,17 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(ctx context.
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineExtensionsCreateOrUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -113,9 +116,10 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.
}
// Delete the operation to delete the extension.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine where the
// extension should be deleted. VMExtensionName is the name of the virtual machine extension.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine where the extension should be deleted.
// VMExtensionName - the name of the virtual machine extension.
func (client VirtualMachineExtensionsClient) Delete(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string) (result VirtualMachineExtensionsDeleteFuture, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, VMName, VMExtensionName)
if err != nil {
@ -157,15 +161,17 @@ func (client VirtualMachineExtensionsClient) DeletePreparer(ctx context.Context,
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineExtensionsDeleteFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -183,10 +189,11 @@ func (client VirtualMachineExtensionsClient) DeleteResponder(resp *http.Response
}
// Get the operation to get the extension.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine containing the
// extension. VMExtensionName is the name of the virtual machine extension. expand is the expand expression to
// apply on the operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine containing the extension.
// VMExtensionName - the name of the virtual machine extension.
// expand - the expand expression to apply on the operation.
func (client VirtualMachineExtensionsClient) Get(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, expand string) (result VirtualMachineExtension, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, VMName, VMExtensionName, expand)
if err != nil {
@ -253,3 +260,79 @@ func (client VirtualMachineExtensionsClient) GetResponder(resp *http.Response) (
result.Response = autorest.Response{Response: resp}
return
}
// Update the operation to update the extension.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine where the extension should be updated.
// VMExtensionName - the name of the virtual machine extension.
// extensionParameters - parameters supplied to the Update Virtual Machine Extension operation.
func (client VirtualMachineExtensionsClient) Update(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtensionUpdate) (result VirtualMachineExtensionsUpdateFuture, err error) {
req, err := client.UpdatePreparer(ctx, resourceGroupName, VMName, VMExtensionName, extensionParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Update", nil, "Failure preparing request")
return
}
result, err = client.UpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Update", result.Response(), "Failure sending request")
return
}
return
}
// UpdatePreparer prepares the Update request.
func (client VirtualMachineExtensionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtensionUpdate) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vmExtensionName": autorest.Encode("path", VMExtensionName),
"vmName": autorest.Encode("path", VMName),
}
const APIVersion = "2017-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters),
autorest.WithJSON(extensionParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineExtensionsClient) UpdateSender(req *http.Request) (future VirtualMachineExtensionsUpdateFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client VirtualMachineExtensionsClient) UpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
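The Update method added above gives the extensions client a PATCH path next to CreateOrUpdate. A small sketch of a call that only changes tags, assuming the Tags field is a map[string]*string as in the sibling models; extClient, ctx, rg, vmName, and extName are placeholders:

update := compute.VirtualMachineExtensionUpdate{
	Tags: map[string]*string{
		"env": to.StringPtr("dev"),
	},
}
// The returned future is polled the same way as the other long-running operations.
future, err := extClient.Update(ctx, rg, vmName, extName, update)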

View File

@ -40,9 +40,12 @@ func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID str
}
// Get gets a virtual machine image.
//
// location is the name of a supported Azure region. publisherName is a valid image publisher. offer is a valid
// image publisher offer. skus is a valid image SKU. version is a valid image SKU version.
// Parameters:
// location - the name of a supported Azure region.
// publisherName - a valid image publisher.
// offer - a valid image publisher offer.
// skus - a valid image SKU.
// version - a valid image SKU version.
func (client VirtualMachineImagesClient) Get(ctx context.Context, location string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, err error) {
req, err := client.GetPreparer(ctx, location, publisherName, offer, skus, version)
if err != nil {
@ -110,9 +113,12 @@ func (client VirtualMachineImagesClient) GetResponder(resp *http.Response) (resu
}
// List gets a list of all virtual machine image versions for the specified location, publisher, offer, and SKU.
//
// location is the name of a supported Azure region. publisherName is a valid image publisher. offer is a valid
// image publisher offer. skus is a valid image SKU. filter is the filter to apply on the operation.
// Parameters:
// location - the name of a supported Azure region.
// publisherName - a valid image publisher.
// offer - a valid image publisher offer.
// skus - a valid image SKU.
// filter - the filter to apply on the operation.
func (client VirtualMachineImagesClient) List(ctx context.Context, location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (result ListVirtualMachineImageResource, err error) {
req, err := client.ListPreparer(ctx, location, publisherName, offer, skus, filter, top, orderby)
if err != nil {
@ -188,8 +194,9 @@ func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (res
}
// ListOffers gets a list of virtual machine image offers for the specified location and publisher.
//
// location is the name of a supported Azure region. publisherName is a valid image publisher.
// Parameters:
// location - the name of a supported Azure region.
// publisherName - a valid image publisher.
func (client VirtualMachineImagesClient) ListOffers(ctx context.Context, location string, publisherName string) (result ListVirtualMachineImageResource, err error) {
req, err := client.ListOffersPreparer(ctx, location, publisherName)
if err != nil {
@ -254,8 +261,8 @@ func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response
}
// ListPublishers gets a list of virtual machine image publishers for the specified Azure location.
//
// location is the name of a supported Azure region.
// Parameters:
// location - the name of a supported Azure region.
func (client VirtualMachineImagesClient) ListPublishers(ctx context.Context, location string) (result ListVirtualMachineImageResource, err error) {
req, err := client.ListPublishersPreparer(ctx, location)
if err != nil {
@ -319,9 +326,10 @@ func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Resp
}
// ListSkus gets a list of virtual machine image SKUs for the specified location, publisher, and offer.
//
// location is the name of a supported Azure region. publisherName is a valid image publisher. offer is a valid
// image publisher offer.
// Parameters:
// location - the name of a supported Azure region.
// publisherName - a valid image publisher.
// offer - a valid image publisher offer.
func (client VirtualMachineImagesClient) ListSkus(ctx context.Context, location string, publisherName string, offer string) (result ListVirtualMachineImageResource, err error) {
req, err := client.ListSkusPreparer(ctx, location, publisherName, offer)
if err != nil {

View File

@ -41,8 +41,9 @@ func NewVirtualMachineRunCommandsClientWithBaseURI(baseURI string, subscriptionI
}
// Get gets specific run command for a subscription in a location.
//
// location is the location upon which run commands is queried. commandID is the command id.
// Parameters:
// location - the location upon which run commands is queried.
// commandID - the command id.
func (client VirtualMachineRunCommandsClient) Get(ctx context.Context, location string, commandID string) (result RunCommandDocument, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: location,
@ -113,8 +114,8 @@ func (client VirtualMachineRunCommandsClient) GetResponder(resp *http.Response)
}
// List lists all available run commands for a subscription in a location.
//
// location is the location upon which run commands is queried.
// Parameters:
// location - the location upon which run commands is queried.
func (client VirtualMachineRunCommandsClient) List(ctx context.Context, location string) (result RunCommandListResultPage, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: location,

View File

@ -42,9 +42,10 @@ func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string)
// Capture captures the VM by copying virtual hard disks of the VM and outputs a template that can be used to create
// similar VMs.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. parameters is
// parameters supplied to the Capture Virtual Machine operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
// parameters - parameters supplied to the Capture Virtual Machine operation.
func (client VirtualMachinesClient) Capture(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachineCaptureParameters) (result VirtualMachinesCaptureFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
@ -95,15 +96,17 @@ func (client VirtualMachinesClient) CapturePreparer(ctx context.Context, resourc
// CaptureSender sends the Capture request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) CaptureSender(req *http.Request) (future VirtualMachinesCaptureFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -122,8 +125,9 @@ func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (resul
// ConvertToManagedDisks converts virtual machine disks from blob-based to managed disks. Virtual machine must be
// stop-deallocated before invoking this operation.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
func (client VirtualMachinesClient) ConvertToManagedDisks(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesConvertToManagedDisksFuture, err error) {
req, err := client.ConvertToManagedDisksPreparer(ctx, resourceGroupName, VMName)
if err != nil {
@ -164,15 +168,17 @@ func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(ctx context.Co
// ConvertToManagedDisksSender sends the ConvertToManagedDisks request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Request) (future VirtualMachinesConvertToManagedDisksFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -190,9 +196,10 @@ func (client VirtualMachinesClient) ConvertToManagedDisksResponder(resp *http.Re
}
// CreateOrUpdate the operation to create or update a virtual machine.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. parameters is
// parameters supplied to the Create Virtual Machine operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
// parameters - parameters supplied to the Create Virtual Machine operation.
func (client VirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachine) (result VirtualMachinesCreateOrUpdateFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
@ -256,15 +263,17 @@ func (client VirtualMachinesClient) CreateOrUpdatePreparer(ctx context.Context,
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachinesCreateOrUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -283,8 +292,9 @@ func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response)
// Deallocate shuts down the virtual machine and releases the compute resources. You are not billed for the compute
// resources that this virtual machine uses.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
func (client VirtualMachinesClient) Deallocate(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesDeallocateFuture, err error) {
req, err := client.DeallocatePreparer(ctx, resourceGroupName, VMName)
if err != nil {
@ -325,15 +335,17 @@ func (client VirtualMachinesClient) DeallocatePreparer(ctx context.Context, reso
// DeallocateSender sends the Deallocate request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (future VirtualMachinesDeallocateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -351,8 +363,9 @@ func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (re
}
// Delete the operation to delete a virtual machine.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
func (client VirtualMachinesClient) Delete(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesDeleteFuture, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, VMName)
if err != nil {
@ -393,15 +406,17 @@ func (client VirtualMachinesClient) DeletePreparer(ctx context.Context, resource
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) DeleteSender(req *http.Request) (future VirtualMachinesDeleteFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -419,8 +434,9 @@ func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result
}
// Generalize sets the state of the virtual machine to generalized.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
func (client VirtualMachinesClient) Generalize(ctx context.Context, resourceGroupName string, VMName string) (result OperationStatusResponse, err error) {
req, err := client.GeneralizePreparer(ctx, resourceGroupName, VMName)
if err != nil {
@ -485,9 +501,10 @@ func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (re
}
// Get retrieves information about the model view or the instance view of a virtual machine.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. expand is the
// expand expression to apply on the operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
// expand - the expand expression to apply on the operation.
func (client VirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand InstanceViewTypes) (result VirtualMachine, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, VMName, expand)
if err != nil {
@ -554,9 +571,81 @@ func (client VirtualMachinesClient) GetResponder(resp *http.Response) (result Vi
return
}
// GetExtensions the operation to get all extensions of a Virtual Machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine containing the extension.
// expand - the expand expression to apply on the operation.
func (client VirtualMachinesClient) GetExtensions(ctx context.Context, resourceGroupName string, VMName string, expand string) (result VirtualMachineExtensionsListResult, err error) {
req, err := client.GetExtensionsPreparer(ctx, resourceGroupName, VMName, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "GetExtensions", nil, "Failure preparing request")
return
}
resp, err := client.GetExtensionsSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "GetExtensions", resp, "Failure sending request")
return
}
result, err = client.GetExtensionsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "GetExtensions", resp, "Failure responding to request")
}
return
}
// GetExtensionsPreparer prepares the GetExtensions request.
func (client VirtualMachinesClient) GetExtensionsPreparer(ctx context.Context, resourceGroupName string, VMName string, expand string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vmName": autorest.Encode("path", VMName),
}
const APIVersion = "2017-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(expand) > 0 {
queryParameters["$expand"] = autorest.Encode("query", expand)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetExtensionsSender sends the GetExtensions request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) GetExtensionsSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetExtensionsResponder handles the response to the GetExtensions request. The method always
// closes the http.Response Body.
func (client VirtualMachinesClient) GetExtensionsResponder(resp *http.Response) (result VirtualMachineExtensionsListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
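
A brief usage sketch for the newly added GetExtensions operation; the resource names are placeholders, and the Value and Name pointer fields are assumed from the usual generated list-result shape rather than shown in this diff.

// listExtensionNames is an illustrative helper, not part of the generated client.
func listExtensionNames(ctx context.Context, vmClient compute.VirtualMachinesClient, group, vmName string) ([]string, error) {
	// Empty expand string: the preparer only adds $expand when it is non-empty.
	extList, err := vmClient.GetExtensions(ctx, group, vmName, "")
	if err != nil {
		return nil, err
	}
	var names []string
	if extList.Value != nil {
		for _, ext := range *extList.Value {
			if ext.Name != nil {
				names = append(names, *ext.Name)
			}
		}
	}
	return names, nil
}
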
// InstanceView retrieves information about the run-time state of a virtual machine.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
func (client VirtualMachinesClient) InstanceView(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachineInstanceView, err error) {
req, err := client.InstanceViewPreparer(ctx, resourceGroupName, VMName)
if err != nil {
@ -622,8 +711,8 @@ func (client VirtualMachinesClient) InstanceViewResponder(resp *http.Response) (
// List lists all of the virtual machines in the specified resource group. Use the nextLink property in the response to
// get the next page of virtual machines.
//
// resourceGroupName is the name of the resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
func (client VirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result VirtualMachineListResultPage, err error) {
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName)
@ -806,8 +895,9 @@ func (client VirtualMachinesClient) ListAllComplete(ctx context.Context) (result
}
// ListAvailableSizes lists all available virtual machine sizes to which the specified virtual machine can be resized.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
func (client VirtualMachinesClient) ListAvailableSizes(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachineSizeListResult, err error) {
req, err := client.ListAvailableSizesPreparer(ctx, resourceGroupName, VMName)
if err != nil {
@ -872,8 +962,9 @@ func (client VirtualMachinesClient) ListAvailableSizesResponder(resp *http.Respo
}
// PerformMaintenance the operation to perform maintenance on a virtual machine.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
func (client VirtualMachinesClient) PerformMaintenance(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesPerformMaintenanceFuture, err error) {
req, err := client.PerformMaintenancePreparer(ctx, resourceGroupName, VMName)
if err != nil {
@ -914,15 +1005,17 @@ func (client VirtualMachinesClient) PerformMaintenancePreparer(ctx context.Conte
// PerformMaintenanceSender sends the PerformMaintenance request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachinesPerformMaintenanceFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -941,8 +1034,9 @@ func (client VirtualMachinesClient) PerformMaintenanceResponder(resp *http.Respo
// PowerOff the operation to power off (stop) a virtual machine. The virtual machine can be restarted with the same
// provisioned resources. You are still charged for this virtual machine.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
func (client VirtualMachinesClient) PowerOff(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesPowerOffFuture, err error) {
req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMName)
if err != nil {
@ -983,15 +1077,17 @@ func (client VirtualMachinesClient) PowerOffPreparer(ctx context.Context, resour
// PowerOffSender sends the PowerOff request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (future VirtualMachinesPowerOffFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -1009,8 +1105,9 @@ func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (resu
}
// Redeploy the operation to redeploy a virtual machine.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
func (client VirtualMachinesClient) Redeploy(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesRedeployFuture, err error) {
req, err := client.RedeployPreparer(ctx, resourceGroupName, VMName)
if err != nil {
@ -1051,15 +1148,17 @@ func (client VirtualMachinesClient) RedeployPreparer(ctx context.Context, resour
// RedeploySender sends the Redeploy request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) RedeploySender(req *http.Request) (future VirtualMachinesRedeployFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -1077,8 +1176,9 @@ func (client VirtualMachinesClient) RedeployResponder(resp *http.Response) (resu
}
// Restart the operation to restart a virtual machine.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
func (client VirtualMachinesClient) Restart(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesRestartFuture, err error) {
req, err := client.RestartPreparer(ctx, resourceGroupName, VMName)
if err != nil {
@ -1119,15 +1219,17 @@ func (client VirtualMachinesClient) RestartPreparer(ctx context.Context, resourc
// RestartSender sends the Restart request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) RestartSender(req *http.Request) (future VirtualMachinesRestartFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -1145,9 +1247,10 @@ func (client VirtualMachinesClient) RestartResponder(resp *http.Response) (resul
}
// RunCommand run command on the VM.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. parameters is
// parameters supplied to the Run command operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
// parameters - parameters supplied to the Run command operation.
func (client VirtualMachinesClient) RunCommand(ctx context.Context, resourceGroupName string, VMName string, parameters RunCommandInput) (result VirtualMachinesRunCommandFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
@ -1196,15 +1299,17 @@ func (client VirtualMachinesClient) RunCommandPreparer(ctx context.Context, reso
// RunCommandSender sends the RunCommand request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (future VirtualMachinesRunCommandFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
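
A sketch of invoking the Run command operation end to end; the RunCommandInput field names (CommandID, Script), the to.StringPtr helper from autorest/to, and WaitForCompletion on azure.Future are assumptions about types this diff does not show.

// runShellCommand is an illustrative helper, not part of the generated client.
func runShellCommand(ctx context.Context, vmClient compute.VirtualMachinesClient, group, vmName string) error {
	input := compute.RunCommandInput{
		CommandID: to.StringPtr("RunShellScript"), // assumed field and built-in command name
		Script:    &[]string{"echo hello"},
	}
	future, err := vmClient.RunCommand(ctx, group, vmName, input)
	if err != nil {
		return err
	}
	// Block until the asynchronous operation completes.
	return future.WaitForCompletion(ctx, vmClient.Client)
}
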
@ -1222,8 +1327,9 @@ func (client VirtualMachinesClient) RunCommandResponder(resp *http.Response) (re
}
// Start the operation to start a virtual machine.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
func (client VirtualMachinesClient) Start(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesStartFuture, err error) {
req, err := client.StartPreparer(ctx, resourceGroupName, VMName)
if err != nil {
@ -1264,15 +1370,17 @@ func (client VirtualMachinesClient) StartPreparer(ctx context.Context, resourceG
// StartSender sends the Start request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) StartSender(req *http.Request) (future VirtualMachinesStartFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -1290,9 +1398,10 @@ func (client VirtualMachinesClient) StartResponder(resp *http.Response) (result
}
// Update the operation to update a virtual machine.
//
// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. parameters is
// parameters supplied to the Update Virtual Machine operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMName - the name of the virtual machine.
// parameters - parameters supplied to the Update Virtual Machine operation.
func (client VirtualMachinesClient) Update(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachineUpdate) (result VirtualMachinesUpdateFuture, err error) {
req, err := client.UpdatePreparer(ctx, resourceGroupName, VMName, parameters)
if err != nil {
@ -1335,15 +1444,17 @@ func (client VirtualMachinesClient) UpdatePreparer(ctx context.Context, resource
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachinesClient) UpdateSender(req *http.Request) (future VirtualMachinesUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}

View File

@ -41,10 +41,11 @@ func NewVirtualMachineScaleSetExtensionsClientWithBaseURI(baseURI string, subscr
}
// CreateOrUpdate the operation to create or update an extension.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set where the
// extension should be create or updated. vmssExtensionName is the name of the VM scale set extension.
// extensionParameters is parameters supplied to the Create VM scale set Extension operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set where the extension should be created or updated.
// vmssExtensionName - the name of the VM scale set extension.
// extensionParameters - parameters supplied to the Create VM scale set Extension operation.
func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension) (result VirtualMachineScaleSetExtensionsCreateOrUpdateFuture, err error) {
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMScaleSetName, vmssExtensionName, extensionParameters)
if err != nil {
@ -88,15 +89,17 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(ctx
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetExtensionsCreateOrUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -114,9 +117,10 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateResponder(res
}
// Delete the operation to delete the extension.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set where the
// extension should be deleted. vmssExtensionName is the name of the VM scale set extension.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set where the extension should be deleted.
// vmssExtensionName - the name of the VM scale set extension.
func (client VirtualMachineScaleSetExtensionsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string) (result VirtualMachineScaleSetExtensionsDeleteFuture, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName, vmssExtensionName)
if err != nil {
@ -158,15 +162,17 @@ func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(ctx context.
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetExtensionsDeleteFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -184,10 +190,11 @@ func (client VirtualMachineScaleSetExtensionsClient) DeleteResponder(resp *http.
}
// Get the operation to get the extension.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set containing
// the extension. vmssExtensionName is the name of the VM scale set extension. expand is the expand expression to
// apply on the operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set containing the extension.
// vmssExtensionName - the name of the VM scale set extension.
// expand - the expand expression to apply on the operation.
func (client VirtualMachineScaleSetExtensionsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, expand string) (result VirtualMachineScaleSetExtension, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, vmssExtensionName, expand)
if err != nil {
@ -256,9 +263,9 @@ func (client VirtualMachineScaleSetExtensionsClient) GetResponder(resp *http.Res
}
// List gets a list of all extensions in a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set containing
// the extension.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set containing the extension.
func (client VirtualMachineScaleSetExtensionsClient) List(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetExtensionListResultPage, err error) {
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName, VMScaleSetName)

View File

@ -42,8 +42,9 @@ func NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(baseURI string, s
}
// Cancel cancels the current virtual machine scale set rolling upgrade.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
func (client VirtualMachineScaleSetRollingUpgradesClient) Cancel(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetRollingUpgradesCancelFuture, err error) {
req, err := client.CancelPreparer(ctx, resourceGroupName, VMScaleSetName)
if err != nil {
@ -84,15 +85,17 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(ctx con
// CancelSender sends the Cancel request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesCancelFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -110,8 +113,9 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelResponder(resp *
}
// GetLatest gets the status of the latest virtual machine scale set rolling upgrade.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatest(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result RollingUpgradeStatusInfo, err error) {
req, err := client.GetLatestPreparer(ctx, resourceGroupName, VMScaleSetName)
if err != nil {
@ -177,8 +181,9 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestResponder(res
// StartOSUpgrade starts a rolling upgrade to move all virtual machine scale set instances to the latest available
// Platform Image OS version. Instances which are already running the latest available OS version are not affected.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgrade(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture, err error) {
req, err := client.StartOSUpgradePreparer(ctx, resourceGroupName, VMScaleSetName)
if err != nil {
@ -219,15 +224,17 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer
// StartOSUpgradeSender sends the StartOSUpgrade request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
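
A sketch tying the rolling-upgrade operations together: start an OS upgrade, wait on the future, then read the latest status with GetLatest. WaitForCompletion is assumed from the azure.Future API of this go-autorest vintage; the resource names are placeholders.

// startAndCheckOSUpgrade is an illustrative helper, not part of the generated client.
func startAndCheckOSUpgrade(ctx context.Context, ruClient compute.VirtualMachineScaleSetRollingUpgradesClient, group, vmssName string) (compute.RollingUpgradeStatusInfo, error) {
	future, err := ruClient.StartOSUpgrade(ctx, group, vmssName)
	if err != nil {
		return compute.RollingUpgradeStatusInfo{}, err
	}
	// Wait for the start-OS-upgrade operation to reach a terminal state.
	if err := future.WaitForCompletion(ctx, ruClient.Client); err != nil {
		return compute.RollingUpgradeStatusInfo{}, err
	}
	// GetLatest reports the status of the most recent rolling upgrade on the scale set.
	return ruClient.GetLatest(ctx, group, vmssName)
}
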

View File

@ -41,9 +41,10 @@ func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID
}
// CreateOrUpdate create or update a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set to create or
// update. parameters is the scale set object.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set to create or update.
// parameters - the scale set object.
func (client VirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSet) (result VirtualMachineScaleSetsCreateOrUpdateFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
@ -51,15 +52,15 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context,
Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMaximum, Rule: 100, Chain: nil},
Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMinimum, Rule: 5, Chain: nil},
}},
{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMaximum, Rule: 100, Chain: nil},
Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMinimum, Rule: 5, Chain: nil},
}},
{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMaximum, Rule: 100, Chain: nil},
Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil},
{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil},
}},
}},
@ -109,15 +110,17 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(ctx context.C
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetsCreateOrUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
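
For reference, a sketch of scale-set parameters that satisfy the client-side validation above: the three rolling-upgrade percentages must stay within their inclusive bounds. The struct field names mirror the validation targets; to.Int32Ptr comes from github.com/Azure/go-autorest/autorest/to, and all other fields are left at their zero values.

// newScaleSetWithRollingPolicy is an illustrative constructor, not generated code.
func newScaleSetWithRollingPolicy() compute.VirtualMachineScaleSet {
	return compute.VirtualMachineScaleSet{
		VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
			UpgradePolicy: &compute.UpgradePolicy{
				RollingUpgradePolicy: &compute.RollingUpgradePolicy{
					MaxBatchInstancePercent:             to.Int32Ptr(20), // allowed range [5, 100]
					MaxUnhealthyInstancePercent:         to.Int32Ptr(20), // allowed range [5, 100]
					MaxUnhealthyUpgradedInstancePercent: to.Int32Ptr(20), // allowed range [0, 100]
				},
			},
		},
	}
}
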
@ -136,9 +139,10 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.R
// Deallocate deallocates specific virtual machines in a VM scale set. Shuts down the virtual machines and releases the
// compute resources. You are not billed for the compute resources that this virtual machine scale set deallocates.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// VMInstanceIDs is a list of virtual machine instance IDs from the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
func (client VirtualMachineScaleSetsClient) Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsDeallocateFuture, err error) {
req, err := client.DeallocatePreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
if err != nil {
@ -184,15 +188,17 @@ func (client VirtualMachineScaleSetsClient) DeallocatePreparer(ctx context.Conte
// DeallocateSender sends the Deallocate request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) (future VirtualMachineScaleSetsDeallocateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -210,8 +216,9 @@ func (client VirtualMachineScaleSetsClient) DeallocateResponder(resp *http.Respo
}
// Delete deletes a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
func (client VirtualMachineScaleSetsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetsDeleteFuture, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName)
if err != nil {
@ -252,15 +259,17 @@ func (client VirtualMachineScaleSetsClient) DeletePreparer(ctx context.Context,
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetsDeleteFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -278,9 +287,10 @@ func (client VirtualMachineScaleSetsClient) DeleteResponder(resp *http.Response)
}
// DeleteInstances deletes virtual machines in a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// VMInstanceIDs is a list of virtual machine instance IDs from the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
func (client VirtualMachineScaleSetsClient) DeleteInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (result VirtualMachineScaleSetsDeleteInstancesFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: VMInstanceIDs,
@ -329,15 +339,17 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(ctx context.
// DeleteInstancesSender sends the DeleteInstances request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Request) (future VirtualMachineScaleSetsDeleteInstancesFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -356,9 +368,10 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesResponder(resp *http.
// ForceRecoveryServiceFabricPlatformUpdateDomainWalk manual platform update domain walk to update virtual machines in
// a service fabric virtual machine scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// platformUpdateDomain is the platform update domain for which a manual recovery walk is requested
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// platformUpdateDomain - the platform update domain for which a manual recovery walk is requested.
func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalk(ctx context.Context, resourceGroupName string, VMScaleSetName string, platformUpdateDomain int32) (result RecoveryWalkResponse, err error) {
req, err := client.ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer(ctx, resourceGroupName, VMScaleSetName, platformUpdateDomain)
if err != nil {
@ -424,8 +437,9 @@ func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUp
}
// Get display information about a virtual machine scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
func (client VirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSet, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName)
if err != nil {
@ -490,8 +504,9 @@ func (client VirtualMachineScaleSetsClient) GetResponder(resp *http.Response) (r
}
// GetInstanceView gets the status of a VM scale set instance.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
func (client VirtualMachineScaleSetsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetInstanceView, err error) {
req, err := client.GetInstanceViewPreparer(ctx, resourceGroupName, VMScaleSetName)
if err != nil {
@ -555,9 +570,104 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewResponder(resp *http.
return
}
// GetOSUpgradeHistory gets a list of OS upgrades on a VM scale set instance.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistory(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetListOSUpgradeHistoryPage, err error) {
result.fn = client.getOSUpgradeHistoryNextResults
req, err := client.GetOSUpgradeHistoryPreparer(ctx, resourceGroupName, VMScaleSetName)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetOSUpgradeHistory", nil, "Failure preparing request")
return
}
resp, err := client.GetOSUpgradeHistorySender(req)
if err != nil {
result.vmsslouh.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetOSUpgradeHistory", resp, "Failure sending request")
return
}
result.vmsslouh, err = client.GetOSUpgradeHistoryResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetOSUpgradeHistory", resp, "Failure responding to request")
}
return
}
// GetOSUpgradeHistoryPreparer prepares the GetOSUpgradeHistory request.
func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
const APIVersion = "2017-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osUpgradeHistory", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetOSUpgradeHistorySender sends the GetOSUpgradeHistory request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistorySender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetOSUpgradeHistoryResponder handles the response to the GetOSUpgradeHistory request. The method always
// closes the http.Response Body.
func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryResponder(resp *http.Response) (result VirtualMachineScaleSetListOSUpgradeHistory, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// getOSUpgradeHistoryNextResults retrieves the next set of results, if any.
func (client VirtualMachineScaleSetsClient) getOSUpgradeHistoryNextResults(lastResults VirtualMachineScaleSetListOSUpgradeHistory) (result VirtualMachineScaleSetListOSUpgradeHistory, err error) {
req, err := lastResults.virtualMachineScaleSetListOSUpgradeHistoryPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "getOSUpgradeHistoryNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.GetOSUpgradeHistorySender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "getOSUpgradeHistoryNextResults", resp, "Failure sending next results request")
}
result, err = client.GetOSUpgradeHistoryResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "getOSUpgradeHistoryNextResults", resp, "Failure responding to next results request")
}
return
}
// GetOSUpgradeHistoryComplete enumerates all values, automatically crossing page boundaries as required.
func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryComplete(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetListOSUpgradeHistoryIterator, err error) {
result.page, err = client.GetOSUpgradeHistory(ctx, resourceGroupName, VMScaleSetName)
return
}
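
A sketch of consuming the new paged history API through the Complete variant; the iterator's NotDone/Value/Next methods follow the usual generated-iterator shape and are assumed here rather than shown in this diff.

// printOSUpgradeHistory is an illustrative helper, not part of the generated client.
func printOSUpgradeHistory(ctx context.Context, ssClient compute.VirtualMachineScaleSetsClient, group, vmssName string) error {
	// GetOSUpgradeHistoryComplete crosses page boundaries automatically.
	iter, err := ssClient.GetOSUpgradeHistoryComplete(ctx, group, vmssName)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		fmt.Printf("OS upgrade history entry: %+v\n", iter.Value())
		if err := iter.Next(); err != nil {
			return err
		}
	}
	return nil
}
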
// List gets a list of all VM scale sets under a resource group.
//
// resourceGroupName is the name of the resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
func (client VirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result VirtualMachineScaleSetListResultPage, err error) {
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName)
@ -742,8 +852,9 @@ func (client VirtualMachineScaleSetsClient) ListAllComplete(ctx context.Context)
// ListSkus gets a list of SKUs available for your VM scale set, including the minimum and maximum VM instances allowed
// for each SKU.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
func (client VirtualMachineScaleSetsClient) ListSkus(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetListSkusResultPage, err error) {
result.fn = client.listSkusNextResults
req, err := client.ListSkusPreparer(ctx, resourceGroupName, VMScaleSetName)
@ -835,11 +946,91 @@ func (client VirtualMachineScaleSetsClient) ListSkusComplete(ctx context.Context
return
}
// PerformMaintenance performs maintenance on one or more virtual machines in a VM scale set. Operations on instances
// that are not eligible for maintenance will fail. Please refer to best practices for more details:
// https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
func (client VirtualMachineScaleSetsClient) PerformMaintenance(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsPerformMaintenanceFuture, err error) {
req, err := client.PerformMaintenancePreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PerformMaintenance", nil, "Failure preparing request")
return
}
result, err = client.PerformMaintenanceSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PerformMaintenance", result.Response(), "Failure sending request")
return
}
return
}
// PerformMaintenancePreparer prepares the PerformMaintenance request.
func (client VirtualMachineScaleSetsClient) PerformMaintenancePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
const APIVersion = "2017-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/performMaintenance", pathParameters),
autorest.WithQueryParameters(queryParameters))
if VMInstanceIDs != nil {
preparer = autorest.DecoratePreparer(preparer,
autorest.WithJSON(VMInstanceIDs))
}
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// PerformMaintenanceSender sends the PerformMaintenance request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachineScaleSetsPerformMaintenanceFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// PerformMaintenanceResponder handles the response to the PerformMaintenance request. The method always
// closes the http.Response Body.
func (client VirtualMachineScaleSetsClient) PerformMaintenanceResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
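
A sketch of calling the new PerformMaintenance operation against specific instances; passing nil for VMInstanceIDs omits the JSON body (per the preparer above) and targets the whole scale set. The InstanceIds field name is assumed from the usual shape of VirtualMachineScaleSetVMInstanceIDs, and WaitForCompletion is assumed from the azure.Future API.

// maintainInstances is an illustrative helper, not part of the generated client.
func maintainInstances(ctx context.Context, ssClient compute.VirtualMachineScaleSetsClient, group, vmssName string) error {
	ids := compute.VirtualMachineScaleSetVMInstanceIDs{
		InstanceIds: &[]string{"0", "1"}, // assumed field name
	}
	future, err := ssClient.PerformMaintenance(ctx, group, vmssName, &ids)
	if err != nil {
		return err
	}
	// Block until the maintenance operation reaches a terminal state.
	return future.WaitForCompletion(ctx, ssClient.Client)
}
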
// PowerOff power off (stop) one or more virtual machines in a VM scale set. Note that resources are still attached and
// you are getting charged for the resources. Instead, use deallocate to release resources and avoid charges.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// VMInstanceIDs is a list of virtual machine instance IDs from the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
func (client VirtualMachineScaleSetsClient) PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsPowerOffFuture, err error) {
req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
if err != nil {
@ -885,15 +1076,17 @@ func (client VirtualMachineScaleSetsClient) PowerOffPreparer(ctx context.Context
// PowerOffSender sends the PowerOff request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (future VirtualMachineScaleSetsPowerOffFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -910,10 +1103,88 @@ func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Respons
return
}
// Redeploy redeploy one or more virtual machines in a VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
func (client VirtualMachineScaleSetsClient) Redeploy(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsRedeployFuture, err error) {
req, err := client.RedeployPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Redeploy", nil, "Failure preparing request")
return
}
result, err = client.RedeploySender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Redeploy", result.Response(), "Failure sending request")
return
}
return
}
// RedeployPreparer prepares the Redeploy request.
func (client VirtualMachineScaleSetsClient) RedeployPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
const APIVersion = "2017-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/redeploy", pathParameters),
autorest.WithQueryParameters(queryParameters))
if VMInstanceIDs != nil {
preparer = autorest.DecoratePreparer(preparer,
autorest.WithJSON(VMInstanceIDs))
}
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// RedeploySender sends the Redeploy request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) RedeploySender(req *http.Request) (future VirtualMachineScaleSetsRedeployFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// RedeployResponder handles the response to the Redeploy request. The method always
// closes the http.Response Body.
func (client VirtualMachineScaleSetsClient) RedeployResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Reimage reimages (upgrades the operating system of) one or more virtual machines in a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// VMInstanceIDs is a list of virtual machine instance IDs from the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
func (client VirtualMachineScaleSetsClient) Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsReimageFuture, err error) {
req, err := client.ReimagePreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
if err != nil {
@ -959,15 +1230,17 @@ func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context,
// ReimageSender sends the Reimage request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (future VirtualMachineScaleSetsReimageFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -986,9 +1259,10 @@ func (client VirtualMachineScaleSetsClient) ReimageResponder(resp *http.Response
// ReimageAll reimages all the disks (including data disks) in the virtual machines in a VM scale set. This operation
// is only supported for managed disks.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// VMInstanceIDs is a list of virtual machine instance IDs from the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
func (client VirtualMachineScaleSetsClient) ReimageAll(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsReimageAllFuture, err error) {
req, err := client.ReimageAllPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
if err != nil {
@ -1034,15 +1308,17 @@ func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(ctx context.Conte
// ReimageAllSender sends the ReimageAll request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request) (future VirtualMachineScaleSetsReimageAllFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -1060,9 +1336,10 @@ func (client VirtualMachineScaleSetsClient) ReimageAllResponder(resp *http.Respo
}
// Restart restarts one or more virtual machines in a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// VMInstanceIDs is a list of virtual machine instance IDs from the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
func (client VirtualMachineScaleSetsClient) Restart(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsRestartFuture, err error) {
req, err := client.RestartPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
if err != nil {
@ -1108,15 +1385,17 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(ctx context.Context,
// RestartSender sends the Restart request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (future VirtualMachineScaleSetsRestartFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -1134,9 +1413,10 @@ func (client VirtualMachineScaleSetsClient) RestartResponder(resp *http.Response
}
// Start starts one or more virtual machines in a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// VMInstanceIDs is a list of virtual machine instance IDs from the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
func (client VirtualMachineScaleSetsClient) Start(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsStartFuture, err error) {
req, err := client.StartPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
if err != nil {
@ -1182,15 +1462,17 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(ctx context.Context, r
// StartSender sends the Start request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (future VirtualMachineScaleSetsStartFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -1208,9 +1490,10 @@ func (client VirtualMachineScaleSetsClient) StartResponder(resp *http.Response)
}
// Update updates a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set to create or
// update. parameters is the scale set object.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set to create or update.
// parameters - the scale set object.
func (client VirtualMachineScaleSetsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSetUpdate) (result VirtualMachineScaleSetsUpdateFuture, err error) {
req, err := client.UpdatePreparer(ctx, resourceGroupName, VMScaleSetName, parameters)
if err != nil {
@ -1253,15 +1536,17 @@ func (client VirtualMachineScaleSetsClient) UpdatePreparer(ctx context.Context,
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetsUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -1279,9 +1564,10 @@ func (client VirtualMachineScaleSetsClient) UpdateResponder(resp *http.Response)
}
// UpdateInstances upgrades one or more virtual machines to the latest SKU set in the VM scale set model.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
// VMInstanceIDs is a list of virtual machine instance IDs from the VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set.
func (client VirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (result VirtualMachineScaleSetsUpdateInstancesFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: VMInstanceIDs,
@ -1330,15 +1616,17 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(ctx context.
// UpdateInstancesSender sends the UpdateInstances request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Request) (future VirtualMachineScaleSetsUpdateInstancesFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
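The pattern above repeats for every long-running operation in this client: instead of constructing an azure.Future up front and driving it with future.Done, each generated Sender now issues the initial request synchronously (wrapped in the registration-retry decorator), checks the status code, and builds the future from that response via azure.NewFutureFromResponse. A minimal caller-side sketch follows; the import path, the Result signature, and the authorizer setup are assumptions based on this API version's generated code rather than anything shown in the diff.

```go
package example

import (
	"context"
	"fmt"
	"time"

	// Import path assumed for the vendored 2017-12-01 compute package.
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

func reimageScaleSet(ctx context.Context, subscriptionID, resourceGroup, scaleSetName string) error {
	client := compute.NewVirtualMachineScaleSetsClient(subscriptionID)
	// client.Authorizer must be set before use; omitted here.

	// Reimage now sends the POST synchronously and returns a future built
	// from that first response (azure.NewFutureFromResponse).
	future, err := client.Reimage(ctx, resourceGroup, scaleSetName, nil)
	if err != nil {
		return err
	}

	// Block until the operation reaches a terminal state.
	waitCtx, cancel := context.WithTimeout(ctx, 30*time.Minute)
	defer cancel()
	if err := future.WaitForCompletionRef(waitCtx, client.Client); err != nil {
		return err
	}

	// Result signature assumed from this API version's generated futures.
	status, err := future.Result(client)
	if err != nil {
		return err
	}
	if status.Status != nil {
		fmt.Println("reimage finished with status:", *status.Status)
	}
	return nil
}
```

The same calling shape applies to Deallocate, Delete, PowerOff, Restart, Start and the other futures in this file.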

View File

@ -43,9 +43,10 @@ func NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI string, subscriptionI
// Deallocate deallocates a specific virtual machine in a VM scale set. Shuts down the virtual machine and releases the
// compute resources it uses. You are not billed for the compute resources of this virtual machine once it is
// deallocated.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID
// is the instance ID of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
func (client VirtualMachineScaleSetVMsClient) Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsDeallocateFuture, err error) {
req, err := client.DeallocatePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
@ -87,15 +88,17 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Con
// DeallocateSender sends the Deallocate request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request) (future VirtualMachineScaleSetVMsDeallocateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -113,9 +116,10 @@ func (client VirtualMachineScaleSetVMsClient) DeallocateResponder(resp *http.Res
}
// Delete deletes a virtual machine from a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID
// is the instance ID of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
func (client VirtualMachineScaleSetVMsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsDeleteFuture, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
@ -157,15 +161,17 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(ctx context.Context
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetVMsDeleteFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -183,9 +189,10 @@ func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Respons
}
// Get gets a virtual machine from a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID
// is the instance ID of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
@ -251,9 +258,10 @@ func (client VirtualMachineScaleSetVMsClient) GetResponder(resp *http.Response)
}
// GetInstanceView gets the status of a virtual machine from a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID
// is the instance ID of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
func (client VirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMInstanceView, err error) {
req, err := client.GetInstanceViewPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
@ -319,10 +327,12 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewResponder(resp *htt
}
// List gets a list of all virtual machines in a VM scale set.
//
// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the VM scale set.
// filter is the filter to apply to the operation. selectParameter is the list parameters. expand is the expand
// expression to apply to the operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// virtualMachineScaleSetName - the name of the VM scale set.
// filter - the filter to apply to the operation.
// selectParameter - the list parameters.
// expand - the expand expression to apply to the operation.
func (client VirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResultPage, err error) {
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand)
@ -423,11 +433,85 @@ func (client VirtualMachineScaleSetVMsClient) ListComplete(ctx context.Context,
return
}
// PerformMaintenance performs maintenance on a virtual machine in a VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
func (client VirtualMachineScaleSetVMsClient) PerformMaintenance(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsPerformMaintenanceFuture, err error) {
req, err := client.PerformMaintenancePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PerformMaintenance", nil, "Failure preparing request")
return
}
result, err = client.PerformMaintenanceSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PerformMaintenance", result.Response(), "Failure sending request")
return
}
return
}
// PerformMaintenancePreparer prepares the PerformMaintenance request.
func (client VirtualMachineScaleSetVMsClient) PerformMaintenancePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"instanceId": autorest.Encode("path", instanceID),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
const APIVersion = "2017-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/performMaintenance", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// PerformMaintenanceSender sends the PerformMaintenance request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachineScaleSetVMsPerformMaintenanceFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// PerformMaintenanceResponder handles the response to the PerformMaintenance request. The method always
// closes the http.Response Body.
func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// PowerOff powers off (stops) a virtual machine in a VM scale set. Note that resources are still attached and you are
// getting charged for the resources. Instead, use deallocate to release resources and avoid charges.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID
// is the instance ID of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
func (client VirtualMachineScaleSetVMsClient) PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsPowerOffFuture, err error) {
req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
@ -469,15 +553,17 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Conte
// PowerOffSender sends the PowerOff request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) (future VirtualMachineScaleSetVMsPowerOffFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -494,10 +580,84 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Respo
return
}
// Redeploy redeploys a virtual machine in a VM scale set.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
func (client VirtualMachineScaleSetVMsClient) Redeploy(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsRedeployFuture, err error) {
req, err := client.RedeployPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Redeploy", nil, "Failure preparing request")
return
}
result, err = client.RedeploySender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Redeploy", result.Response(), "Failure sending request")
return
}
return
}
// RedeployPreparer prepares the Redeploy request.
func (client VirtualMachineScaleSetVMsClient) RedeployPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"instanceId": autorest.Encode("path", instanceID),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vmScaleSetName": autorest.Encode("path", VMScaleSetName),
}
const APIVersion = "2017-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/redeploy", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// RedeploySender sends the Redeploy request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetVMsClient) RedeploySender(req *http.Request) (future VirtualMachineScaleSetVMsRedeployFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// RedeployResponder handles the response to the Redeploy request. The method always
// closes the http.Response Body.
func (client VirtualMachineScaleSetVMsClient) RedeployResponder(resp *http.Response) (result OperationStatusResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Reimage reimages (upgrade the operating system) a specific virtual machine in a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID
// is the instance ID of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
func (client VirtualMachineScaleSetVMsClient) Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsReimageFuture, err error) {
req, err := client.ReimagePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
@ -539,15 +699,17 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Contex
// ReimageSender sends the Reimage request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) (future VirtualMachineScaleSetVMsReimageFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -566,9 +728,10 @@ func (client VirtualMachineScaleSetVMsClient) ReimageResponder(resp *http.Respon
// ReimageAll allows you to re-image all the disks (including data disks) in a VM scale set instance. This
// operation is only supported for managed disks.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID
// is the instance ID of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
func (client VirtualMachineScaleSetVMsClient) ReimageAll(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsReimageAllFuture, err error) {
req, err := client.ReimageAllPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
@ -610,15 +773,17 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(ctx context.Con
// ReimageAllSender sends the ReimageAll request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request) (future VirtualMachineScaleSetVMsReimageAllFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -636,9 +801,10 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllResponder(resp *http.Res
}
// Restart restarts a virtual machine in a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID
// is the instance ID of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
func (client VirtualMachineScaleSetVMsClient) Restart(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsRestartFuture, err error) {
req, err := client.RestartPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
@ -680,15 +846,17 @@ func (client VirtualMachineScaleSetVMsClient) RestartPreparer(ctx context.Contex
// RestartSender sends the Restart request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (future VirtualMachineScaleSetVMsRestartFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -706,9 +874,10 @@ func (client VirtualMachineScaleSetVMsClient) RestartResponder(resp *http.Respon
}
// Start starts a virtual machine in a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID
// is the instance ID of the virtual machine.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set.
// instanceID - the instance ID of the virtual machine.
func (client VirtualMachineScaleSetVMsClient) Start(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsStartFuture, err error) {
req, err := client.StartPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
if err != nil {
@ -750,15 +919,17 @@ func (client VirtualMachineScaleSetVMsClient) StartPreparer(ctx context.Context,
// StartSender sends the Start request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (future VirtualMachineScaleSetVMsStartFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@ -776,10 +947,11 @@ func (client VirtualMachineScaleSetVMsClient) StartResponder(resp *http.Response
}
// Update updates a virtual machine of a VM scale set.
//
// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set where the
// extension should be create or updated. instanceID is the instance ID of the virtual machine. parameters is
// parameters supplied to the Update Virtual Machine Scale Sets VM operation.
// Parameters:
// resourceGroupName - the name of the resource group.
// VMScaleSetName - the name of the VM scale set where the extension should be created or updated.
// instanceID - the instance ID of the virtual machine.
// parameters - parameters supplied to the Update Virtual Machine Scale Sets VM operation.
func (client VirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters VirtualMachineScaleSetVM) (result VirtualMachineScaleSetVMsUpdateFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
@ -844,15 +1016,17 @@ func (client VirtualMachineScaleSetVMsClient) UpdatePreparer(ctx context.Context
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualMachineScaleSetVMsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetVMsUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}

View File

@ -41,8 +41,8 @@ func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID stri
}
// List lists all available virtual machine sizes for a subscription in a location.
//
// location is the location upon which virtual-machine-sizes is queried.
// Parameters:
// location - the location upon which virtual-machine-sizes is queried.
func (client VirtualMachineSizesClient) List(ctx context.Context, location string) (result VirtualMachineSizeListResult, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: location,

File diff suppressed because it is too large

View File

@ -0,0 +1,176 @@
package graphrbac
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"net/http"
)
// OAuth2Client is the Graph RBAC Management Client
type OAuth2Client struct {
BaseClient
}
// NewOAuth2Client creates an instance of the OAuth2Client client.
func NewOAuth2Client(tenantID string) OAuth2Client {
return NewOAuth2ClientWithBaseURI(DefaultBaseURI, tenantID)
}
// NewOAuth2ClientWithBaseURI creates an instance of the OAuth2Client client.
func NewOAuth2ClientWithBaseURI(baseURI string, tenantID string) OAuth2Client {
return OAuth2Client{NewWithBaseURI(baseURI, tenantID)}
}
// Get queries OAuth2 permissions for the relevant SP ObjectId of an app.
// Parameters:
// filter - this is the Service Principal ObjectId associated with the app
func (client OAuth2Client) Get(ctx context.Context, filter string) (result Permissions, err error) {
req, err := client.GetPreparer(ctx, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client OAuth2Client) GetPreparer(ctx context.Context, filter string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"tenantID": autorest.Encode("path", client.TenantID),
}
const APIVersion = "1.6"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(filter) > 0 {
queryParameters["$filter"] = autorest.Encode("query", filter)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/{tenantID}/oauth2PermissionGrants", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client OAuth2Client) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client OAuth2Client) GetResponder(resp *http.Response) (result Permissions, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Post grants OAuth2 permissions for the relevant resource Ids of an app.
// Parameters:
// body - the relevant app Service Principal Object Id and the Service Principal Object Id you want to grant.
func (client OAuth2Client) Post(ctx context.Context, body *Permissions) (result Permissions, err error) {
req, err := client.PostPreparer(ctx, body)
if err != nil {
err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Post", nil, "Failure preparing request")
return
}
resp, err := client.PostSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Post", resp, "Failure sending request")
return
}
result, err = client.PostResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "graphrbac.OAuth2Client", "Post", resp, "Failure responding to request")
}
return
}
// PostPreparer prepares the Post request.
func (client OAuth2Client) PostPreparer(ctx context.Context, body *Permissions) (*http.Request, error) {
pathParameters := map[string]interface{}{
"tenantID": autorest.Encode("path", client.TenantID),
}
const APIVersion = "1.6"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/{tenantID}/oauth2PermissionGrants", pathParameters),
autorest.WithQueryParameters(queryParameters))
if body != nil {
preparer = autorest.DecoratePreparer(preparer,
autorest.WithJSON(body))
}
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// PostSender sends the Post request. The method will close the
// http.Response Body if it receives an error.
func (client OAuth2Client) PostSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// PostResponder handles the response to the Post request. The method always
// closes the http.Response Body.
func (client OAuth2Client) PostResponder(resp *http.Response) (result Permissions, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
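This new file adds OAuth2Client for reading and granting OAuth2 permission grants against the Graph RBAC endpoint. A rough usage sketch for the Get side is below; the import path and the $filter expression are illustrative assumptions, and the authorizer setup is omitted.

```go
package example

import (
	"context"
	"fmt"

	// Import path assumed for the vendored 1.6 graphrbac package.
	"github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
)

func listGrants(ctx context.Context, tenantID, spObjectID string) error {
	client := graphrbac.NewOAuth2Client(tenantID)
	// client.Authorizer must be a Graph-scoped authorizer; omitted here.

	// Get issues GET /{tenantID}/oauth2PermissionGrants?$filter=...
	// The filter expression below is illustrative.
	filter := fmt.Sprintf("clientId eq '%s'", spObjectID)
	perms, err := client.Get(ctx, filter)
	if err != nil {
		return err
	}
	fmt.Println("oauth2PermissionGrants query returned", perms.Response.StatusCode)
	return nil
}
```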

View File

@ -178,7 +178,7 @@ func (client ServicePrincipalsClient) DeleteResponder(resp *http.Response) (resu
return
}
// Get gets service principal information from the directory.
// Get gets service principal information from the directory. Query by objectId or pass a filter to query by appId.
// Parameters:
// objectID - the object ID of the service principal to get.
func (client ServicePrincipalsClient) Get(ctx context.Context, objectID string) (result ServicePrincipal, err error) {

View File

@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"net/http"
)
@ -52,6 +53,15 @@ func NewRoleAssignmentsClientWithBaseURI(baseURI string, subscriptionID string)
// roleAssignmentName - the name of the role assignment to create. It can be any valid GUID.
// parameters - parameters for the role assignment.
func (client RoleAssignmentsClient) Create(ctx context.Context, scope string, roleAssignmentName string, parameters RoleAssignmentCreateParameters) (result RoleAssignment, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.RoleAssignmentProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.RoleAssignmentProperties.RoleDefinitionID", Name: validation.Null, Rule: true, Chain: nil},
{Target: "parameters.RoleAssignmentProperties.PrincipalID", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("authorization.RoleAssignmentsClient", "Create", err.Error())
}
req, err := client.CreatePreparer(ctx, scope, roleAssignmentName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "Create", nil, "Failure preparing request")
@ -120,6 +130,15 @@ func (client RoleAssignmentsClient) CreateResponder(resp *http.Response) (result
// roleID - the ID of the role assignment to create.
// parameters - parameters for the role assignment.
func (client RoleAssignmentsClient) CreateByID(ctx context.Context, roleID string, parameters RoleAssignmentCreateParameters) (result RoleAssignment, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.RoleAssignmentProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.RoleAssignmentProperties.RoleDefinitionID", Name: validation.Null, Rule: true, Chain: nil},
{Target: "parameters.RoleAssignmentProperties.PrincipalID", Name: validation.Null, Rule: true, Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("authorization.RoleAssignmentsClient", "CreateByID", err.Error())
}
req, err := client.CreateByIDPreparer(ctx, roleID, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "CreateByID", nil, "Failure preparing request")
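Create and CreateByID now validate client-side that the role assignment carries a RoleDefinitionID and PrincipalID before any request is sent. Below is a hedged sketch of parameters that satisfy the new constraints; the struct field names are inferred from the constraint paths above, and the import path is an assumption.

```go
package example

import (
	"context"

	// Import path assumed for the vendored authorization package.
	"github.com/Azure/azure-sdk-for-go/services/authorization/mgmt/2015-07-01/authorization"
)

func assignRole(ctx context.Context, client authorization.RoleAssignmentsClient,
	scope, assignmentName, roleDefinitionID, principalID string) (authorization.RoleAssignment, error) {
	// Both IDs must be non-nil or Create now fails locally with a
	// validation error before any request is sent.
	params := authorization.RoleAssignmentCreateParameters{
		// Field names inferred from the validation constraint paths above.
		RoleAssignmentProperties: &authorization.RoleAssignmentProperties{
			RoleDefinitionID: &roleDefinitionID,
			PrincipalID:      &principalID,
		},
	}
	return client.Create(ctx, scope, assignmentName, params)
}
```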

View File

@ -18,4 +18,4 @@ package version
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// Number contains the semantic version of this SDK.
const Number = "v18.0.0"
const Number = "v21.1.0"

View File

@ -38,7 +38,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
return sf(r)
}
// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the
// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender

View File

@ -29,12 +29,12 @@ import (
"net"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/version"
"github.com/dgrijalva/jwt-go"
)
@ -96,18 +96,27 @@ type RefresherWithContext interface {
type TokenRefreshCallback func(Token) error
// Token encapsulates the access token used to authorize Azure requests.
// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn string `json:"expires_in"`
ExpiresOn string `json:"expires_on"`
NotBefore string `json:"not_before"`
ExpiresIn json.Number `json:"expires_in"`
ExpiresOn json.Number `json:"expires_on"`
NotBefore json.Number `json:"not_before"`
Resource string `json:"resource"`
Type string `json:"token_type"`
}
func newToken() Token {
return Token{
ExpiresIn: "0",
ExpiresOn: "0",
NotBefore: "0",
}
}
// IsZero returns true if the token object is zero-initialized.
func (t Token) IsZero() bool {
return t == Token{}
@ -115,12 +124,12 @@ func (t Token) IsZero() bool {
// Expires returns the time.Time when the Token expires.
func (t Token) Expires() time.Time {
s, err := strconv.Atoi(t.ExpiresOn)
s, err := t.ExpiresOn.Float64()
if err != nil {
s = -3600
}
expiration := date.NewUnixTimeFromSeconds(float64(s))
expiration := date.NewUnixTimeFromSeconds(s)
return time.Time(expiration).UTC()
}
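Switching the expiry fields from string to json.Number lets a single Token type decode both shapes Azure actually returns: AAD token endpoints have historically returned these values as JSON strings, while IMDS/MSI responses return bare numbers. A small self-contained sketch of why that works (it relies on encoding/json accepting a numeric JSON string for a json.Number field):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// expiry mirrors the relevant part of adal.Token after this change.
type expiry struct {
	ExpiresIn json.Number `json:"expires_in"`
}

func main() {
	for _, payload := range []string{
		`{"expires_in": "3600"}`, // AAD endpoints return strings
		`{"expires_in": 3600}`,   // IMDS/MSI returns numbers
	} {
		var e expiry
		if err := json.Unmarshal([]byte(payload), &e); err != nil {
			fmt.Println("decode error:", err)
			continue
		}
		secs, _ := e.ExpiresIn.Int64()
		fmt.Println("expires in", secs, "seconds")
	}
}
```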
@ -413,6 +422,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
}
spt := &ServicePrincipalToken{
inner: servicePrincipalToken{
Token: newToken(),
OauthConfig: oauthConfig,
Secret: secret,
ClientID: id,
@ -652,6 +662,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
spt := &ServicePrincipalToken{
inner: servicePrincipalToken{
Token: newToken(),
OauthConfig: OAuthConfig{
TokenEndpoint: *msiEndpointURL,
},
@ -778,6 +789,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
if err != nil {
return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
}
req.Header.Add("User-Agent", version.UserAgent())
req = req.WithContext(ctx)
if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
v := url.Values{}

View File

@ -119,7 +119,10 @@ func (f *Future) Done(sender autorest.Sender) (bool, error) {
if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil {
return false, err
}
if err := f.pt.updateHeaders(); err != nil {
if err := f.pt.initPollingMethod(); err != nil {
return false, err
}
if err := f.pt.updatePollingMethod(); err != nil {
return false, err
}
return f.pt.hasTerminated(), f.pt.pollingError()
@ -165,8 +168,12 @@ func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) e
// polling duration has been exceeded. It will retry failed polling attempts based on
// the retry value defined in the client up to the maximum retry attempts.
func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) error {
ctx, cancel := context.WithTimeout(ctx, client.PollingDuration)
defer cancel()
if d := client.PollingDuration; d != 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, d)
defer cancel()
}
done, err := f.Done(client)
for attempts := 0; !done; done, err = f.Done(client) {
if attempts >= client.RetryAttempts {
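With this change, a zero PollingDuration means WaitForCompletionRef no longer wraps the context in its own timeout, so the caller's context alone bounds the wait. A minimal sketch of relying on that behavior (the surrounding setup is assumed):

```go
package example

import (
	"context"
	"time"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// With PollingDuration set to zero, WaitForCompletionRef imposes no timeout
// of its own; the context passed in is the only bound on how long we poll.
func waitWithCallerContext(ctx context.Context, client autorest.Client, future *azure.Future) error {
	client.PollingDuration = 0

	ctx, cancel := context.WithTimeout(ctx, 45*time.Minute)
	defer cancel()

	return future.WaitForCompletionRef(ctx, client)
}
```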
@ -264,7 +271,7 @@ type pollingTracker interface {
// these methods can differ per tracker
// checks the response headers and status code to determine the polling mechanism
updateHeaders() error
updatePollingMethod() error
// checks the response for tracker-specific error conditions
checkForErrors() error
@ -274,6 +281,10 @@ type pollingTracker interface {
// methods common to all trackers
// initializes a tracker's polling URL and method, called for each iteration.
// these values can be overridden by each polling tracker as required.
initPollingMethod() error
// initializes the tracker's internal state, call this when the tracker is created
initializeState() error
@ -348,6 +359,10 @@ func (pt *pollingTrackerBase) initializeState() error {
case http.StatusOK:
if ps := pt.getProvisioningState(); ps != nil {
pt.State = *ps
if pt.hasFailed() {
pt.updateErrorFromResponse()
return pt.pollingError()
}
} else {
pt.State = operationSucceeded
}
@ -364,8 +379,9 @@ func (pt *pollingTrackerBase) initializeState() error {
default:
pt.State = operationFailed
pt.updateErrorFromResponse()
return pt.pollingError()
}
return nil
return pt.initPollingMethod()
}
func (pt pollingTrackerBase) getProvisioningState() *string {
@ -416,12 +432,14 @@ func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error {
} else {
// check response body for error content
pt.updateErrorFromResponse()
err = pt.pollingError()
}
return err
}
// attempts to unmarshal a ServiceError type from the response body.
// if that fails then make a best attempt at creating something meaningful.
// NOTE: this assumes that the async operation has failed.
func (pt *pollingTrackerBase) updateErrorFromResponse() {
var err error
if pt.resp.ContentLength != 0 {
@ -431,8 +449,7 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
re := respErr{}
defer pt.resp.Body.Close()
var b []byte
b, err = ioutil.ReadAll(pt.resp.Body)
if err != nil {
if b, err = ioutil.ReadAll(pt.resp.Body); err != nil {
goto Default
}
if err = json.Unmarshal(b, &re); err != nil {
@ -445,20 +462,29 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
goto Default
}
}
if re.ServiceError != nil {
// the unmarshaller will ensure re.ServiceError is non-nil
// even if there was no content unmarshalled so check the code.
if re.ServiceError.Code != "" {
pt.Err = re.ServiceError
return
}
}
Default:
se := &ServiceError{
Code: fmt.Sprintf("HTTP status code %v", pt.resp.StatusCode),
Message: pt.resp.Status,
Code: pt.pollingStatus(),
Message: "The async operation failed.",
}
if err != nil {
se.InnerError = make(map[string]interface{})
se.InnerError["unmarshalError"] = err.Error()
}
// stick the response body into the error object in hopes
// it contains something useful to help diagnose the failure.
if len(pt.rawBody) > 0 {
se.AdditionalInfo = []map[string]interface{}{
pt.rawBody,
}
}
pt.Err = se
}
@ -538,13 +564,33 @@ func (pt pollingTrackerBase) baseCheckForErrors() error {
return nil
}
// default initialization of polling URL/method. each verb tracker will update this as required.
func (pt *pollingTrackerBase) initPollingMethod() error {
if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
return err
} else if ao != "" {
pt.URI = ao
pt.Pm = PollingAsyncOperation
return nil
}
if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
return err
} else if lh != "" {
pt.URI = lh
pt.Pm = PollingLocation
return nil
}
// it's ok if we didn't find a polling header, this will be handled elsewhere
return nil
}
// DELETE
type pollingTrackerDelete struct {
pollingTrackerBase
}
func (pt *pollingTrackerDelete) updateHeaders() error {
func (pt *pollingTrackerDelete) updatePollingMethod() error {
// for 201 the Location header is required
if pt.resp.StatusCode == http.StatusCreated {
if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
@ -600,7 +646,7 @@ type pollingTrackerPatch struct {
pollingTrackerBase
}
func (pt *pollingTrackerPatch) updateHeaders() error {
func (pt *pollingTrackerPatch) updatePollingMethod() error {
// by default we can use the original URL for polling and final GET
if pt.URI == "" {
pt.URI = pt.resp.Request.URL.String()
@ -658,7 +704,7 @@ type pollingTrackerPost struct {
pollingTrackerBase
}
func (pt *pollingTrackerPost) updateHeaders() error {
func (pt *pollingTrackerPost) updatePollingMethod() error {
// 201 requires Location header
if pt.resp.StatusCode == http.StatusCreated {
if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
@ -714,7 +760,7 @@ type pollingTrackerPut struct {
pollingTrackerBase
}
func (pt *pollingTrackerPut) updateHeaders() error {
func (pt *pollingTrackerPut) updatePollingMethod() error {
// by default we can use the original URL for polling and final GET
if pt.URI == "" {
pt.URI = pt.resp.Request.URL.String()
@ -808,7 +854,7 @@ func createPollingTracker(resp *http.Response) (pollingTracker, error) {
// this initializes the polling header values, we do this during creation in case the
// initial response sends us invalid values; this way the API call will return a non-nil
// error (not doing this means the error shows up in Future.Done)
return pt, pt.updateHeaders()
return pt, pt.updatePollingMethod()
}
// gets the polling URL from the Azure-AsyncOperation header.

View File

@ -135,29 +135,21 @@ func (settings settings) getAuthorizer() (autorest.Authorizer, error) {
// NewAuthorizerFromFile creates an Authorizer configured from a configuration file.
func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) {
fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
if fileLocation == "" {
return nil, errors.New("auth file not found. Environment variable AZURE_AUTH_LOCATION is not set")
}
contents, err := ioutil.ReadFile(fileLocation)
file, err := getAuthFile()
if err != nil {
return nil, err
}
// Auth file might be encoded
decoded, err := decode(contents)
resource, err := getResourceForToken(*file, baseURI)
if err != nil {
return nil, err
}
return NewAuthorizerFromFileWithResource(resource)
}
file := file{}
err = json.Unmarshal(decoded, &file)
if err != nil {
return nil, err
}
resource, err := getResourceForToken(file, baseURI)
// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file.
func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) {
file, err := getAuthFile()
if err != nil {
return nil, err
}
@ -175,6 +167,32 @@ func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) {
return autorest.NewBearerAuthorizer(spToken), nil
}
func getAuthFile() (*file, error) {
fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
if fileLocation == "" {
return nil, errors.New("environment variable AZURE_AUTH_LOCATION is not set")
}
contents, err := ioutil.ReadFile(fileLocation)
if err != nil {
return nil, err
}
// Auth file might be encoded
decoded, err := decode(contents)
if err != nil {
return nil, err
}
authFile := file{}
err = json.Unmarshal(decoded, &authFile)
if err != nil {
return nil, err
}
return &authFile, nil
}
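The file-loading logic is now shared, and the new NewAuthorizerFromFileWithResource lets callers request a token for an arbitrary resource rather than one derived from a management base URI. A short sketch, with an illustrative auth-file path and resource URI:

```go
package example

import (
	"os"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func vaultAuthorizer() (autorest.Authorizer, error) {
	// Both constructors read the auth file named by AZURE_AUTH_LOCATION.
	// The path and resource below are purely illustrative.
	os.Setenv("AZURE_AUTH_LOCATION", "/etc/azure/my-sp.auth")

	return auth.NewAuthorizerFromFileWithResource("https://vault.azure.net")
}
```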
// File represents the authentication file
type file struct {
ClientID string `json:"clientId,omitempty"`

View File

@ -44,11 +44,12 @@ const (
// ServiceError encapsulates the error response from an Azure service.
// It adheres to the OData v4 specification for error responses.
type ServiceError struct {
Code string `json:"code"`
Message string `json:"message"`
Target *string `json:"target"`
Details []map[string]interface{} `json:"details"`
InnerError map[string]interface{} `json:"innererror"`
Code string `json:"code"`
Message string `json:"message"`
Target *string `json:"target"`
Details []map[string]interface{} `json:"details"`
InnerError map[string]interface{} `json:"innererror"`
AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
}
func (se ServiceError) Error() string {
@ -74,6 +75,14 @@ func (se ServiceError) Error() string {
result += fmt.Sprintf(" InnerError=%v", string(d))
}
if se.AdditionalInfo != nil {
d, err := json.Marshal(se.AdditionalInfo)
if err != nil {
result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo)
}
result += fmt.Sprintf(" AdditionalInfo=%v", string(d))
}
return result
}
@ -86,44 +95,47 @@ func (se *ServiceError) UnmarshalJSON(b []byte) error {
// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
type serviceError1 struct {
Code string `json:"code"`
Message string `json:"message"`
Target *string `json:"target"`
Details []map[string]interface{} `json:"details"`
InnerError map[string]interface{} `json:"innererror"`
Code string `json:"code"`
Message string `json:"message"`
Target *string `json:"target"`
Details []map[string]interface{} `json:"details"`
InnerError map[string]interface{} `json:"innererror"`
AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
}
type serviceError2 struct {
Code string `json:"code"`
Message string `json:"message"`
Target *string `json:"target"`
Details map[string]interface{} `json:"details"`
InnerError map[string]interface{} `json:"innererror"`
Code string `json:"code"`
Message string `json:"message"`
Target *string `json:"target"`
Details map[string]interface{} `json:"details"`
InnerError map[string]interface{} `json:"innererror"`
AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
}
se1 := serviceError1{}
err := json.Unmarshal(b, &se1)
if err == nil {
se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError)
se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo)
return nil
}
se2 := serviceError2{}
err = json.Unmarshal(b, &se2)
if err == nil {
se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError)
se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo)
se.Details = append(se.Details, se2.Details)
return nil
}
return err
}
func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}) {
func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) {
se.Code = code
se.Message = message
se.Target = target
se.Details = details
se.InnerError = inner
se.AdditionalInfo = additional
}
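ServiceError now round-trips the additionalInfo array that ARM attaches to some error payloads, and Error() includes it in the formatted message. A small sketch decoding such a payload (the payload contents are invented for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	payload := []byte(`{
		"code": "Conflict",
		"message": "The resource is busy.",
		"additionalInfo": [{"type": "PolicyViolation", "info": {"policy": "deny-busy"}}]
	}`)

	var se azure.ServiceError
	if err := json.Unmarshal(payload, &se); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	// Error() now appends AdditionalInfo to the formatted message as well.
	fmt.Println(se.Error())
}
```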
// RequestError describes an error response returned by Azure service.

View File

@ -140,8 +140,8 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
}
// poll for registered provisioning state
now := time.Now()
for err == nil && time.Since(now) < client.PollingDuration {
registrationStartTime := time.Now()
for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
// taken from the resources SDK
// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
preparer := autorest.CreatePreparer(
@ -183,7 +183,7 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
return originalReq.Context().Err()
}
}
if !(time.Since(now) < client.PollingDuration) {
if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) {
return errors.New("polling for resource provider registration has exceeded the polling duration")
}
return err

View File

@ -22,8 +22,11 @@ import (
"log"
"net/http"
"net/http/cookiejar"
"runtime"
"strings"
"time"
"github.com/Azure/go-autorest/logger"
"github.com/Azure/go-autorest/version"
)
const (
@ -41,15 +44,6 @@ const (
)
var (
// defaultUserAgent builds a string containing the Go version, system architecture and OS,
// and the go-autorest version.
defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
runtime.Version(),
runtime.GOARCH,
runtime.GOOS,
Version(),
)
// StatusCodesForRetry is a defined group of status codes for which the client will retry
StatusCodesForRetry = []int{
http.StatusRequestTimeout, // 408
@ -153,6 +147,7 @@ type Client struct {
PollingDelay time.Duration
// PollingDuration sets the maximum polling time after which an error is returned.
// Setting this to zero will use the provided context to control the duration.
PollingDuration time.Duration
// RetryAttempts sets the default number of retry attempts for client.
@ -179,7 +174,7 @@ func NewClientWithUserAgent(ua string) Client {
PollingDuration: DefaultPollingDuration,
RetryAttempts: DefaultRetryAttempts,
RetryDuration: DefaultRetryDuration,
UserAgent: defaultUserAgent,
UserAgent: version.UserAgent(),
}
c.Sender = c.sender()
c.AddToUserAgent(ua)
@ -216,8 +211,17 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
}
return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
}
logger.Instance.WriteRequest(r, logger.Filter{
Header: func(k string, v []string) (bool, []string) {
// remove the auth token from the log
if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
v = []string{"**REDACTED**"}
}
return true, v
},
})
resp, err := SendWithSender(c.sender(), r)
logger.Instance.WriteResponse(resp, logger.Filter{})
Respond(resp, c.ByInspecting())
return resp, err
}
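A hedged sketch of how the new Client behavior might be exercised; the endpoint URL and user agent are placeholders, while NewClientWithUserAgent, PollingDuration, and Do are the fields and functions shown above:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	client := autorest.NewClientWithUserAgent("example-app/1.0")
	// Zero means: let the request context bound long-running polling
	// instead of a fixed PollingDuration window.
	client.PollingDuration = 0

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	req, _ := http.NewRequest(http.MethodGet, "https://example.com/resource", nil)
	req.Header.Set("Authorization", "Bearer secret")

	// Do now writes the request and response to logger.Instance, redacting
	// the Authorization header before it reaches the log.
	resp, err := client.Do(req.WithContext(ctx))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```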

View File

@ -234,7 +234,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
}
delayed := DelayWithRetryAfter(resp, r.Context().Done())
if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) {
return nil, r.Context().Err()
return resp, r.Context().Err()
}
// don't count a 429 against the number of attempts
// so that we continue to retry until it succeeds

View File

@ -1,5 +1,7 @@
package autorest
import "github.com/Azure/go-autorest/version"
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
@ -16,5 +18,5 @@ package autorest
// Version returns the semantic version (see http://semver.org).
func Version() string {
return "v10.12.0"
return version.Number
}

328
vendor/github.com/Azure/go-autorest/logger/logger.go generated vendored Normal file
View File

@ -0,0 +1,328 @@
package logger
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"sync"
"time"
)
// LevelType tells a logger the minimum level to log. When code reports a log entry,
// the LogLevel indicates the level of the log entry. The logger only records entries
// whose level is at least the level it was told to log. See the Log* constants.
// For example, if a logger is configured with LogError, then LogError, LogPanic,
// and LogFatal entries will be logged; lower level entries are ignored.
type LevelType uint32
const (
// LogNone tells a logger not to log any entries passed to it.
LogNone LevelType = iota
// LogFatal tells a logger to log all LogFatal entries passed to it.
LogFatal
// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
LogPanic
// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
LogError
// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
LogWarning
// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
LogInfo
// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
LogDebug
)
const (
logNone = "NONE"
logFatal = "FATAL"
logPanic = "PANIC"
logError = "ERROR"
logWarning = "WARNING"
logInfo = "INFO"
logDebug = "DEBUG"
logUnknown = "UNKNOWN"
)
// ParseLevel converts the specified string into the corresponding LevelType.
func ParseLevel(s string) (lt LevelType, err error) {
switch strings.ToUpper(s) {
case logFatal:
lt = LogFatal
case logPanic:
lt = LogPanic
case logError:
lt = LogError
case logWarning:
lt = LogWarning
case logInfo:
lt = LogInfo
case logDebug:
lt = LogDebug
default:
err = fmt.Errorf("bad log level '%s'", s)
}
return
}
// String implements the stringer interface for LevelType.
func (lt LevelType) String() string {
switch lt {
case LogNone:
return logNone
case LogFatal:
return logFatal
case LogPanic:
return logPanic
case LogError:
return logError
case LogWarning:
return logWarning
case LogInfo:
return logInfo
case LogDebug:
return logDebug
default:
return logUnknown
}
}
// Filter defines functions for filtering HTTP request/response content.
type Filter struct {
// URL returns a potentially modified string representation of a request URL.
URL func(u *url.URL) string
// Header returns a potentially modified set of values for the specified key.
// To completely exclude the header key/values return false.
Header func(key string, val []string) (bool, []string)
// Body returns a potentially modified request/response body.
Body func(b []byte) []byte
}
func (f Filter) processURL(u *url.URL) string {
if f.URL == nil {
return u.String()
}
return f.URL(u)
}
func (f Filter) processHeader(k string, val []string) (bool, []string) {
if f.Header == nil {
return true, val
}
return f.Header(k, val)
}
func (f Filter) processBody(b []byte) []byte {
if f.Body == nil {
return b
}
return f.Body(b)
}
// Writer defines methods for writing to a logging facility.
type Writer interface {
// Writeln writes the specified message with the standard log entry header and new-line character.
Writeln(level LevelType, message string)
// Writef writes the specified format specifier with the standard log entry header and no new-line character.
Writef(level LevelType, format string, a ...interface{})
// WriteRequest writes the specified HTTP request to the logger if the log level is greater than
// or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher.
// Custom filters can be specified to exclude URL, header, and/or body content from the log.
// By default no request content is excluded.
WriteRequest(req *http.Request, filter Filter)
// WriteResponse writes the specified HTTP response to the logger if the log level is greater than
// or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
// Custom filters can be specified to exclude URL, header, and/or body content from the log.
// By default no response content is excluded.
WriteResponse(resp *http.Response, filter Filter)
}
// Instance is the default log writer initialized during package init.
// This can be replaced with a custom implementation as required.
var Instance Writer
// default log level
var logLevel = LogNone
// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL.
// If no value was specified the default value is LogNone.
// Custom loggers can call this to retrieve the configured log level.
func Level() LevelType {
return logLevel
}
func init() {
// separated for testing purposes
initDefaultLogger()
}
func initDefaultLogger() {
// init with nilLogger so callers don't have to do a nil check on Default
Instance = nilLogger{}
llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL"))
if llStr == "" {
return
}
var err error
logLevel, err = ParseLevel(llStr)
if err != nil {
fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error())
return
}
if logLevel == LogNone {
return
}
// default to stderr
dest := os.Stderr
lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE")
if strings.EqualFold(lfStr, "stdout") {
dest = os.Stdout
} else if lfStr != "" {
lf, err := os.Create(lfStr)
if err == nil {
dest = lf
} else {
fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error())
}
}
Instance = fileLogger{
logLevel: logLevel,
mu: &sync.Mutex{},
logFile: dest,
}
}
// the nil logger does nothing
type nilLogger struct{}
func (nilLogger) Writeln(LevelType, string) {}
func (nilLogger) Writef(LevelType, string, ...interface{}) {}
func (nilLogger) WriteRequest(*http.Request, Filter) {}
func (nilLogger) WriteResponse(*http.Response, Filter) {}
// An os.File is used instead of a Logger so the stream can be flushed after every write.
type fileLogger struct {
logLevel LevelType
mu *sync.Mutex // for synchronizing writes to logFile
logFile *os.File
}
func (fl fileLogger) Writeln(level LevelType, message string) {
fl.Writef(level, "%s\n", message)
}
func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) {
if fl.logLevel >= level {
fl.mu.Lock()
defer fl.mu.Unlock()
fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...))
fl.logFile.Sync()
}
}
func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) {
if req == nil || fl.logLevel < LogInfo {
return
}
b := &bytes.Buffer{}
fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL))
// dump headers
for k, v := range req.Header {
if ok, mv := filter.processHeader(k, v); ok {
fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ","))
}
}
if fl.shouldLogBody(req.Header, req.Body) {
// dump body
body, err := ioutil.ReadAll(req.Body)
if err == nil {
fmt.Fprintln(b, string(filter.processBody(body)))
if nc, ok := req.Body.(io.Seeker); ok {
// rewind to the beginning
nc.Seek(0, io.SeekStart)
} else {
// recreate the body
req.Body = ioutil.NopCloser(bytes.NewReader(body))
}
} else {
fmt.Fprintf(b, "failed to read body: %v\n", err)
}
}
fl.mu.Lock()
defer fl.mu.Unlock()
fmt.Fprint(fl.logFile, b.String())
fl.logFile.Sync()
}
func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) {
if resp == nil || fl.logLevel < LogInfo {
return
}
b := &bytes.Buffer{}
fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL))
// dump headers
for k, v := range resp.Header {
if ok, mv := filter.processHeader(k, v); ok {
fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ","))
}
}
if fl.shouldLogBody(resp.Header, resp.Body) {
// dump body
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err == nil {
fmt.Fprintln(b, string(filter.processBody(body)))
resp.Body = ioutil.NopCloser(bytes.NewReader(body))
} else {
fmt.Fprintf(b, "failed to read body: %v\n", err)
}
}
fl.mu.Lock()
defer fl.mu.Unlock()
fmt.Fprint(fl.logFile, b.String())
fl.logFile.Sync()
}
// returns true if the provided body should be included in the log
func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool {
ct := header.Get("Content-Type")
return fl.logLevel >= LogDebug && body != nil && strings.Index(ct, "application/octet-stream") == -1
}
// creates the standard header for log entries; it contains a timestamp and the log level
func entryHeader(level LevelType) string {
// this format provides a fixed number of digits so the size of the timestamp is constant
return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String())
}
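A minimal usage sketch for the new logger package, assuming the import path github.com/Azure/go-autorest/logger and that AZURE_GO_SDK_LOG_LEVEL (and optionally AZURE_GO_SDK_LOG_FILE) is exported before the process starts, since the level is read at package init:

```go
package main

import (
	"net/http"

	"github.com/Azure/go-autorest/logger"
)

func main() {
	// With AZURE_GO_SDK_LOG_LEVEL=INFO set, Instance is a fileLogger;
	// otherwise it is the nil logger and these calls are no-ops.
	logger.Instance.Writef(logger.LogInfo, "starting request loop (level=%s)\n", logger.Level())

	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	// Log the request, redacting the Authorization header the same way
	// autorest.Client.Do now does.
	logger.Instance.WriteRequest(req, logger.Filter{
		Header: func(k string, v []string) (bool, []string) {
			if k == "Authorization" {
				v = []string{"**REDACTED**"}
			}
			return true, v
		},
	})
}
```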

37
vendor/github.com/Azure/go-autorest/version/version.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
package version
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"runtime"
)
// Number contains the semantic version of this SDK.
const Number = "v11.0.0"
var (
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
runtime.Version(),
runtime.GOARCH,
runtime.GOOS,
Number,
)
)
// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version.
func UserAgent() string {
return userAgent
}
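For illustration, the computed user agent can be inspected directly; the output shown in the comment is only an example, since it depends on the local Go toolchain and platform:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/version"
)

func main() {
	// e.g. "Go/go1.10.3 (amd64-linux) go-autorest/v11.0.0"
	fmt.Println(version.UserAgent())
}
```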

View File

@ -2,6 +2,7 @@ package statsd
import (
"net"
"sync"
"time"
)
@ -19,6 +20,7 @@ type udsWriter struct {
conn net.Conn
// write timeout
writeTimeout time.Duration
sync.Mutex // used to lock conn / writer can replace it
}
// New returns a pointer to a new udsWriter given a socket file path as addr.
@ -41,6 +43,8 @@ func (w *udsWriter) SetWriteTimeout(d time.Duration) error {
// Write data to the UDS connection with write timeout and minimal error handling:
// create the connection if nil, and destroy it if the statsd server has disconnected
func (w *udsWriter) Write(data []byte) (int, error) {
w.Lock()
defer w.Unlock()
// Try connecting (first packet or connection lost)
if w.conn == nil {
conn, err := net.Dial(w.addr.Network(), w.addr.String())

View File

@ -1,137 +1,137 @@
package winio
import (
"bytes"
"encoding/binary"
"errors"
)
type fileFullEaInformation struct {
NextEntryOffset uint32
Flags uint8
NameLength uint8
ValueLength uint16
}
var (
fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
errEaNameTooLarge = errors.New("extended attribute name too large")
errEaValueTooLarge = errors.New("extended attribute value too large")
)
// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
Name string
Value []byte
Flags uint8
}
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
var info fileFullEaInformation
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
if err != nil {
err = errInvalidEaBuffer
return
}
nameOffset := fileFullEaInformationSize
nameLen := int(info.NameLength)
valueOffset := nameOffset + int(info.NameLength) + 1
valueLen := int(info.ValueLength)
nextOffset := int(info.NextEntryOffset)
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
err = errInvalidEaBuffer
return
}
ea.Name = string(b[nameOffset : nameOffset+nameLen])
ea.Value = b[valueOffset : valueOffset+valueLen]
ea.Flags = info.Flags
if info.NextEntryOffset != 0 {
nb = b[info.NextEntryOffset:]
}
return
}
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
for len(b) != 0 {
ea, nb, err := parseEa(b)
if err != nil {
return nil, err
}
eas = append(eas, ea)
b = nb
}
return
}
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
if int(uint8(len(ea.Name))) != len(ea.Name) {
return errEaNameTooLarge
}
if int(uint16(len(ea.Value))) != len(ea.Value) {
return errEaValueTooLarge
}
entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
withPadding := (entrySize + 3) &^ 3
nextOffset := uint32(0)
if !last {
nextOffset = withPadding
}
info := fileFullEaInformation{
NextEntryOffset: nextOffset,
Flags: ea.Flags,
NameLength: uint8(len(ea.Name)),
ValueLength: uint16(len(ea.Value)),
}
err := binary.Write(buf, binary.LittleEndian, &info)
if err != nil {
return err
}
_, err = buf.Write([]byte(ea.Name))
if err != nil {
return err
}
err = buf.WriteByte(0)
if err != nil {
return err
}
_, err = buf.Write(ea.Value)
if err != nil {
return err
}
_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
if err != nil {
return err
}
return nil
}
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
// buffer for use with BackupWrite, ZwSetEaFile, etc.
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
var buf bytes.Buffer
for i := range eas {
last := false
if i == len(eas)-1 {
last = true
}
err := writeEa(&buf, &eas[i], last)
if err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}
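A round-trip sketch using the exported EA helpers above; the EA name and value are arbitrary, and the package is intended for Windows use:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	eas := []winio.ExtendedAttribute{
		{Name: "user.comment", Value: []byte("hello"), Flags: 0},
	}

	// Encode into a FILE_FULL_EA_INFORMATION buffer...
	buf, err := winio.EncodeExtendedAttributes(eas)
	if err != nil {
		log.Fatal(err)
	}

	// ...and decode it back.
	decoded, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s = %q\n", decoded[0].Name, decoded[0].Value)
}
```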

View File

@ -20,7 +20,8 @@ const (
// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
FileAttributes uintptr // includes padding
FileAttributes uint32
pad uint32 // padding
}
// GetFileBasicInfo retrieves times and attributes for a file.

View File

@ -15,7 +15,6 @@ import (
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
@ -139,12 +138,14 @@ func (s pipeAddress) String() string {
}
// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then the timeout
// is the default timeout established by the pipe server.
// takes longer than the specified duration. If timeout is nil, then we use
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
var absTimeout time.Time
if timeout != nil {
absTimeout = time.Now().Add(*timeout)
} else {
absTimeout = time.Now().Add(time.Second * 2)
}
var err error
var h syscall.Handle
@ -153,22 +154,13 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
if err != cERROR_PIPE_BUSY {
break
}
now := time.Now()
var ms uint32
if absTimeout.IsZero() {
ms = cNMPWAIT_USE_DEFAULT_WAIT
} else if now.After(absTimeout) {
ms = cNMPWAIT_NOWAIT
} else {
ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000)
}
err = waitNamedPipe(path, ms)
if err != nil {
if err == cERROR_SEM_TIMEOUT {
return nil, ErrTimeout
}
break
if time.Now().After(absTimeout) {
return nil, ErrTimeout
}
// Wait 10 msec and try again. This is a rather simplistic
// approach, as we always retry every 10 milliseconds.
time.Sleep(time.Millisecond * 10)
}
if err != nil {
return nil, &os.PathError{Op: "open", Path: path, Err: err}
@ -349,13 +341,23 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
if err != nil {
return nil, err
}
// Immediately open and then close a client handle so that the named pipe is
// created but not currently accepting connections.
// Create a client handle and connect it. This results in the pipe
// instance always existing, so that clients see ERROR_PIPE_BUSY
// rather than ERROR_FILE_NOT_FOUND. This ties the first instance
// up so that no other instances can be used. This would have been
// cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
// instead of CreateNamedPipe. (Apparently created named pipes are
// considered to be in listening state regardless of whether any
// active calls to ConnectNamedPipe are outstanding.)
h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != nil {
syscall.Close(h)
return nil, err
}
// Close the client handle. The server side of the instance will
// still be busy, leading to ERROR_PIPE_BUSY instead of
// ERROR_NOT_FOUND, as long as we don't close the server handle,
// or disconnect the client with DisconnectNamedPipe.
syscall.Close(h2)
l := &win32PipeListener{
firstHandle: h,

View File

@ -6,6 +6,10 @@ response body, for clients which support it. Although it's usually simpler to
leave that to a reverse proxy (like nginx or Varnish), this package is useful
when that's undesirable.
## Install
```bash
go get -u github.com/NYTimes/gziphandler
```
## Usage

View File

@ -29,6 +29,7 @@ const (
// The examples seem to indicate that it is.
DefaultQValue = 1.0
// DefaultMinSize is the default minimum size until we enable gzip compression.
// 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer.
// If you take a file that is 1300 bytes and compress it to 800 bytes, it's still transmitted in that same 1500 byte packet regardless, so you've gained nothing.
// That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value.
@ -82,6 +83,7 @@ type GzipResponseWriter struct {
minSize int // Specifies the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
buf []byte // Holds the first part of the write before reaching the minSize or the end of the write.
ignore bool // If true, then we immediately passthru writes to the underlying ResponseWriter.
contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty.
}
@ -96,38 +98,56 @@ func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
// Write appends data to the gzip writer.
func (w *GzipResponseWriter) Write(b []byte) (int, error) {
// If content type is not set.
if _, ok := w.Header()[contentType]; !ok {
// Infer it from the uncompressed body.
w.Header().Set(contentType, http.DetectContentType(b))
}
// GZIP responseWriter is initialized. Use the GZIP responseWriter.
if w.gw != nil {
n, err := w.gw.Write(b)
return n, err
return w.gw.Write(b)
}
// If we have already decided not to use GZIP, immediately passthrough.
if w.ignore {
return w.ResponseWriter.Write(b)
}
// Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter.
// On the first write, w.buf changes from nil to a valid slice
w.buf = append(w.buf, b...)
// If the global writes are bigger than the minSize and we're about to write
// a response containing a content type we want to handle, enable
// compression.
if len(w.buf) >= w.minSize && handleContentType(w.contentTypes, w) && w.Header().Get(contentEncoding) == "" {
err := w.startGzip()
if err != nil {
return 0, err
var (
cl, _ = strconv.Atoi(w.Header().Get(contentLength))
ct = w.Header().Get(contentType)
ce = w.Header().Get(contentEncoding)
)
// Only continue if they didn't already choose an encoding or a known unhandled content length or type.
if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(w.contentTypes, ct)) {
// If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data.
if len(w.buf) < w.minSize && cl == 0 {
return len(b), nil
}
// If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue.
if cl >= w.minSize || len(w.buf) >= w.minSize {
// If a Content-Type wasn't specified, infer it from the current buffer.
if ct == "" {
ct = http.DetectContentType(w.buf)
w.Header().Set(contentType, ct)
}
// If the Content-Type is acceptable to GZIP, initialize the GZIP writer.
if handleContentType(w.contentTypes, ct) {
if err := w.startGzip(); err != nil {
return 0, err
}
return len(b), nil
}
}
}
// If we got here, we should not GZIP this response.
if err := w.startPlain(); err != nil {
return 0, err
}
return len(b), nil
}
// startGzip initialize any GZIP specific informations.
// startGzip initializes a GZIP writer and writes the buffer.
func (w *GzipResponseWriter) startGzip() error {
// Set the GZIP header.
w.Header().Set(contentEncoding, "gzip")
@ -139,22 +159,49 @@ func (w *GzipResponseWriter) startGzip() error {
// Write the header to gzip response.
if w.code != 0 {
w.ResponseWriter.WriteHeader(w.code)
// Ensure that no further WriteHeader calls happen
w.code = 0
}
// Initialize the GZIP response.
w.init()
// Initialize and flush the buffer into the gzip response if there are any bytes.
// If there aren't any, we shouldn't initialize it yet because on Close it will
// write the gzip header even if nothing was ever written.
if len(w.buf) > 0 {
// Initialize the GZIP response.
w.init()
n, err := w.gw.Write(w.buf)
// Flush the buffer into the gzip response.
n, err := w.gw.Write(w.buf)
// This should never happen (per io.Writer docs), but if the write didn't
// accept the entire buffer but returned no specific error, we have no clue
// what's going on, so abort just to be safe.
if err == nil && n < len(w.buf) {
err = io.ErrShortWrite
}
return err
}
return nil
}
// startPlain writes the buffered bytes to the underlying ResponseWriter without gzip.
func (w *GzipResponseWriter) startPlain() error {
if w.code != 0 {
w.ResponseWriter.WriteHeader(w.code)
// Ensure that no further WriteHeader calls happen
w.code = 0
}
w.ignore = true
// If Write was never called then don't call Write on the underlying ResponseWriter.
if w.buf == nil {
return nil
}
n, err := w.ResponseWriter.Write(w.buf)
w.buf = nil
// This should never happen (per io.Writer docs), but if the write didn't
// accept the entire buffer but returned no specific error, we have no clue
// what's going on, so abort just to be safe.
if err == nil && n < len(w.buf) {
return io.ErrShortWrite
err = io.ErrShortWrite
}
w.buf = nil
return err
}
@ -177,21 +224,20 @@ func (w *GzipResponseWriter) init() {
// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
func (w *GzipResponseWriter) Close() error {
if w.gw == nil {
// Gzip not triggered yet, write out regular response.
if w.code != 0 {
w.ResponseWriter.WriteHeader(w.code)
}
if w.buf != nil {
_, writeErr := w.ResponseWriter.Write(w.buf)
// Returns the error if any at write.
if writeErr != nil {
return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error())
}
}
if w.ignore {
return nil
}
if w.gw == nil {
// GZIP not triggered yet, write out regular response.
err := w.startPlain()
// Returns the error if any at write.
if err != nil {
err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error())
}
return err
}
err := w.gw.Close()
gzipWriterPools[w.index].Put(w.gw)
w.gw = nil
@ -202,15 +248,17 @@ func (w *GzipResponseWriter) Close() error {
// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
// an http.Flusher.
func (w *GzipResponseWriter) Flush() {
if w.gw == nil {
// Only flush once startGzip has been called.
if w.gw == nil && !w.ignore {
// Only flush once startGzip or startPlain has been called.
//
// Flush is thus a no-op until the written body
// exceeds minSize.
// Flush is thus a no-op until we're certain whether a plain
// or gzipped response will be served.
return
}
w.gw.Flush()
if w.gw != nil {
w.gw.Flush()
}
if fw, ok := w.ResponseWriter.(http.Flusher); ok {
fw.Flush()
@ -405,13 +453,12 @@ func acceptsGzip(r *http.Request) bool {
}
// returns true if we've been configured to compress the specific content type.
func handleContentType(contentTypes []parsedContentType, w http.ResponseWriter) bool {
func handleContentType(contentTypes []parsedContentType, ct string) bool {
// If contentTypes is empty we handle all content types.
if len(contentTypes) == 0 {
return true
}
ct := w.Header().Get(contentType)
mediaType, params, err := mime.ParseMediaType(ct)
if err != nil {
return false

View File

@ -33,7 +33,7 @@ import (
)
// DriverVersion is the version number of the hdb driver.
const DriverVersion = "0.12.1"
const DriverVersion = "0.13.1"
// DriverName is the driver name to use with sql.Open for hdb databases.
const DriverName = "hdb"
@ -108,7 +108,8 @@ func init() {
// check if driver implements all required interfaces
var (
_ driver.Driver = (*hdbDrv)(nil)
_ driver.Driver = (*hdbDrv)(nil)
_ driver.DriverContext = (*hdbDrv)(nil)
)
type hdbDrv struct{}
@ -121,6 +122,10 @@ func (d *hdbDrv) Open(dsn string) (driver.Conn, error) {
return connector.Connect(context.Background())
}
func (d *hdbDrv) OpenConnector(dsn string) (driver.Connector, error) {
return NewDSNConnector(dsn)
}
// database connection
// check if conn implements all required interfaces
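With OpenConnector now implemented on the driver itself, a connector can be used directly with database/sql. A sketch, assuming the import path github.com/SAP/go-hdb/driver and a placeholder DSN:

```go
package main

import (
	"database/sql"
	"log"

	hdb "github.com/SAP/go-hdb/driver"
)

func main() {
	// Equivalent to sql.Open(hdb.DriverName, dsn), but goes through the
	// driver.Connector implementation.
	connector, err := hdb.NewDSNConnector("hdb://user:password@host:39013")
	if err != nil {
		log.Fatal(err)
	}
	db := sql.OpenDB(connector)
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```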

View File

@ -1,32 +0,0 @@
// +build go1.10
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"database/sql/driver"
)
// driver
// check if driver implements all required interfaces
var _ driver.DriverContext = (*hdbDrv)(nil)
func (d *hdbDrv) OpenConnector(dsn string) (driver.Connector, error) {
return NewDSNConnector(dsn)
}

View File

@ -1,23 +0,0 @@
// +build amd64 386 arm arm64 ppc64le mipsle mips64le
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//amd64, 386 architectures: little endian
//arm, arm64: go supports little endian only
var archEndian = littleEndian

View File

@ -198,13 +198,13 @@ func NewSession(ctx context.Context, prm sessionPrm) (*Session, error) {
wr := bufio.NewWriter(conn)
s := &Session{
prm: prm,
conn: conn,
rd: rd,
wr: wr,
mh: new(messageHeader),
sh: new(segmentHeader),
ph: new(partHeader),
prm: prm,
conn: conn,
rd: rd,
wr: wr,
mh: new(messageHeader),
sh: new(segmentHeader),
ph: new(partHeader),
scramsha256InitialRequest: new(scramsha256InitialRequest),
scramsha256InitialReply: new(scramsha256InitialReply),
scramsha256FinalRequest: new(scramsha256FinalRequest),
@ -743,7 +743,7 @@ func (s *Session) initRequest() error {
req.protocol.major = 4
req.protocol.minor = 1
req.numOptions = 1
req.endianess = archEndian
req.endianess = littleEndian
if err := req.write(s.wr); err != nil {
return err
}

View File

@ -42,9 +42,9 @@ func completeROASignParams(request requests.AcsRequest, signer Signer, regionId
// complete query params
queryParams := request.GetQueryParams()
if _, ok := queryParams["RegionId"]; !ok {
queryParams["RegionId"] = regionId
}
//if _, ok := queryParams["RegionId"]; !ok {
// queryParams["RegionId"] = regionId
//}
if extraParam := signer.GetExtraParam(); extraParam != nil {
for key, value := range extraParam {
if key == "SecurityToken" {

View File

@ -57,7 +57,7 @@ func (resolver *LocationResolver) TryResolve(param *ResolveParam) (endpoint stri
getEndpointRequest.Product = "Location"
getEndpointRequest.Version = "2015-06-12"
getEndpointRequest.ApiName = "DescribeEndpoints"
getEndpointRequest.Domain = "location.aliyuncs.com"
getEndpointRequest.Domain = "location-readonly.aliyuncs.com"
getEndpointRequest.Method = "GET"
getEndpointRequest.Scheme = requests.HTTPS

View File

@ -24,7 +24,7 @@ const (
UnsupportedCredentialErrorMessage = "Specified credential (type = %s) is not supported, please check"
CanNotResolveEndpointErrorCode = "SDK.CanNotResolveEndpoint"
CanNotResolveEndpointErrorMessage = "Can not resolve endpoint(param = %s), please check the user guide\n %s"
CanNotResolveEndpointErrorMessage = "Can not resolve endpoint(param = %s), please check your accessKey with secret, and read the user guide\n %s"
UnsupportedParamPositionErrorCode = "SDK.UnsupportedParamPosition"
UnsupportedParamPositionErrorMessage = "Specified param position (%s) is not supported, please upgrade sdk and retry"

View File

@ -77,7 +77,9 @@ func (request *RoaRequest) buildQueries(needParamEncode bool) string {
// append urlBuilder
urlBuilder := bytes.Buffer{}
urlBuilder.WriteString(path)
urlBuilder.WriteString("?")
if len(queryKeys) > 0 {
urlBuilder.WriteString("?")
}
for i := 0; i < len(queryKeys); i++ {
queryKey := queryKeys[i]
urlBuilder.WriteString(queryKey)

View File

@ -47,6 +47,11 @@ func Unmarshal(response AcsResponse, httpResponse *http.Response, format string)
// common response need not unmarshal
return
}
if len(response.GetHttpContentBytes()) == 0 {
return
}
if strings.ToUpper(format) == "JSON" {
initJsonParserOnce()
err = jsonParser.Unmarshal(response.GetHttpContentBytes(), response)

207
vendor/github.com/apple/foundationdb/LICENSE generated vendored Normal file
View File

@ -0,0 +1,207 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------
SOFTWARE DISTRIBUTED WITH FOUNDATIONDB:
The FoundationDB software includes a number of subcomponents with separate
copyright notices and license terms - please see the file ACKNOWLEDGEMENTS.
-------------------------------------------------------------------------------

View File

@ -0,0 +1,74 @@
/*
* cluster.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go API
package fdb
/*
#define FDB_API_VERSION 600
#include <foundationdb/fdb_c.h>
*/
import "C"
import (
"runtime"
)
// Cluster is a handle to a FoundationDB cluster. Cluster is a lightweight
// object that may be efficiently copied, and is safe for concurrent use by
// multiple goroutines.
//
// It is generally preferable to use Open or OpenDefault to obtain a database
// handle directly.
type Cluster struct {
*cluster
}
type cluster struct {
ptr *C.FDBCluster
}
func (c *cluster) destroy() {
C.fdb_cluster_destroy(c.ptr)
}
// OpenDatabase returns a database handle from the FoundationDB cluster. It is
// generally preferable to use Open or OpenDefault to obtain a database handle
// directly.
//
// In the current release, the database name must be []byte("DB").
func (c Cluster) OpenDatabase(dbName []byte) (Database, error) {
f := C.fdb_cluster_create_database(c.ptr, byteSliceToPtr(dbName), C.int(len(dbName)))
fdb_future_block_until_ready(f)
var outd *C.FDBDatabase
if err := C.fdb_future_get_database(f, &outd); err != 0 {
return Database{}, Error{int(err)}
}
C.fdb_future_destroy(f)
d := &database{outd}
runtime.SetFinalizer(d, (*database).destroy)
return Database{d}, nil
}
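For context, the doc comment above recommends Open/OpenDefault over going through Cluster directly. A minimal sketch, assuming fdb.MustAPIVersion and fdb.MustOpenDefault from the same package (they are not shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
)

func main() {
	// Must be called before any other fdb API is used; 600 matches
	// FDB_API_VERSION in the cgo preamble above.
	fdb.MustAPIVersion(600)

	// Opens the default cluster file and the "DB" database.
	db := fdb.MustOpenDefault()
	fmt.Printf("%T\n", db) // fdb.Database
}
```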

View File

@ -0,0 +1,238 @@
/*
* database.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go API
package fdb
/*
#define FDB_API_VERSION 600
#include <foundationdb/fdb_c.h>
*/
import "C"
import (
"runtime"
)
// Database is a handle to a FoundationDB database. Database is a lightweight
// object that may be efficiently copied, and is safe for concurrent use by
// multiple goroutines.
//
// Although Database provides convenience methods for reading and writing data,
// modifications to a database are usually made via transactions, which are
// usually created and committed automatically by the (Database).Transact
// method.
type Database struct {
*database
}
type database struct {
ptr *C.FDBDatabase
}
// DatabaseOptions is a handle with which to set options that affect a Database
// object. A DatabaseOptions instance should be obtained with the
// (Database).Options method.
type DatabaseOptions struct {
d *database
}
func (opt DatabaseOptions) setOpt(code int, param []byte) error {
return setOpt(func(p *C.uint8_t, pl C.int) C.fdb_error_t {
return C.fdb_database_set_option(opt.d.ptr, C.FDBDatabaseOption(code), p, pl)
}, param)
}
func (d *database) destroy() {
C.fdb_database_destroy(d.ptr)
}
// CreateTransaction returns a new FoundationDB transaction. It is generally
// preferable to use the (Database).Transact method, which handles
// automatically creating and committing a transaction with appropriate retry
// behavior.
func (d Database) CreateTransaction() (Transaction, error) {
var outt *C.FDBTransaction
if err := C.fdb_database_create_transaction(d.ptr, &outt); err != 0 {
return Transaction{}, Error{int(err)}
}
t := &transaction{outt, d}
runtime.SetFinalizer(t, (*transaction).destroy)
return Transaction{t}, nil
}
func retryable(wrapped func() (interface{}, error), onError func(Error) FutureNil) (ret interface{}, e error) {
for {
ret, e = wrapped()
/* No error means success! */
if e == nil {
return
}
ep, ok := e.(Error)
if ok {
e = onError(ep).Get()
}
/* If OnError returns an error, then it's not
retryable; otherwise take another pass at things */
if e != nil {
return
}
}
}
// Transact runs a caller-provided function inside a retry loop, providing it
// with a newly created Transaction. After the function returns, the Transaction
// will be committed automatically. Any error during execution of the function
// (by panic or return) or the commit will cause the function and commit to be
// retried or, if fatal, return the error to the caller.
//
// When working with Future objects in a transactional function, you may either
// explicitly check and return error values using Get, or call MustGet. Transact
// will recover a panicked Error and either retry the transaction or return the
// error.
//
// Do not return Future objects from the function provided to Transact. The
// Transaction created by Transact may be finalized at any point after Transact
// returns, resulting in the cancellation of any outstanding
// reads. Additionally, any errors returned or panicked by the Future will no
// longer be able to trigger a retry of the caller-provided function.
//
// See the Transactor interface for an example of using Transact with
// Transaction and Database objects.
func (d Database) Transact(f func(Transaction) (interface{}, error)) (interface{}, error) {
tr, e := d.CreateTransaction()
/* Any error here is non-retryable */
if e != nil {
return nil, e
}
wrapped := func() (ret interface{}, e error) {
defer panicToError(&e)
ret, e = f(tr)
if e == nil {
e = tr.Commit().Get()
}
return
}
return retryable(wrapped, tr.OnError)
}
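A usage sketch of Transact following the doc comment above. The key and value are arbitrary; Set, Get, and MustGet come from the fdb Transaction and Future APIs, which are assumed here since they are not part of this hunk:

```go
package main

import (
	"fmt"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
)

func main() {
	fdb.MustAPIVersion(600)
	db := fdb.MustOpenDefault()

	// The function may be retried, so it should be idempotent and must not
	// leak Future objects past the call.
	ret, err := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
		tr.Set(fdb.Key("hello"), []byte("world"))
		return tr.Get(fdb.Key("hello")).MustGet(), nil
	})
	if err != nil {
		fmt.Println("transaction failed:", err)
		return
	}
	fmt.Printf("hello = %s\n", ret.([]byte))
}
```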
// ReadTransact runs a caller-provided function inside a retry loop, providing
// it with a newly created Transaction (as a ReadTransaction). Any error during
// execution of the function (by panic or return) will cause the function to be
// retried or, if fatal, return the error to the caller.
//
// When working with Future objects in a read-only transactional function, you
// may either explicitly check and return error values using Get, or call
// MustGet. ReadTransact will recover a panicked Error and either retry the
// transaction or return the error.
//
// Do not return Future objects from the function provided to ReadTransact. The
// Transaction created by ReadTransact may be finalized at any point after
// ReadTransact returns, resulting in the cancellation of any outstanding
// reads. Additionally, any errors returned or panicked by the Future will no
// longer be able to trigger a retry of the caller-provided function.
//
// See the ReadTransactor interface for an example of using ReadTransact with
// Transaction, Snapshot and Database objects.
func (d Database) ReadTransact(f func(ReadTransaction) (interface{}, error)) (interface{}, error) {
tr, e := d.CreateTransaction()
/* Any error here is non-retryable */
if e != nil {
return nil, e
}
wrapped := func() (ret interface{}, e error) {
defer panicToError(&e)
ret, e = f(tr)
if e == nil {
e = tr.Commit().Get()
}
return
}
return retryable(wrapped, tr.OnError)
}
// Options returns a DatabaseOptions instance suitable for setting options
// specific to this database.
func (d Database) Options() DatabaseOptions {
return DatabaseOptions{d.database}
}
// LocalityGetBoundaryKeys returns a slice of keys that fall within the provided
// range. Each key is located at the start of a contiguous range stored on a
// single server.
//
// If limit is non-zero, only the first limit keys will be returned. In large
// databases, the number of boundary keys may be large. In these cases, a
// non-zero limit should be used, along with multiple calls to
// LocalityGetBoundaryKeys.
//
// If readVersion is non-zero, the boundary keys as of readVersion will be
// returned.
func (d Database) LocalityGetBoundaryKeys(er ExactRange, limit int, readVersion int64) ([]Key, error) {
tr, e := d.CreateTransaction()
if e != nil {
return nil, e
}
if readVersion != 0 {
tr.SetReadVersion(readVersion)
}
tr.Options().SetReadSystemKeys()
tr.Options().SetLockAware()
bk, ek := er.FDBRangeKeys()
ffer := KeyRange{append(Key("\xFF/keyServers/"), bk.FDBKey()...), append(Key("\xFF/keyServers/"), ek.FDBKey()...)}
kvs, e := tr.Snapshot().GetRange(ffer, RangeOptions{Limit: limit}).GetSliceWithError()
if e != nil {
return nil, e
}
size := len(kvs)
if limit != 0 && limit < size {
size = limit
}
boundaries := make([]Key, size)
for i := 0; i < size; i++ {
boundaries[i] = kvs[i].Key[13:]
}
return boundaries, nil
}

View File

@ -0,0 +1,166 @@
/*
* allocator.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go Directory Layer
package directory
import (
"bytes"
"encoding/binary"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"math/rand"
"sync"
)
var oneBytes = []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
var allocatorMutex = sync.Mutex{}
type highContentionAllocator struct {
counters, recent subspace.Subspace
}
func newHCA(s subspace.Subspace) highContentionAllocator {
var hca highContentionAllocator
hca.counters = s.Sub(0)
hca.recent = s.Sub(1)
return hca
}
func windowSize(start int64) int64 {
// Larger window sizes are better for high contention, smaller sizes for
// keeping the keys small. But if there are many allocations, the keys
// can't be too small. So start small and scale up. We don't want this to
// ever get *too* big because we have to store about window_size/2 recent
// items.
if start < 255 {
return 64
}
if start < 65535 {
return 1024
}
return 8192
}
func (hca highContentionAllocator) allocate(tr fdb.Transaction, s subspace.Subspace) (subspace.Subspace, error) {
for {
rr := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit: 1, Reverse: true})
kvs, e := rr.GetSliceWithError()
if e != nil {
return nil, e
}
var start int64
var window int64
if len(kvs) == 1 {
t, e := hca.counters.Unpack(kvs[0].Key)
if e != nil {
return nil, e
}
start = t[0].(int64)
}
windowAdvanced := false
for {
allocatorMutex.Lock()
if windowAdvanced {
tr.ClearRange(fdb.KeyRange{hca.counters, hca.counters.Sub(start)})
tr.Options().SetNextWriteNoWriteConflictRange()
tr.ClearRange(fdb.KeyRange{hca.recent, hca.recent.Sub(start)})
}
// Increment the allocation count for the current window
tr.Add(hca.counters.Sub(start), oneBytes)
countFuture := tr.Snapshot().Get(hca.counters.Sub(start))
allocatorMutex.Unlock()
countStr, e := countFuture.Get()
if e != nil {
return nil, e
}
var count int64
if countStr == nil {
count = 0
} else {
e = binary.Read(bytes.NewBuffer(countStr), binary.LittleEndian, &count)
if e != nil {
return nil, e
}
}
window = windowSize(start)
if count*2 < window {
break
}
start += window
windowAdvanced = true
}
for {
// As of the snapshot being read from, the window is less than half
// full, so this should be expected to take 2 tries. Under high
// contention (and when the window advances), there is an additional
// subsequent risk of conflict for this transaction.
candidate := rand.Int63n(window) + start
key := hca.recent.Sub(candidate)
allocatorMutex.Lock()
latestCounter := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit: 1, Reverse: true})
candidateValue := tr.Get(key)
tr.Options().SetNextWriteNoWriteConflictRange()
tr.Set(key, []byte(""))
allocatorMutex.Unlock()
kvs, e = latestCounter.GetSliceWithError()
if e != nil {
return nil, e
}
if len(kvs) > 0 {
t, e := hca.counters.Unpack(kvs[0].Key)
if e != nil {
return nil, e
}
currentStart := t[0].(int64)
if currentStart > start {
break
}
}
v, e := candidateValue.Get()
if e != nil {
return nil, e
}
if v == nil {
tr.AddWriteConflictKey(key)
return s.Sub(candidate), nil
}
}
}
}
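// A hedged, package-internal sketch of how this allocator is typically driven
// by the directory layer (tr is assumed to be an open fdb.Transaction; the
// subspaces are illustrative):
//
//	hca := newHCA(subspace.FromBytes([]byte("hca")))
//	newSub, e := hca.allocate(tr, subspace.AllKeys())
//	if e == nil {
//		// newSub is a previously unused subspace with a short packed prefix.
//		_ = newSub.Bytes()
//	}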

View File

@ -0,0 +1,241 @@
/*
* directory.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go Directory Layer
// Package directory provides a tool for managing related subspaces. Directories
// are a recommended approach for administering applications. Each application
// should create or open at least one directory to manage its subspaces.
//
// For general guidance on directory usage, see the Directories section of the
// Developer Guide
// (https://apple.github.io/foundationdb/developer-guide.html#directories).
//
// Directories are identified by hierarchical paths analogous to the paths in a
// Unix-like file system. A path is represented as a slice of strings. Each
// directory has an associated subspace used to store its content. The directory
// layer maps each path to a short prefix used for the corresponding
// subspace. In effect, directories provide a level of indirection for access to
// subspaces.
//
// Directory operations are transactional. A byte slice layer option is used as
// a metadata identifier when opening a directory.
package directory
import (
"errors"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
)
const (
_SUBDIRS int = 0
// []int32{1,0,0} by any other name
_MAJORVERSION int32 = 1
_MINORVERSION int32 = 0
_MICROVERSION int32 = 0
)
// Directory represents a subspace of keys in a FoundationDB database,
// identified by a hierarchical path.
type Directory interface {
// CreateOrOpen opens the directory specified by path (relative to this
// Directory), and returns the directory and its contents as a
// DirectorySubspace. If the directory does not exist, it is created
// (creating parent directories if necessary).
//
// If the byte slice layer is specified and the directory is new, it is
// recorded as the layer; if layer is specified and the directory already
// exists, it is compared against the layer specified when the directory was
// created, and an error is returned if they differ.
CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error)
// Open opens the directory specified by path (relative to this Directory),
// and returns the directory and its contents as a DirectorySubspace (or an
// error if the directory does not exist).
//
// If the byte slice layer is specified, it is compared against the layer
// specified when the directory was created, and an error is returned if
// they differ.
Open(rt fdb.ReadTransactor, path []string, layer []byte) (DirectorySubspace, error)
// Create creates a directory specified by path (relative to this
// Directory), and returns the directory and its contents as a
// DirectorySubspace (or an error if the directory already exists).
//
// If the byte slice layer is specified, it is recorded as the layer and
// will be checked when opening the directory in the future.
Create(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error)
// CreatePrefix behaves like Create, but uses a manually specified byte
// slice prefix to physically store the contents of this directory, rather
// than an automatically allocated prefix.
//
// If this Directory was created in a root directory that does not allow
// manual prefixes, CreatePrefix will return an error. The default root
// directory does not allow manual prefixes.
CreatePrefix(t fdb.Transactor, path []string, layer []byte, prefix []byte) (DirectorySubspace, error)
// Move moves the directory at oldPath to newPath (both relative to this
// Directory), and returns the directory (at its new location) and its
// contents as a DirectorySubspace. Move will return an error if a directory
// does not exist at oldPath, a directory already exists at newPath, or the
// parent directory of newPath does not exist.
//
// There is no effect on the physical prefix of the given directory or on
// clients that already have the directory open.
Move(t fdb.Transactor, oldPath []string, newPath []string) (DirectorySubspace, error)
// MoveTo moves this directory to newAbsolutePath (relative to the root
// directory of this Directory), and returns the directory (at its new
// location) and its contents as a DirectorySubspace. MoveTo will return an
// error if a directory already exists at newAbsolutePath or the parent
// directory of newAbsolutePath does not exist.
//
// There is no effect on the physical prefix of the given directory or on
// clients that already have the directory open.
MoveTo(t fdb.Transactor, newAbsolutePath []string) (DirectorySubspace, error)
// Remove removes the directory at path (relative to this Directory), its
// content, and all subdirectories. Remove returns true if a directory
// existed at path and was removed, and false if no directory exists at
// path.
//
// Note that clients that have already opened this directory might still
// insert data into its contents after removal.
Remove(t fdb.Transactor, path []string) (bool, error)
// Exists returns true if the directory at path (relative to this Directory)
// exists, and false otherwise.
Exists(rt fdb.ReadTransactor, path []string) (bool, error)
// List returns the names of the immediate subdirectories of the directory
// at path (relative to this Directory) as a slice of strings. Each string
// is the name of the last component of a subdirectory's path.
List(rt fdb.ReadTransactor, path []string) ([]string, error)
// GetLayer returns the layer specified when this Directory was created.
GetLayer() []byte
// GetPath returns the path with which this Directory was opened.
GetPath() []string
}
func stringsEqual(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}
func moveTo(t fdb.Transactor, dl directoryLayer, path, newAbsolutePath []string) (DirectorySubspace, error) {
partitionLen := len(dl.path)
if !stringsEqual(newAbsolutePath[:partitionLen], dl.path) {
return nil, errors.New("cannot move between partitions")
}
return dl.Move(t, path[partitionLen:], newAbsolutePath[partitionLen:])
}
var root = NewDirectoryLayer(subspace.FromBytes([]byte{0xFE}), subspace.AllKeys(), false)
// CreateOrOpen opens the directory specified by path (resolved relative to the
// default root directory), and returns the directory and its contents as a
// DirectorySubspace. If the directory does not exist, it is created (creating
// parent directories if necessary).
//
// If the byte slice layer is specified and the directory is new, it is recorded
// as the layer; if layer is specified and the directory already exists, it is
// compared against the layer specified when the directory was created, and an
// error is returned if they differ.
func CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
return root.CreateOrOpen(t, path, layer)
}
// Open opens the directory specified by path (resolved relative to the default
// root directory), and returns the directory and its contents as a
// DirectorySubspace (or an error if the directory does not exist).
//
// If the byte slice layer is specified, it is compared against the layer
// specified when the directory was created, and an error is returned if they
// differ.
func Open(rt fdb.ReadTransactor, path []string, layer []byte) (DirectorySubspace, error) {
return root.Open(rt, path, layer)
}
// Create creates a directory specified by path (resolved relative to the
// default root directory), and returns the directory and its contents as a
// DirectorySubspace (or an error if the directory already exists).
//
// If the byte slice layer is specified, it is recorded as the layer and will be
// checked when opening the directory in the future.
func Create(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
return root.Create(t, path, layer)
}
// Move moves the directory at oldPath to newPath (both resolved relative to the
// default root directory), and returns the directory (at its new location) and
// its contents as a DirectorySubspace. Move will return an error if a directory
// does not exist at oldPath, a directory already exists at newPath, or the
// parent directory of newPath does not exist.
//
// There is no effect on the physical prefix of the given directory or on
// clients that already have the directory open.
func Move(t fdb.Transactor, oldPath []string, newPath []string) (DirectorySubspace, error) {
return root.Move(t, oldPath, newPath)
}
// Exists returns true if the directory at path (relative to the default root
// directory) exists, and false otherwise.
func Exists(rt fdb.ReadTransactor, path []string) (bool, error) {
return root.Exists(rt, path)
}
// List returns the names of the immediate subdirectories of the default root
// directory as a slice of strings. Each string is the name of the last
// component of a subdirectory's path.
func List(rt fdb.ReadTransactor, path []string) ([]string, error) {
return root.List(rt, path)
}
// Root returns the default root directory. Any attempt to move or remove the
// root directory will return an error.
//
// The default root directory stores directory layer metadata in keys beginning
// with 0xFE, and allocates newly created directories in (unused) prefixes
// starting with 0x00 through 0xFD. This is appropriate for otherwise empty
// databases, but may conflict with other formal or informal partitionings of
// keyspace. If you already have other content in your database, you may wish to
// use NewDirectoryLayer to construct a non-standard root directory to control
// where metadata and keys are stored.
//
// As an alternative to Root, you may use the package-level functions
// CreateOrOpen, Open, Create, CreatePrefix, Move, Exists and List to operate
// directly on the default DirectoryLayer.
func Root() Directory {
return root
}
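// A minimal usage sketch of the package-level helpers above, from a caller's
// point of view (not part of the original file; db is assumed to be an open
// fdb.Database and the path is illustrative):
//
//	appDir, e := directory.CreateOrOpen(db, []string{"myapp", "users"}, nil)
//	if e == nil {
//		// appDir is a DirectorySubspace: a Directory that is also a subspace,
//		// so keys can be packed under its automatically allocated prefix.
//		key := appDir.Pack(tuple.Tuple{"alice"})
//		_ = key
//	}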

View File

@ -0,0 +1,613 @@
/*
* directoryLayer.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go Directory Layer
package directory
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)
type directoryLayer struct {
nodeSS subspace.Subspace
contentSS subspace.Subspace
allowManualPrefixes bool
allocator highContentionAllocator
rootNode subspace.Subspace
path []string
}
// NewDirectoryLayer returns a new root directory (as a Directory). The
// subspaces nodeSS and contentSS control where the directory metadata and
// contents are stored. The default root directory has a nodeSS of
// subspace.FromBytes([]byte{0xFE}) and a contentSS of
// subspace.AllKeys(). Specifying more restrictive values for nodeSS and
// contentSS will allow using the directory layer alongside other content in a
// database.
//
// If allowManualPrefixes is false, all calls to CreatePrefix on the returned
// Directory (or any subdirectories) will fail, and all directory prefixes will
// be automatically allocated. The default root directory does not allow manual
// prefixes.
func NewDirectoryLayer(nodeSS, contentSS subspace.Subspace, allowManualPrefixes bool) Directory {
var dl directoryLayer
dl.nodeSS = subspace.FromBytes(nodeSS.Bytes())
dl.contentSS = subspace.FromBytes(contentSS.Bytes())
dl.allowManualPrefixes = allowManualPrefixes
dl.rootNode = dl.nodeSS.Sub(dl.nodeSS.Bytes())
dl.allocator = newHCA(dl.rootNode.Sub([]byte("hca")))
return dl
}
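// A hedged sketch of constructing a non-default root directory, as described
// above (the 0xFD node prefix and the content subspace are illustrative
// choices, not recommendations):
//
//	custom := NewDirectoryLayer(
//		subspace.FromBytes([]byte{0xFD}),          // where directory metadata is kept
//		subspace.FromBytes([]byte("app-content")), // where directory contents are allocated
//		false)                                     // disallow manual prefixes
//	_ = custom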
func (dl directoryLayer) createOrOpen(rtr fdb.ReadTransaction, tr *fdb.Transaction, path []string, layer []byte, prefix []byte, allowCreate, allowOpen bool) (DirectorySubspace, error) {
if e := dl.checkVersion(rtr, nil); e != nil {
return nil, e
}
if prefix != nil && !dl.allowManualPrefixes {
if len(dl.path) == 0 {
return nil, errors.New("cannot specify a prefix unless manual prefixes are enabled")
}
return nil, errors.New("cannot specify a prefix in a partition")
}
if len(path) == 0 {
return nil, errors.New("the root directory cannot be opened")
}
existingNode := dl.find(rtr, path).prefetchMetadata(rtr)
if existingNode.exists() {
if existingNode.isInPartition(nil, false) {
subpath := existingNode.getPartitionSubpath()
enc, e := existingNode.getContents(dl, nil)
if e != nil {
return nil, e
}
return enc.(directoryPartition).createOrOpen(rtr, tr, subpath, layer, prefix, allowCreate, allowOpen)
}
if !allowOpen {
return nil, errors.New("the directory already exists")
}
if layer != nil {
if l, e := existingNode._layer.Get(); e != nil || bytes.Compare(l, layer) != 0 {
return nil, errors.New("the directory was created with an incompatible layer")
}
}
return existingNode.getContents(dl, nil)
}
if !allowCreate {
return nil, errors.New("the directory does not exist")
}
if e := dl.checkVersion(rtr, tr); e != nil {
return nil, e
}
if prefix == nil {
newss, e := dl.allocator.allocate(*tr, dl.contentSS)
if e != nil {
return nil, fmt.Errorf("unable to allocate new directory prefix (%s)", e.Error())
}
if !isRangeEmpty(rtr, newss) {
return nil, fmt.Errorf("the database has keys stored at the prefix chosen by the automatic prefix allocator: %v", prefix)
}
prefix = newss.Bytes()
pf, e := dl.isPrefixFree(rtr.Snapshot(), prefix)
if e != nil {
return nil, e
}
if !pf {
return nil, errors.New("the directory layer has manually allocated prefixes that conflict with the automatic prefix allocator")
}
} else {
pf, e := dl.isPrefixFree(rtr, prefix)
if e != nil {
return nil, e
}
if !pf {
return nil, errors.New("the given prefix is already in use")
}
}
var parentNode subspace.Subspace
if len(path) > 1 {
pd, e := dl.createOrOpen(rtr, tr, path[:len(path)-1], nil, nil, true, true)
if e != nil {
return nil, e
}
parentNode = dl.nodeWithPrefix(pd.Bytes())
} else {
parentNode = dl.rootNode
}
if parentNode == nil {
return nil, errors.New("the parent directory does not exist")
}
node := dl.nodeWithPrefix(prefix)
tr.Set(parentNode.Sub(_SUBDIRS, path[len(path)-1]), prefix)
if layer == nil {
layer = []byte{}
}
tr.Set(node.Sub([]byte("layer")), layer)
return dl.contentsOfNode(node, path, layer)
}
func (dl directoryLayer) CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
return dl.createOrOpen(tr, &tr, path, layer, nil, true, true)
})
if e != nil {
return nil, e
}
return r.(DirectorySubspace), nil
}
func (dl directoryLayer) Create(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
return dl.createOrOpen(tr, &tr, path, layer, nil, true, false)
})
if e != nil {
return nil, e
}
return r.(DirectorySubspace), nil
}
func (dl directoryLayer) CreatePrefix(t fdb.Transactor, path []string, layer []byte, prefix []byte) (DirectorySubspace, error) {
if prefix == nil {
prefix = []byte{}
}
r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
return dl.createOrOpen(tr, &tr, path, layer, prefix, true, false)
})
if e != nil {
return nil, e
}
return r.(DirectorySubspace), nil
}
func (dl directoryLayer) Open(rt fdb.ReadTransactor, path []string, layer []byte) (DirectorySubspace, error) {
r, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
return dl.createOrOpen(rtr, nil, path, layer, nil, false, true)
})
if e != nil {
return nil, e
}
return r.(DirectorySubspace), nil
}
func (dl directoryLayer) Exists(rt fdb.ReadTransactor, path []string) (bool, error) {
r, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
if e := dl.checkVersion(rtr, nil); e != nil {
return false, e
}
node := dl.find(rtr, path).prefetchMetadata(rtr)
if !node.exists() {
return false, nil
}
if node.isInPartition(nil, false) {
nc, e := node.getContents(dl, nil)
if e != nil {
return false, e
}
return nc.Exists(rtr, node.getPartitionSubpath())
}
return true, nil
})
if e != nil {
return false, e
}
return r.(bool), nil
}
func (dl directoryLayer) List(rt fdb.ReadTransactor, path []string) ([]string, error) {
r, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
if e := dl.checkVersion(rtr, nil); e != nil {
return nil, e
}
node := dl.find(rtr, path).prefetchMetadata(rtr)
if !node.exists() {
return nil, errors.New("the directory does not exist")
}
if node.isInPartition(nil, true) {
nc, e := node.getContents(dl, nil)
if e != nil {
return nil, e
}
return nc.List(rtr, node.getPartitionSubpath())
}
return dl.subdirNames(rtr, node.subspace)
})
if e != nil {
return nil, e
}
return r.([]string), nil
}
func (dl directoryLayer) MoveTo(t fdb.Transactor, newAbsolutePath []string) (DirectorySubspace, error) {
return nil, errors.New("the root directory cannot be moved")
}
func (dl directoryLayer) Move(t fdb.Transactor, oldPath []string, newPath []string) (DirectorySubspace, error) {
r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
if e := dl.checkVersion(tr, &tr); e != nil {
return nil, e
}
sliceEnd := len(oldPath)
if sliceEnd > len(newPath) {
sliceEnd = len(newPath)
}
if stringsEqual(oldPath, newPath[:sliceEnd]) {
return nil, errors.New("the destination directory cannot be a subdirectory of the source directory")
}
oldNode := dl.find(tr, oldPath).prefetchMetadata(tr)
newNode := dl.find(tr, newPath).prefetchMetadata(tr)
if !oldNode.exists() {
return nil, errors.New("the source directory does not exist")
}
if oldNode.isInPartition(nil, false) || newNode.isInPartition(nil, false) {
if !oldNode.isInPartition(nil, false) || !newNode.isInPartition(nil, false) || !stringsEqual(oldNode.path, newNode.path) {
return nil, errors.New("cannot move between partitions")
}
nnc, e := newNode.getContents(dl, nil)
if e != nil {
return nil, e
}
return nnc.Move(tr, oldNode.getPartitionSubpath(), newNode.getPartitionSubpath())
}
if newNode.exists() {
return nil, errors.New("the destination directory already exists. Remove it first")
}
parentNode := dl.find(tr, newPath[:len(newPath)-1])
if !parentNode.exists() {
return nil, errors.New("the parent of the destination directory does not exist. Create it first")
}
p, e := dl.nodeSS.Unpack(oldNode.subspace)
if e != nil {
return nil, e
}
tr.Set(parentNode.subspace.Sub(_SUBDIRS, newPath[len(newPath)-1]), p[0].([]byte))
dl.removeFromParent(tr, oldPath)
l, e := oldNode._layer.Get()
if e != nil {
return nil, e
}
return dl.contentsOfNode(oldNode.subspace, newPath, l)
})
if e != nil {
return nil, e
}
return r.(DirectorySubspace), nil
}
func (dl directoryLayer) Remove(t fdb.Transactor, path []string) (bool, error) {
r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
if e := dl.checkVersion(tr, &tr); e != nil {
return false, e
}
if len(path) == 0 {
return false, errors.New("the root directory cannot be removed")
}
node := dl.find(tr, path).prefetchMetadata(tr)
if !node.exists() {
return false, nil
}
if node.isInPartition(nil, false) {
nc, e := node.getContents(dl, nil)
if e != nil {
return false, e
}
return nc.(directoryPartition).Remove(tr, node.getPartitionSubpath())
}
if e := dl.removeRecursive(tr, node.subspace); e != nil {
return false, e
}
dl.removeFromParent(tr, path)
return true, nil
})
if e != nil {
return false, e
}
return r.(bool), nil
}
func (dl directoryLayer) removeRecursive(tr fdb.Transaction, node subspace.Subspace) error {
nodes := dl.subdirNodes(tr, node)
for i := range nodes {
if e := dl.removeRecursive(tr, nodes[i]); e != nil {
return e
}
}
p, e := dl.nodeSS.Unpack(node)
if e != nil {
return e
}
kr, e := fdb.PrefixRange(p[0].([]byte))
if e != nil {
return e
}
tr.ClearRange(kr)
tr.ClearRange(node)
return nil
}
func (dl directoryLayer) removeFromParent(tr fdb.Transaction, path []string) {
parent := dl.find(tr, path[:len(path)-1])
tr.Clear(parent.subspace.Sub(_SUBDIRS, path[len(path)-1]))
}
func (dl directoryLayer) GetLayer() []byte {
return []byte{}
}
func (dl directoryLayer) GetPath() []string {
return dl.path
}
func (dl directoryLayer) subdirNames(rtr fdb.ReadTransaction, node subspace.Subspace) ([]string, error) {
sd := node.Sub(_SUBDIRS)
rr := rtr.GetRange(sd, fdb.RangeOptions{})
ri := rr.Iterator()
var ret []string
for ri.Advance() {
kv, e := ri.Get()
if e != nil {
return nil, e
}
p, e := sd.Unpack(kv.Key)
if e != nil {
return nil, e
}
ret = append(ret, p[0].(string))
}
return ret, nil
}
func (dl directoryLayer) subdirNodes(tr fdb.Transaction, node subspace.Subspace) []subspace.Subspace {
sd := node.Sub(_SUBDIRS)
rr := tr.GetRange(sd, fdb.RangeOptions{})
ri := rr.Iterator()
var ret []subspace.Subspace
for ri.Advance() {
kv := ri.MustGet()
ret = append(ret, dl.nodeWithPrefix(kv.Value))
}
return ret
}
func (dl directoryLayer) nodeContainingKey(rtr fdb.ReadTransaction, key []byte) (subspace.Subspace, error) {
if bytes.HasPrefix(key, dl.nodeSS.Bytes()) {
return dl.rootNode, nil
}
bk, _ := dl.nodeSS.FDBRangeKeys()
kr := fdb.KeyRange{bk, fdb.Key(append(dl.nodeSS.Pack(tuple.Tuple{key}), 0x00))}
kvs, e := rtr.GetRange(kr, fdb.RangeOptions{Reverse: true, Limit: 1}).GetSliceWithError()
if e != nil {
return nil, e
}
if len(kvs) == 1 {
pp, e := dl.nodeSS.Unpack(kvs[0].Key)
if e != nil {
return nil, e
}
prevPrefix := pp[0].([]byte)
if bytes.HasPrefix(key, prevPrefix) {
return dl.nodeWithPrefix(prevPrefix), nil
}
}
return nil, nil
}
func (dl directoryLayer) isPrefixFree(rtr fdb.ReadTransaction, prefix []byte) (bool, error) {
if len(prefix) == 0 {
return false, nil
}
nck, e := dl.nodeContainingKey(rtr, prefix)
if e != nil {
return false, e
}
if nck != nil {
return false, nil
}
kr, e := fdb.PrefixRange(prefix)
if e != nil {
return false, e
}
bk, ek := kr.FDBRangeKeys()
if !isRangeEmpty(rtr, fdb.KeyRange{dl.nodeSS.Pack(tuple.Tuple{bk}), dl.nodeSS.Pack(tuple.Tuple{ek})}) {
return false, nil
}
return true, nil
}
func (dl directoryLayer) checkVersion(rtr fdb.ReadTransaction, tr *fdb.Transaction) error {
version, err := rtr.Get(dl.rootNode.Sub([]byte("version"))).Get()
if err != nil {
return err
}
if version == nil {
if tr != nil {
dl.initializeDirectory(*tr)
}
return nil
}
var versions []int32
buf := bytes.NewBuffer(version)
for i := 0; i < 3; i++ {
var v int32
err := binary.Read(buf, binary.LittleEndian, &v)
if err != nil {
return errors.New("cannot determine directory version present in database")
}
versions = append(versions, v)
}
if versions[0] > _MAJORVERSION {
return fmt.Errorf("cannot load directory with version %d.%d.%d using directory layer %d.%d.%d", versions[0], versions[1], versions[2], _MAJORVERSION, _MINORVERSION, _MICROVERSION)
}
if versions[1] > _MINORVERSION && tr != nil /* aka write access allowed */ {
return fmt.Errorf("directory with version %d.%d.%d is read-only when opened using directory layer %d.%d.%d", versions[0], versions[1], versions[2], _MAJORVERSION, _MINORVERSION, _MICROVERSION)
}
return nil
}
func (dl directoryLayer) initializeDirectory(tr fdb.Transaction) {
buf := new(bytes.Buffer)
// bytes.Buffer claims that Write will always return a nil error, which
// means the error return here can only be an encoding issue. So long as we
// don't set our own versions to something completely invalid, we should be
// OK to ignore error returns.
binary.Write(buf, binary.LittleEndian, _MAJORVERSION)
binary.Write(buf, binary.LittleEndian, _MINORVERSION)
binary.Write(buf, binary.LittleEndian, _MICROVERSION)
tr.Set(dl.rootNode.Sub([]byte("version")), buf.Bytes())
}
func (dl directoryLayer) contentsOfNode(node subspace.Subspace, path []string, layer []byte) (DirectorySubspace, error) {
p, e := dl.nodeSS.Unpack(node)
if e != nil {
return nil, e
}
prefix := p[0]
newPath := make([]string, len(dl.path)+len(path))
copy(newPath, dl.path)
copy(newPath[len(dl.path):], path)
pb := prefix.([]byte)
ss := subspace.FromBytes(pb)
if bytes.Compare(layer, []byte("partition")) == 0 {
nssb := make([]byte, len(pb)+1)
copy(nssb, pb)
nssb[len(pb)] = 0xFE
ndl := NewDirectoryLayer(subspace.FromBytes(nssb), ss, false).(directoryLayer)
ndl.path = newPath
return directoryPartition{ndl, dl}, nil
}
return directorySubspace{ss, dl, newPath, layer}, nil
}
func (dl directoryLayer) nodeWithPrefix(prefix []byte) subspace.Subspace {
if prefix == nil {
return nil
}
return dl.nodeSS.Sub(prefix)
}
func (dl directoryLayer) find(rtr fdb.ReadTransaction, path []string) *node {
n := &node{dl.rootNode, []string{}, path, nil}
for i := range path {
n = &node{dl.nodeWithPrefix(rtr.Get(n.subspace.Sub(_SUBDIRS, path[i])).MustGet()), path[:i+1], path, nil}
if !n.exists() || bytes.Compare(n.layer(rtr).MustGet(), []byte("partition")) == 0 {
return n
}
}
return n
}
func (dl directoryLayer) partitionSubpath(lpath, rpath []string) []string {
r := make([]string, len(lpath)-len(dl.path)+len(rpath))
copy(r, lpath[len(dl.path):])
copy(r[len(lpath)-len(dl.path):], rpath)
return r
}
func isRangeEmpty(rtr fdb.ReadTransaction, r fdb.Range) bool {
kvs := rtr.GetRange(r, fdb.RangeOptions{Limit: 1}).GetSliceOrPanic()
return len(kvs) == 0
}

View File

@ -0,0 +1,91 @@
/*
* directoryPartition.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go Directory Layer
package directory
import (
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)
type directoryPartition struct {
directoryLayer
parentDirectoryLayer directoryLayer
}
func (dp directoryPartition) Sub(el ...tuple.TupleElement) subspace.Subspace {
panic("cannot open subspace in the root of a directory partition")
}
func (dp directoryPartition) Bytes() []byte {
panic("cannot get key for the root of a directory partition")
}
func (dp directoryPartition) Pack(t tuple.Tuple) fdb.Key {
panic("cannot pack keys using the root of a directory partition")
}
func (dp directoryPartition) Unpack(k fdb.KeyConvertible) (tuple.Tuple, error) {
panic("cannot unpack keys using the root of a directory partition")
}
func (dp directoryPartition) Contains(k fdb.KeyConvertible) bool {
panic("cannot check whether a key belongs to the root of a directory partition")
}
func (dp directoryPartition) FDBKey() fdb.Key {
panic("cannot use the root of a directory partition as a key")
}
func (dp directoryPartition) FDBRangeKeys() (fdb.KeyConvertible, fdb.KeyConvertible) {
panic("cannot get range for the root of a directory partition")
}
func (dp directoryPartition) FDBRangeKeySelectors() (fdb.Selectable, fdb.Selectable) {
panic("cannot get range for the root of a directory partition")
}
func (dp directoryPartition) GetLayer() []byte {
return []byte("partition")
}
func (dp directoryPartition) getLayerForPath(path []string) directoryLayer {
if len(path) == 0 {
return dp.parentDirectoryLayer
}
return dp.directoryLayer
}
func (dp directoryPartition) MoveTo(t fdb.Transactor, newAbsolutePath []string) (DirectorySubspace, error) {
return moveTo(t, dp.parentDirectoryLayer, dp.path, newAbsolutePath)
}
func (dp directoryPartition) Remove(t fdb.Transactor, path []string) (bool, error) {
dl := dp.getLayerForPath(path)
return dl.Remove(t, dl.partitionSubpath(dp.path, path))
}
func (dp directoryPartition) Exists(rt fdb.ReadTransactor, path []string) (bool, error) {
dl := dp.getLayerForPath(path)
return dl.Exists(rt, dl.partitionSubpath(dp.path, path))
}

View File

@ -0,0 +1,88 @@
/*
* directorySubspace.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go Directory Layer
package directory
import (
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
)
// DirectorySubspace represents a Directory that may also be used as a Subspace
// to store key/value pairs. Subdirectories of a root directory (as returned by
// Root or NewDirectoryLayer) are DirectorySubspaces, and provide all methods of
// the Directory and subspace.Subspace interfaces.
type DirectorySubspace interface {
subspace.Subspace
Directory
}
type directorySubspace struct {
subspace.Subspace
dl directoryLayer
path []string
layer []byte
}
func (d directorySubspace) CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
return d.dl.CreateOrOpen(t, d.dl.partitionSubpath(d.path, path), layer)
}
func (d directorySubspace) Create(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
return d.dl.Create(t, d.dl.partitionSubpath(d.path, path), layer)
}
func (d directorySubspace) CreatePrefix(t fdb.Transactor, path []string, layer []byte, prefix []byte) (DirectorySubspace, error) {
return d.dl.CreatePrefix(t, d.dl.partitionSubpath(d.path, path), layer, prefix)
}
func (d directorySubspace) Open(rt fdb.ReadTransactor, path []string, layer []byte) (DirectorySubspace, error) {
return d.dl.Open(rt, d.dl.partitionSubpath(d.path, path), layer)
}
func (d directorySubspace) MoveTo(t fdb.Transactor, newAbsolutePath []string) (DirectorySubspace, error) {
return moveTo(t, d.dl, d.path, newAbsolutePath)
}
func (d directorySubspace) Move(t fdb.Transactor, oldPath []string, newPath []string) (DirectorySubspace, error) {
return d.dl.Move(t, d.dl.partitionSubpath(d.path, oldPath), d.dl.partitionSubpath(d.path, newPath))
}
func (d directorySubspace) Remove(t fdb.Transactor, path []string) (bool, error) {
return d.dl.Remove(t, d.dl.partitionSubpath(d.path, path))
}
func (d directorySubspace) Exists(rt fdb.ReadTransactor, path []string) (bool, error) {
return d.dl.Exists(rt, d.dl.partitionSubpath(d.path, path))
}
func (d directorySubspace) List(rt fdb.ReadTransactor, path []string) (subdirs []string, e error) {
return d.dl.List(rt, d.dl.partitionSubpath(d.path, path))
}
func (d directorySubspace) GetLayer() []byte {
return d.layer
}
func (d directorySubspace) GetPath() []string {
return d.path
}

View File

@ -0,0 +1,75 @@
/*
* node.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go Directory Layer
package directory
import (
"bytes"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
)
type node struct {
subspace subspace.Subspace
path []string
targetPath []string
_layer fdb.FutureByteSlice
}
func (n *node) exists() bool {
if n.subspace == nil {
return false
}
return true
}
func (n *node) prefetchMetadata(rtr fdb.ReadTransaction) *node {
if n.exists() {
n.layer(rtr)
}
return n
}
func (n *node) layer(rtr fdb.ReadTransaction) fdb.FutureByteSlice {
if n._layer == nil {
fv := rtr.Get(n.subspace.Sub([]byte("layer")))
n._layer = fv
}
return n._layer
}
func (n *node) isInPartition(tr *fdb.Transaction, includeEmptySubpath bool) bool {
return n.exists() && bytes.Compare(n._layer.MustGet(), []byte("partition")) == 0 && (includeEmptySubpath || len(n.targetPath) > len(n.path))
}
func (n *node) getPartitionSubpath() []string {
return n.targetPath[len(n.path):]
}
func (n *node) getContents(dl directoryLayer, tr *fdb.Transaction) (DirectorySubspace, error) {
l, err := n._layer.Get()
if err != nil {
return nil, err
}
return dl.contentsOfNode(n.subspace, n.path, l)
}

View File

@ -0,0 +1,209 @@
/*
* doc.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go API
/*
Package fdb provides an interface to FoundationDB databases (version 2.0 or higher).
To build and run programs using this package, you must have an installed copy of
the FoundationDB client libraries (version 2.0.0 or later), available for Linux,
Windows and OS X at https://www.foundationdb.org/download/.
This documentation specifically applies to the FoundationDB Go binding. For more
extensive guidance to programming with FoundationDB, as well as API
documentation for the other FoundationDB interfaces, please see
https://apple.github.io/foundationdb/index.html.
Basic Usage
A basic interaction with the FoundationDB API is demonstrated below:
package main
import (
"github.com/apple/foundationdb/bindings/go/src/fdb"
"log"
"fmt"
)
func main() {
// Different API versions may expose different runtime behaviors.
fdb.MustAPIVersion(600)
// Open the default database from the system cluster
db := fdb.MustOpenDefault()
// Database reads and writes happen inside transactions
ret, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Set(fdb.Key("hello"), []byte("world"))
return tr.Get(fdb.Key("foo")).MustGet(), nil
// db.Transact automatically commits (and if necessary,
// retries) the transaction
})
if e != nil {
log.Fatalf("Unable to perform FDB transaction (%v)", e)
}
fmt.Printf("hello is now world, foo was: %s\n", string(ret.([]byte)))
}
Futures
Many functions in this package are asynchronous and return Future objects. A
Future represents a value (or error) to be available at some later
time. Functions documented as blocking on a Future will block the calling
goroutine until the Future is ready (although if the Future is already ready,
the call will not block at all). While a goroutine is blocked on a Future, other
goroutines are free to execute and interact with the FoundationDB API.
It is possible (and often recommended) to call several asynchronous operations
and have multiple Future objects outstanding inside a single goroutine. All
operations will execute in parallel, and the calling goroutine will not block
until a blocking method on any one of the Futures is called.
On Panics
Idiomatic Go code strongly frowns upon panics that escape library/package
boundaries, in favor of explicitly returned errors. Idiomatic FoundationDB
client programs, however, are built around the idea of retryable
programmer-provided transactional functions. Retryable transactions can be
implemented using only error values:
ret, e := db.Transact(func (tr fdb.Transaction) (interface{}, error) {
// FoundationDB futures represent a value that will become available
futureValueOne := tr.Get(fdb.Key("foo"))
futureValueTwo := tr.Get(fdb.Key("bar"))
// Both reads are being carried out in parallel
// Get the first value (or any error)
valueOne, e := futureValueOne.Get()
if e != nil {
return nil, e
}
// Get the second value (or any error)
valueTwo, e := futureValueTwo.Get()
if e != nil {
return nil, e
}
// Return the two values
return [][]byte{valueOne, valueTwo}, nil
})
If either read encounters an error, it will be returned to Transact, which will
determine if the error is retryable or not (using (Transaction).OnError). If the
error is an FDB Error and retryable (such as a conflict with another
transaction), then the programmer-provided function will be run again. If the
error is fatal (or not an FDB Error), then the error will be returned to the
caller of Transact.
In practice, checking for an error from every asynchronous future type in the
FoundationDB API quickly becomes frustrating. As a convenience, every Future
type also has a MustGet method, which returns the same type and value as Get,
but exposes FoundationDB Errors via a panic rather than an explicitly returned
error. The above example may be rewritten as:
ret, e := db.Transact(func (tr fdb.Transaction) (interface{}, error) {
// FoundationDB futures represent a value that will become available
futureValueOne := tr.Get(fdb.Key("foo"))
futureValueTwo := tr.Get(fdb.Key("bar"))
// Both reads are being carried out in parallel
// Get the first value
valueOne := futureValueOne.MustGet()
// Get the second value
valueTwo := futureValueTwo.MustGet()
// Return the two values
return [][]byte{valueOne, valueTwo}, nil
})
Any panic that occurs during execution of the caller-provided function will be
recovered by the (Database).Transact method. If the error is an FDB Error, it
will either result in a retry of the function or be returned by Transact. If the
error is any other type (panics from code other than MustGet), Transact will
re-panic the original value.
Note that (Transaction).Transact also recovers panics, but does not itself
retry. If the recovered value is an FDB Error, it will be returned to the caller
of (Transaction).Transact; all other values will be re-panicked.
Transactions and Goroutines
When using a Transactor in the fdb package, particular care must be taken if
goroutines are created inside of the function passed to the Transact method. Any
panic from the goroutine will not be recovered by Transact, and (unless
otherwise recovered) will result in the termination of that goroutine.
Furthermore, any errors returned or panicked by fdb methods called in the
goroutine must be safely returned to the function passed to Transact, and either
returned or panicked, to allow Transact to appropriately retry or terminate the
transactional function.
Lastly, a transactional function may be retried indefinitely. It is advisable to
make sure any goroutines created during the transactional function have
completed before returning from the transactional function, or a potentially
unbounded number of goroutines may be created.
Given these complexities, it is generally best practice to use a single
goroutine for each logical thread of interaction with FoundationDB, and allow
each goroutine to block when necessary to wait for Futures to become ready.
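One hedged way to satisfy these constraints (a sketch, not the only workable
pattern) is to funnel each goroutine's error through a channel and wait on it
before the transactional function returns:
	ret, e := db.Transact(func (tr fdb.Transaction) (interface{}, error) {
		errCh := make(chan error, 1)
		go func() {
			// Use Get (not MustGet) so any error is handed back as a value
			// rather than panicking inside the goroutine.
			_, e := tr.Get(fdb.Key("side-read")).Get()
			errCh <- e
		}()
		// Wait for the goroutine before returning, so that it cannot outlive
		// this (possibly retried) transactional function.
		if e := <-errCh; e != nil {
			return nil, e
		}
		return nil, nil
	})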
Streaming Modes
When using GetRange methods in the FoundationDB API, clients can request large
ranges of the database to iterate over. Making such a request doesn't
necessarily mean that the client will consume all of the data in the range --
sometimes the client doesn't know how far it intends to iterate in
advance. FoundationDB tries to balance latency and bandwidth by requesting data
for iteration in batches.
The Mode field of the RangeOptions struct allows a client to customize this
performance tradeoff by providing extra information about how the iterator will
be used.
The default value of Mode is StreamingModeIterator, which tries to provide a
reasonable default balance. Other streaming modes that prioritize throughput or
latency are available -- see the documented StreamingMode values for specific
options.
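For example, a scan that is known to consume an entire range up front can hint
that to the client (a sketch; kr and rtr are assumed to be an in-scope key
range and read transaction, and StreamingModeWantAll is one of the documented
StreamingMode values):
	kvs, e := rtr.GetRange(kr, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll}).GetSliceWithError()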
Atomic Operations
The FDB package provides a number of atomic operations on the Database and
Transaction objects. An atomic operation is a single database command that
carries out several logical steps: reading the value of a key, performing a
transformation on that value, and writing the result. Different atomic
operations perform different transformations. Like other database operations, an
atomic operation is used within a transaction.
For more information on atomic operations in FoundationDB, please see
https://apple.github.io/foundationdb/developer-guide.html#atomic-operations. The
operands to atomic operations in this API must be provided as appropriately
encoded byte slices. To convert a Go type to a byte slice, see the binary
package.
The current atomic operations in this API are Add, BitAnd, BitOr, BitXor, Max, Min,
SetVersionstampedKey, SetVersionstampedValue (all methods on Transaction).
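For example, incrementing a little-endian 64-bit counter with Add might look
like this (a sketch; the key name is illustrative and the encoding/binary
package is assumed to be imported):
	one := make([]byte, 8)
	binary.LittleEndian.PutUint64(one, 1)
	_, e := db.Transact(func (tr fdb.Transaction) (interface{}, error) {
		tr.Add(fdb.Key("counter"), one)
		return nil, nil
	})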
*/
package fdb

View File

@ -0,0 +1,59 @@
/*
* errors.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go API
package fdb
/*
#define FDB_API_VERSION 600
#include <foundationdb/fdb_c.h>
*/
import "C"
import (
"fmt"
)
// Error represents a low-level error returned by the FoundationDB C library. An
// Error may be returned by any FoundationDB API function that returns error, or
// as a panic from any FoundationDB API function whose name ends with OrPanic.
//
// You may compare the Code field of an Error against the list of FoundationDB
// error codes at https://apple.github.io/foundationdb/api-error-codes.html,
// but generally an Error should be passed to (Transaction).OnError. When using
// (Database).Transact, non-fatal errors will be retried automatically.
type Error struct {
Code int
}
func (e Error) Error() string {
return fmt.Sprintf("FoundationDB error code %d (%s)", e.Code, C.GoString(C.fdb_get_error(C.fdb_error_t(e.Code))))
}
// SOMEDAY: these (along with others) should be coming from fdb.options?
var (
errNetworkNotSetup = Error{2008}
errAPIVersionUnset = Error{2200}
errAPIVersionAlreadySet = Error{2201}
errAPIVersionNotSupported = Error{2203}
)
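// A hedged sketch, from a caller's point of view, of testing a returned error
// for a specific code (1020, not_committed, is the transaction-conflict code
// in the public error-code list linked above):
//
//	if fe, ok := err.(fdb.Error); ok && fe.Code == 1020 {
//		// Conflict with another transaction; (Database).Transact would
//		// normally retry this automatically.
//	}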

View File

@ -0,0 +1,389 @@
/*
* fdb.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go API
package fdb
/*
#define FDB_API_VERSION 600
#include <foundationdb/fdb_c.h>
#include <stdlib.h>
*/
import "C"
import (
"fmt"
"log"
"runtime"
"sync"
"unsafe"
)
/* Would put this in futures.go but for the documented issue with
   exports and functions in preamble
   (https://code.google.com/p/go-wiki/wiki/cgo#Global_functions) */
//export unlockMutex
func unlockMutex(p unsafe.Pointer) {
m := (*sync.Mutex)(p)
m.Unlock()
}
// A Transactor can execute a function that requires a Transaction. Functions
// written to accept a Transactor are called transactional functions, and may be
// called with either a Database or a Transaction.
type Transactor interface {
// Transact executes the caller-provided function, providing it with a
// Transaction (itself a Transactor, allowing composition of transactional
// functions).
Transact(func(Transaction) (interface{}, error)) (interface{}, error)
// All Transactors are also ReadTransactors, allowing them to be used with
// read-only transactional functions.
ReadTransactor
}
// A ReadTransactor can execute a function that requires a
// ReadTransaction. Functions written to accept a ReadTransactor are called
// read-only transactional functions, and may be called with a Database,
// Transaction or Snapshot.
type ReadTransactor interface {
// ReadTransact executes the caller-provided function, providing it with a
// ReadTransaction (itself a ReadTransactor, allowing composition of
// read-only transactional functions).
ReadTransact(func(ReadTransaction) (interface{}, error)) (interface{}, error)
}
func setOpt(setter func(*C.uint8_t, C.int) C.fdb_error_t, param []byte) error {
if err := setter(byteSliceToPtr(param), C.int(len(param))); err != 0 {
return Error{int(err)}
}
return nil
}
// NetworkOptions is a handle with which to set options that affect the entire
// FoundationDB client. A NetworkOptions instance should be obtained with the
// fdb.Options function.
type NetworkOptions struct {
}
// Options returns a NetworkOptions instance suitable for setting options that
// affect the entire FoundationDB client.
func Options() NetworkOptions {
return NetworkOptions{}
}
func (opt NetworkOptions) setOpt(code int, param []byte) error {
networkMutex.Lock()
defer networkMutex.Unlock()
if apiVersion == 0 {
return errAPIVersionUnset
}
return setOpt(func(p *C.uint8_t, pl C.int) C.fdb_error_t {
return C.fdb_network_set_option(C.FDBNetworkOption(code), p, pl)
}, param)
}
// APIVersion determines the runtime behavior of the fdb package. If the requested
// version is not supported by both the fdb package and the FoundationDB C
// library, an error will be returned. APIVersion must be called prior to any
// other functions in the fdb package.
//
// Currently, this package supports API versions 200 through 600.
//
// Warning: When using the multi-version client API, setting an API version that
// is not supported by a particular client library will prevent that client from
// being used to connect to the cluster. In particular, you should not advance
// the API version of your application after upgrading your client until the
// cluster has also been upgraded.
func APIVersion(version int) error {
headerVersion := 600
networkMutex.Lock()
defer networkMutex.Unlock()
if apiVersion != 0 {
if apiVersion == version {
return nil
}
return errAPIVersionAlreadySet
}
if version < 200 || version > 600 {
return errAPIVersionNotSupported
}
if e := C.fdb_select_api_version_impl(C.int(version), C.int(headerVersion)); e != 0 {
if e == 2203 {
maxSupportedVersion := C.fdb_get_max_api_version()
if headerVersion > int(maxSupportedVersion) {
return fmt.Errorf("This version of the FoundationDB Go binding is not supported by the installed FoundationDB C library. The binding requires a library that supports API version %d, but the installed library supports a maximum version of %d.", headerVersion, maxSupportedVersion)
}
return fmt.Errorf("API version %d is not supported by the installed FoundationDB C library.", version)
}
return Error{int(e)}
}
apiVersion = version
return nil
}
// IsAPIVersionSelected determines whether an API version has already been
// selected, i.e., whether APIVersion or MustAPIVersion has already been called.
func IsAPIVersionSelected() bool {
return apiVersion != 0
}
// GetAPIVersion returns the API version that has been selected through
// APIVersion or MustAPIVersion. If the version has already been selected, the
// first return value is the API version and the error is nil. If the API
// version has not yet been set, the error will be non-nil.
func GetAPIVersion() (int, error) {
if IsAPIVersionSelected() {
return apiVersion, nil
}
return 0, errAPIVersionUnset
}
// MustAPIVersion is like APIVersion but panics if the API version is not
// supported.
func MustAPIVersion(version int) {
err := APIVersion(version)
if err != nil {
panic(err)
}
}
// MustGetAPIVersion is like GetAPIVersion but panics if the API version
// has not yet been set.
func MustGetAPIVersion() int {
apiVersion, err := GetAPIVersion()
if err != nil {
panic(err)
}
return apiVersion
}
var apiVersion int
var networkStarted bool
var networkMutex sync.Mutex
type DatabaseId struct {
clusterFile string
dbName string
}
var openClusters map[string]Cluster
var openDatabases map[DatabaseId]Database
func init() {
openClusters = make(map[string]Cluster)
openDatabases = make(map[DatabaseId]Database)
}
func startNetwork() error {
if e := C.fdb_setup_network(); e != 0 {
return Error{int(e)}
}
go func() {
e := C.fdb_run_network()
if e != 0 {
log.Printf("Unhandled error in FoundationDB network thread: %v (%v)\n", C.GoString(C.fdb_get_error(e)), e)
}
}()
networkStarted = true
return nil
}
// StartNetwork initializes the FoundationDB client networking engine. It is not
// necessary to call StartNetwork when using the fdb.Open or fdb.OpenDefault
// functions to obtain a database handle. StartNetwork must not be called more
// than once.
func StartNetwork() error {
networkMutex.Lock()
defer networkMutex.Unlock()
if apiVersion == 0 {
return errAPIVersionUnset
}
return startNetwork()
}
// DefaultClusterFile should be passed to fdb.Open or fdb.CreateCluster to allow
// the FoundationDB C library to select the platform-appropriate default cluster
// file on the current machine.
const DefaultClusterFile string = ""
// OpenDefault returns a database handle to the default database from the
// FoundationDB cluster identified by the DefaultClusterFile on the current
// machine. The FoundationDB client networking engine will be initialized first,
// if necessary.
func OpenDefault() (Database, error) {
return Open(DefaultClusterFile, []byte("DB"))
}
// MustOpenDefault is like OpenDefault but panics if the default database cannot
// be opened.
func MustOpenDefault() Database {
db, err := OpenDefault()
if err != nil {
panic(err)
}
return db
}
// Open returns a database handle to the named database from the FoundationDB
// cluster identified by the provided cluster file and database name. The
// FoundationDB client networking engine will be initialized first, if
// necessary.
//
// In the current release, the database name must be []byte("DB").
func Open(clusterFile string, dbName []byte) (Database, error) {
networkMutex.Lock()
defer networkMutex.Unlock()
if apiVersion == 0 {
return Database{}, errAPIVersionUnset
}
var e error
if !networkStarted {
e = startNetwork()
if e != nil {
return Database{}, e
}
}
cluster, ok := openClusters[clusterFile]
if !ok {
cluster, e = createCluster(clusterFile)
if e != nil {
return Database{}, e
}
openClusters[clusterFile] = cluster
}
db, ok := openDatabases[DatabaseId{clusterFile, string(dbName)}]
if !ok {
db, e = cluster.OpenDatabase(dbName)
if e != nil {
return Database{}, e
}
openDatabases[DatabaseId{clusterFile, string(dbName)}] = db
}
return db, nil
}
// MustOpen is like Open but panics if the database cannot be opened.
func MustOpen(clusterFile string, dbName []byte) Database {
db, err := Open(clusterFile, dbName)
if err != nil {
panic(err)
}
return db
}
func createCluster(clusterFile string) (Cluster, error) {
var cf *C.char
if len(clusterFile) != 0 {
cf = C.CString(clusterFile)
defer C.free(unsafe.Pointer(cf))
}
f := C.fdb_create_cluster(cf)
fdb_future_block_until_ready(f)
var outc *C.FDBCluster
if err := C.fdb_future_get_cluster(f, &outc); err != 0 {
return Cluster{}, Error{int(err)}
}
C.fdb_future_destroy(f)
c := &cluster{outc}
runtime.SetFinalizer(c, (*cluster).destroy)
return Cluster{c}, nil
}
// CreateCluster returns a cluster handle to the FoundationDB cluster identified
// by the provided cluster file.
func CreateCluster(clusterFile string) (Cluster, error) {
networkMutex.Lock()
defer networkMutex.Unlock()
if apiVersion == 0 {
return Cluster{}, errAPIVersionUnset
}
if !networkStarted {
return Cluster{}, errNetworkNotSetup
}
return createCluster(clusterFile)
}
func byteSliceToPtr(b []byte) *C.uint8_t {
if len(b) > 0 {
return (*C.uint8_t)(unsafe.Pointer(&b[0]))
}
return nil
}
// A KeyConvertible can be converted to a FoundationDB Key. All functions in the
// FoundationDB API that address a specific key accept a KeyConvertible.
type KeyConvertible interface {
FDBKey() Key
}
// Key represents a FoundationDB key, a lexicographically-ordered sequence of
// bytes. Key implements the KeyConvertible interface.
type Key []byte
// FDBKey allows Key to (trivially) satisfy the KeyConvertible interface.
func (k Key) FDBKey() Key {
return k
}
// panicToError recovers a panic of type Error into *e; panics of any other
// type are re-raised.
func panicToError(e *error) {
if r := recover(); r != nil {
fe, ok := r.(Error)
if ok {
*e = fe
} else {
panic(r)
}
}
}

View File

@ -0,0 +1,376 @@
/*
* futures.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go API
package fdb
/*
#cgo LDFLAGS: -lfdb_c -lm
#define FDB_API_VERSION 600
#include <foundationdb/fdb_c.h>
#include <string.h>
extern void unlockMutex(void*);
void go_callback(FDBFuture* f, void* m) {
unlockMutex(m);
}
void go_set_callback(void* f, void* m) {
fdb_future_set_callback(f, (FDBCallback)&go_callback, m);
}
*/
import "C"
import (
"runtime"
"sync"
"unsafe"
)
// A Future represents a value (or error) to be available at some later
// time. Asynchronous FDB API functions return one of the types that implement
// the Future interface. All Future types additionally implement Get and MustGet
// methods with different return types. Calling BlockUntilReady, Get or MustGet
// will block the calling goroutine until the Future is ready.
type Future interface {
// BlockUntilReady blocks the calling goroutine until the future is ready. A
// future becomes ready either when it receives a value of its enclosed type
// (if any) or is set to an error state.
BlockUntilReady()
// IsReady returns true if the future is ready, and false otherwise, without
// blocking. A future is ready either when it has received a value of its
// enclosed type (if any) or has been set to an error state.
IsReady() bool
// Cancel cancels a future and its associated asynchronous operation. If
// called before the future becomes ready, attempts to access the future
// will return an error. Cancel has no effect if the future is already
// ready.
//
// Note that even if a future is not ready, the associated asynchronous
// operation may already have completed and be unable to be cancelled.
Cancel()
}
type future struct {
ptr *C.FDBFuture
}
func newFuture(ptr *C.FDBFuture) *future {
f := &future{ptr}
runtime.SetFinalizer(f, func(f *future) { C.fdb_future_destroy(f.ptr) })
return f
}
// fdb_future_block_until_ready blocks the calling goroutine until f is ready.
// If the future is not yet ready, it registers a C callback that unlocks a
// locked mutex once the future resolves, then blocks by attempting to lock
// the mutex a second time.
func fdb_future_block_until_ready(f *C.FDBFuture) {
if C.fdb_future_is_ready(f) != 0 {
return
}
m := &sync.Mutex{}
m.Lock()
C.go_set_callback(unsafe.Pointer(f), unsafe.Pointer(m))
m.Lock()
}
func (f future) BlockUntilReady() {
fdb_future_block_until_ready(f.ptr)
}
func (f future) IsReady() bool {
return C.fdb_future_is_ready(f.ptr) != 0
}
func (f future) Cancel() {
C.fdb_future_cancel(f.ptr)
}
// FutureByteSlice represents the asynchronous result of a function that returns
// a value from a database. FutureByteSlice is a lightweight object that may be
// efficiently copied, and is safe for concurrent use by multiple goroutines.
type FutureByteSlice interface {
// Get returns a database value (or nil if there is no value), or an error
// if the asynchronous operation associated with this future did not
// successfully complete. The current goroutine will be blocked until the
// future is ready.
Get() ([]byte, error)
// MustGet returns a database value (or nil if there is no value), or panics
// if the asynchronous operation associated with this future did not
// successfully complete. The current goroutine will be blocked until the
// future is ready.
MustGet() []byte
Future
}
type futureByteSlice struct {
*future
v []byte
e error
o sync.Once
}
func (f *futureByteSlice) Get() ([]byte, error) {
f.o.Do(func() {
var present C.fdb_bool_t
var value *C.uint8_t
var length C.int
f.BlockUntilReady()
if err := C.fdb_future_get_value(f.ptr, &present, &value, &length); err != 0 {
f.e = Error{int(err)}
} else {
if present != 0 {
f.v = C.GoBytes(unsafe.Pointer(value), length)
}
}
C.fdb_future_release_memory(f.ptr)
})
return f.v, f.e
}
func (f *futureByteSlice) MustGet() []byte {
val, err := f.Get()
if err != nil {
panic(err)
}
return val
}
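// Illustrative sketch of consuming a FutureByteSlice (assumes a
// ReadTransaction tr obtained elsewhere, for example inside a transactional
// function):
//
//	f := tr.Get(fdb.Key("hello"))
//	// other reads may be issued here while f resolves in the background
//	v, err := f.Get() // blocks the goroutine until the read completes
//	_ = v
//	_ = err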
// FutureKey represents the asynchronous result of a function that returns a key
// from a database. FutureKey is a lightweight object that may be efficiently
// copied, and is safe for concurrent use by multiple goroutines.
type FutureKey interface {
// Get returns a database key or an error if the asynchronous operation
// associated with this future did not successfully complete. The current
// goroutine will be blocked until the future is ready.
Get() (Key, error)
// MustGet returns a database key, or panics if the asynchronous operation
// associated with this future did not successfully complete. The current
// goroutine will be blocked until the future is ready.
MustGet() Key
Future
}
type futureKey struct {
*future
k Key
e error
o sync.Once
}
func (f *futureKey) Get() (Key, error) {
f.o.Do(func() {
var value *C.uint8_t
var length C.int
f.BlockUntilReady()
if err := C.fdb_future_get_key(f.ptr, &value, &length); err != 0 {
f.e = Error{int(err)}
} else {
f.k = C.GoBytes(unsafe.Pointer(value), length)
}
C.fdb_future_release_memory(f.ptr)
})
return f.k, f.e
}
func (f *futureKey) MustGet() Key {
val, err := f.Get()
if err != nil {
panic(err)
}
return val
}
// FutureNil represents the asynchronous result of a function that has no return
// value. FutureNil is a lightweight object that may be efficiently copied, and
// is safe for concurrent use by multiple goroutines.
type FutureNil interface {
// Get returns an error if the asynchronous operation associated with this
// future did not successfully complete. The current goroutine will be
// blocked until the future is ready.
Get() error
// MustGet panics if the asynchronous operation associated with this future
// did not successfully complete. The current goroutine will be blocked
// until the future is ready.
MustGet()
Future
}
type futureNil struct {
*future
}
func (f futureNil) Get() error {
f.BlockUntilReady()
if err := C.fdb_future_get_error(f.ptr); err != 0 {
return Error{int(err)}
}
return nil
}
func (f futureNil) MustGet() {
if err := f.Get(); err != nil {
panic(err)
}
}
type futureKeyValueArray struct {
*future
}
// stringRefToSlice decodes a native (pointer, length) pair as laid out in the
// packed FDBKeyValue struct: an 8-byte data pointer followed by a 4-byte length.
func stringRefToSlice(ptr unsafe.Pointer) []byte {
size := *((*C.int)(unsafe.Pointer(uintptr(ptr) + 8)))
if size == 0 {
return []byte{}
}
src := unsafe.Pointer(*(**C.uint8_t)(unsafe.Pointer(ptr)))
return C.GoBytes(src, size)
}
func (f futureKeyValueArray) Get() ([]KeyValue, bool, error) {
f.BlockUntilReady()
var kvs *C.FDBKeyValue
var count C.int
var more C.fdb_bool_t
if err := C.fdb_future_get_keyvalue_array(f.ptr, &kvs, &count, &more); err != 0 {
return nil, false, Error{int(err)}
}
ret := make([]KeyValue, int(count))
for i := 0; i < int(count); i++ {
// Each packed FDBKeyValue entry occupies 24 bytes in native memory.
kvptr := unsafe.Pointer(uintptr(unsafe.Pointer(kvs)) + uintptr(i*24))
ret[i].Key = stringRefToSlice(kvptr)
ret[i].Value = stringRefToSlice(unsafe.Pointer(uintptr(kvptr) + 12))
}
return ret, (more != 0), nil
}
// FutureInt64 represents the asynchronous result of a function that returns a
// database version. FutureInt64 is a lightweight object that may be efficiently
// copied, and is safe for concurrent use by multiple goroutines.
type FutureInt64 interface {
// Get returns a database version or an error if the asynchronous operation
// associated with this future did not successfully complete. The current
// goroutine will be blocked until the future is ready.
Get() (int64, error)
// MustGet returns a database version, or panics if the asynchronous
// operation associated with this future did not successfully complete. The
// current goroutine will be blocked until the future is ready.
MustGet() int64
Future
}
type futureInt64 struct {
*future
}
func (f futureInt64) Get() (int64, error) {
f.BlockUntilReady()
var ver C.int64_t
if err := C.fdb_future_get_version(f.ptr, &ver); err != 0 {
return 0, Error{int(err)}
}
return int64(ver), nil
}
func (f futureInt64) MustGet() int64 {
val, err := f.Get()
if err != nil {
panic(err)
}
return val
}
// FutureStringSlice represents the asynchronous result of a function that
// returns a slice of strings. FutureStringSlice is a lightweight object that
// may be efficiently copied, and is safe for concurrent use by multiple
// goroutines.
type FutureStringSlice interface {
// Get returns a slice of strings or an error if the asynchronous operation
// associated with this future did not successfully complete. The current
// goroutine will be blocked until the future is ready.
Get() ([]string, error)
// MustGet returns a slice of strings or panics if the asynchronous
// operation associated with this future did not successfully complete. The
// current goroutine will be blocked until the future is ready.
MustGet() []string
Future
}
type futureStringSlice struct {
*future
}
func (f futureStringSlice) Get() ([]string, error) {
f.BlockUntilReady()
var strings **C.char
var count C.int
if err := C.fdb_future_get_string_array(f.ptr, (***C.char)(unsafe.Pointer(&strings)), &count); err != 0 {
return nil, Error{int(err)}
}
ret := make([]string, int(count))
for i := 0; i < int(count); i++ {
ret[i] = C.GoString((*C.char)(*(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(strings)) + uintptr(i*8)))))
}
return ret, nil
}
func (f futureStringSlice) MustGet() []string {
val, err := f.Get()
if err != nil {
panic(err)
}
return val
}

View File

@ -0,0 +1,554 @@
/*
* generated.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// DO NOT EDIT THIS FILE BY HAND. This file was generated using
// translate_fdb_options.go, part of the FoundationDB repository, and a copy of
// the fdb.options file (installed as part of the FoundationDB client, typically
// found as /usr/include/foundationdb/fdb.options).
// To regenerate this file, from the top level of a FoundationDB repository
// checkout, run:
// $ go run bindings/go/src/_util/translate_fdb_options.go < fdbclient/vexillographer/fdb.options > bindings/go/src/fdb/generated.go
package fdb
import (
"bytes"
"encoding/binary"
)
func int64ToBytes(i int64) ([]byte, error) {
buf := new(bytes.Buffer)
if e := binary.Write(buf, binary.LittleEndian, i); e != nil {
return nil, e
}
return buf.Bytes(), nil
}
// Deprecated
//
// Parameter: IP:PORT
func (o NetworkOptions) SetLocalAddress(param string) error {
return o.setOpt(10, []byte(param))
}
// Deprecated
//
// Parameter: path to cluster file
func (o NetworkOptions) SetClusterFile(param string) error {
return o.setOpt(20, []byte(param))
}
// Enables trace output to a file in a directory of the client's choosing
//
// Parameter: path to output directory (or NULL for current working directory)
func (o NetworkOptions) SetTraceEnable(param string) error {
return o.setOpt(30, []byte(param))
}
// Sets the maximum size in bytes of a single trace output file. This value should be in the range ``[0, INT64_MAX]``. If the value is set to 0, there is no limit on individual file size. The default is a maximum size of 10,485,760 bytes.
//
// Parameter: max size of a single trace output file
func (o NetworkOptions) SetTraceRollSize(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(31, b)
}
// Sets the maximum size of all the trace output files put together. This value should be in the range ``[0, INT64_MAX]``. If the value is set to 0, there is no limit on the total size of the files. The default is a maximum size of 104,857,600 bytes. If the default roll size is used, this means that a maximum of 10 trace files will be written at a time.
//
// Parameter: max total size of trace files
func (o NetworkOptions) SetTraceMaxLogsSize(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(32, b)
}
// Sets the 'LogGroup' attribute with the specified value for all events in the trace output files. The default log group is 'default'.
//
// Parameter: value of the LogGroup attribute
func (o NetworkOptions) SetTraceLogGroup(param string) error {
return o.setOpt(33, []byte(param))
}
// Set internal tuning or debugging knobs
//
// Parameter: knob_name=knob_value
func (o NetworkOptions) SetKnob(param string) error {
return o.setOpt(40, []byte(param))
}
// Deprecated
//
// Parameter: file path or linker-resolved name
func (o NetworkOptions) SetTLSPlugin(param string) error {
return o.setOpt(41, []byte(param))
}
// Set the certificate chain
//
// Parameter: certificates
func (o NetworkOptions) SetTLSCertBytes(param []byte) error {
return o.setOpt(42, param)
}
// Set the file from which to load the certificate chain
//
// Parameter: file path
func (o NetworkOptions) SetTLSCertPath(param string) error {
return o.setOpt(43, []byte(param))
}
// Set the private key corresponding to your own certificate
//
// Parameter: key
func (o NetworkOptions) SetTLSKeyBytes(param []byte) error {
return o.setOpt(45, param)
}
// Set the file from which to load the private key corresponding to your own certificate
//
// Parameter: file path
func (o NetworkOptions) SetTLSKeyPath(param string) error {
return o.setOpt(46, []byte(param))
}
// Set the peer certificate field verification criteria
//
// Parameter: verification pattern
func (o NetworkOptions) SetTLSVerifyPeers(param []byte) error {
return o.setOpt(47, param)
}
// Not yet implemented.
func (o NetworkOptions) SetBuggifyEnable() error {
return o.setOpt(48, nil)
}
// Not yet implemented.
func (o NetworkOptions) SetBuggifyDisable() error {
return o.setOpt(49, nil)
}
// Set the probability of a BUGGIFY section being active for the current execution. Only applies to code paths first traversed AFTER this option is changed.
//
// Parameter: probability expressed as a percentage between 0 and 100
func (o NetworkOptions) SetBuggifySectionActivatedProbability(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(50, b)
}
// Set the probability of an active BUGGIFY section being fired
//
// Parameter: probability expressed as a percentage between 0 and 100
func (o NetworkOptions) SetBuggifySectionFiredProbability(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(51, b)
}
// Set the ca bundle
//
// Parameter: ca bundle
func (o NetworkOptions) SetTLSCaBytes(param []byte) error {
return o.setOpt(52, param)
}
// Set the file from which to load the certificate authority bundle
//
// Parameter: file path
func (o NetworkOptions) SetTLSCaPath(param string) error {
return o.setOpt(53, []byte(param))
}
// Set the passphrase for encrypted private key. Password should be set before setting the key for the password to be used.
//
// Parameter: key passphrase
func (o NetworkOptions) SetTLSPassword(param string) error {
return o.setOpt(54, []byte(param))
}
// Disables the multi-version client API and instead uses the local client directly. Must be set before setting up the network.
func (o NetworkOptions) SetDisableMultiVersionClientApi() error {
return o.setOpt(60, nil)
}
// If set, callbacks from external client libraries can be called from threads created by the FoundationDB client library. Otherwise, callbacks will be called from either the thread used to add the callback or the network thread. Setting this option can improve performance when connected using an external client, but may not be safe to use in all environments. Must be set before setting up the network. WARNING: This feature is considered experimental at this time.
func (o NetworkOptions) SetCallbacksOnExternalThreads() error {
return o.setOpt(61, nil)
}
// Adds an external client library for use by the multi-version client API. Must be set before setting up the network.
//
// Parameter: path to client library
func (o NetworkOptions) SetExternalClientLibrary(param string) error {
return o.setOpt(62, []byte(param))
}
// Searches the specified path for dynamic libraries and adds them to the list of client libraries for use by the multi-version client API. Must be set before setting up the network.
//
// Parameter: path to directory containing client libraries
func (o NetworkOptions) SetExternalClientDirectory(param string) error {
return o.setOpt(63, []byte(param))
}
// Prevents connections through the local client, allowing only connections through externally loaded client libraries. Intended primarily for testing.
func (o NetworkOptions) SetDisableLocalClient() error {
return o.setOpt(64, nil)
}
// Disables logging of client statistics, such as sampled transaction activity.
func (o NetworkOptions) SetDisableClientStatisticsLogging() error {
return o.setOpt(70, nil)
}
// Enables debugging feature to perform slow task profiling. Requires trace logging to be enabled. WARNING: this feature is not recommended for use in production.
func (o NetworkOptions) SetEnableSlowTaskProfiling() error {
return o.setOpt(71, nil)
}
// Set the size of the client location cache. Raising this value can boost performance in very large databases where clients access data in a near-random pattern. Defaults to 100000.
//
// Parameter: Max location cache entries
func (o DatabaseOptions) SetLocationCacheSize(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(10, b)
}
// Set the maximum number of watches allowed to be outstanding on a database connection. Increasing this number could result in increased resource usage. Reducing this number will not cancel any outstanding watches. Defaults to 10000 and cannot be larger than 1000000.
//
// Parameter: Max outstanding watches
func (o DatabaseOptions) SetMaxWatches(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(20, b)
}
// Specify the machine ID that was passed to fdbserver processes running on the same machine as this client, for better location-aware load balancing.
//
// Parameter: Hexadecimal ID
func (o DatabaseOptions) SetMachineId(param string) error {
return o.setOpt(21, []byte(param))
}
// Specify the datacenter ID that was passed to fdbserver processes running in the same datacenter as this client, for better location-aware load balancing.
//
// Parameter: Hexadecimal ID
func (o DatabaseOptions) SetDatacenterId(param string) error {
return o.setOpt(22, []byte(param))
}
// The transaction, if not self-conflicting, may be committed a second time after commit succeeds, in the event of a fault
func (o TransactionOptions) SetCausalWriteRisky() error {
return o.setOpt(10, nil)
}
// The read version will be committed, and usually will be the latest committed, but might not be the latest committed in the event of a fault or partition
func (o TransactionOptions) SetCausalReadRisky() error {
return o.setOpt(20, nil)
}
// Not yet implemented.
func (o TransactionOptions) SetCausalReadDisable() error {
return o.setOpt(21, nil)
}
// The next write performed on this transaction will not generate a write conflict range. As a result, other transactions which read the key(s) being modified by the next write will not conflict with this transaction. Care needs to be taken when using this option on a transaction that is shared between multiple threads. When setting this option, write conflict ranges will be disabled on the next write operation, regardless of what thread it is on.
func (o TransactionOptions) SetNextWriteNoWriteConflictRange() error {
return o.setOpt(30, nil)
}
// Reads performed by a transaction will not see any prior mutations that occurred in that transaction, instead seeing the value which was in the database at the transaction's read version. This option may provide a small performance benefit for the client, but also disables a number of client-side optimizations which are beneficial for transactions which tend to read and write the same keys within a single transaction.
func (o TransactionOptions) SetReadYourWritesDisable() error {
return o.setOpt(51, nil)
}
// Deprecated
func (o TransactionOptions) SetReadAheadDisable() error {
return o.setOpt(52, nil)
}
// Not yet implemented.
func (o TransactionOptions) SetDurabilityDatacenter() error {
return o.setOpt(110, nil)
}
// Not yet implemented.
func (o TransactionOptions) SetDurabilityRisky() error {
return o.setOpt(120, nil)
}
// Deprecated
func (o TransactionOptions) SetDurabilityDevNullIsWebScale() error {
return o.setOpt(130, nil)
}
// Specifies that this transaction should be treated as highest priority and that lower priority transactions should block behind this one. Use is discouraged outside of low-level tools
func (o TransactionOptions) SetPrioritySystemImmediate() error {
return o.setOpt(200, nil)
}
// Specifies that this transaction should be treated as low priority and that default priority transactions should be processed first. Useful for doing batch work simultaneously with latency-sensitive work
func (o TransactionOptions) SetPriorityBatch() error {
return o.setOpt(201, nil)
}
// This is a write-only transaction which sets the initial configuration. This option is designed for use by database system tools only.
func (o TransactionOptions) SetInitializeNewDatabase() error {
return o.setOpt(300, nil)
}
// Allows this transaction to read and modify system keys (those that start with the byte 0xFF)
func (o TransactionOptions) SetAccessSystemKeys() error {
return o.setOpt(301, nil)
}
// Allows this transaction to read system keys (those that start with the byte 0xFF)
func (o TransactionOptions) SetReadSystemKeys() error {
return o.setOpt(302, nil)
}
// Not yet implemented.
func (o TransactionOptions) SetDebugRetryLogging(param string) error {
return o.setOpt(401, []byte(param))
}
// Enables tracing for this transaction and logs results to the client trace logs. Client trace logging must be enabled to get log output.
//
// Parameter: String identifier to be used in the logs when tracing this transaction. The identifier must not exceed 100 characters.
func (o TransactionOptions) SetTransactionLoggingEnable(param string) error {
return o.setOpt(402, []byte(param))
}
// Set a timeout in milliseconds which, when elapsed, will cause the transaction automatically to be cancelled. Valid parameter values are ``[0, INT_MAX]``. If set to 0, will disable all timeouts. All pending and any future uses of the transaction will throw an exception. The transaction can be used again after it is reset. Like all transaction options, a timeout must be reset after a call to onError. This behavior allows the user to make the timeout dynamic.
//
// Parameter: value in milliseconds of timeout
func (o TransactionOptions) SetTimeout(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(500, b)
}
// Set a maximum number of retries after which additional calls to onError will throw the most recently seen error code. Valid parameter values are ``[-1, INT_MAX]``. If set to -1, will disable the retry limit. Like all transaction options, the retry limit must be reset after a call to onError. This behavior allows the user to make the retry limit dynamic.
//
// Parameter: number of times to retry
func (o TransactionOptions) SetRetryLimit(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(501, b)
}
// Set the maximum amount of backoff delay incurred in the call to onError if the error is retryable. Defaults to 1000 ms. Valid parameter values are ``[0, INT_MAX]``. Like all transaction options, the maximum retry delay must be reset after a call to onError. If the maximum retry delay is less than the current retry delay of the transaction, then the current retry delay will be clamped to the maximum retry delay.
//
// Parameter: value in milliseconds of maximum delay
func (o TransactionOptions) SetMaxRetryDelay(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(502, b)
}
// Snapshot read operations will see the results of writes done in the same transaction.
func (o TransactionOptions) SetSnapshotRywEnable() error {
return o.setOpt(600, nil)
}
// Snapshot read operations will not see the results of writes done in the same transaction.
func (o TransactionOptions) SetSnapshotRywDisable() error {
return o.setOpt(601, nil)
}
// The transaction can read and write to locked databases, and is responsible for checking that it took the lock.
func (o TransactionOptions) SetLockAware() error {
return o.setOpt(700, nil)
}
// By default, operations that are performed on a transaction while it is being committed will not only fail themselves, but they will attempt to fail other in-flight operations (such as the commit) as well. This behavior is intended to help developers discover situations where operations could be unintentionally executed after the transaction has been reset. Setting this option removes that protection, causing only the offending operation to fail.
func (o TransactionOptions) SetUsedDuringCommitProtectionDisable() error {
return o.setOpt(701, nil)
}
// The transaction can read from locked databases.
func (o TransactionOptions) SetReadLockAware() error {
return o.setOpt(702, nil)
}
type StreamingMode int
const (
// Client intends to consume the entire range and would like it all
// transferred as early as possible.
StreamingModeWantAll StreamingMode = -1
// The default. The client doesn't know how much of the range it is likely
// to use and wants different performance concerns to be balanced. Only a
// small portion of data is transferred to the client initially (in order to
// minimize costs if the client doesn't read the entire range), and as the
// caller iterates over more items in the range larger batches will be
// transferred in order to minimize latency.
StreamingModeIterator StreamingMode = 0
// Infrequently used. The client has passed a specific row limit and wants
// that many rows delivered in a single batch. Because iterator operation
// in client drivers makes request batches transparent to the user, consider
// the ``WANT_ALL`` StreamingMode instead. A row limit must be specified if this
// mode is used.
StreamingModeExact StreamingMode = 1
// Infrequently used. Transfer data in batches small enough to not be much
// more expensive than reading individual rows, to minimize cost if
// iteration stops early.
StreamingModeSmall StreamingMode = 2
// Infrequently used. Transfer data in batches sized in between small and
// large.
StreamingModeMedium StreamingMode = 3
// Infrequently used. Transfer data in batches large enough to be, in a
// high-concurrency environment, nearly as efficient as possible. If the
// client stops iteration early, some disk and network bandwidth may be
// wasted. The batch size may still be too small to allow a single client to
// get high throughput from the database, so if that is what you need
// consider the SERIAL StreamingMode.
StreamingModeLarge StreamingMode = 4
// Transfer data in batches large enough that an individual client can get
// reasonable read bandwidth from the database. If the client stops
// iteration early, considerable disk and network bandwidth may be wasted.
StreamingModeSerial StreamingMode = 5
)
// Performs an addition of little-endian integers. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The integers to be added must be stored in a little-endian representation. They can be signed in two's complement representation or unsigned. You can add to an integer at a known offset in the value by prepending the appropriate number of zero bytes to ``param`` and padding with zero bytes to match the length of the value. However, this offset technique requires that you know the addition will not cause the integer field within the value to overflow.
func (t Transaction) Add(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 2)
}
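// Illustrative sketch of an atomic increment using Add (assumes a Transaction
// t inside a transactional function; the operand must be little-endian):
//
//	delta := make([]byte, 8)
//	binary.LittleEndian.PutUint64(delta, 1)
//	t.Add(fdb.Key("counter"), delta)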
// Deprecated
func (t Transaction) And(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 6)
}
// Performs a bitwise ``and`` operation. If the existing value in the database is not present, then ``param`` is stored in the database. If the existing value in the database is shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``.
func (t Transaction) BitAnd(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 6)
}
// Deprecated
func (t Transaction) Or(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 7)
}
// Performs a bitwise ``or`` operation. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``.
func (t Transaction) BitOr(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 7)
}
// Deprecated
func (t Transaction) Xor(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 8)
}
// Performs a bitwise ``xor`` operation. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``.
func (t Transaction) BitXor(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 8)
}
// Appends ``param`` to the end of the existing value already in the database at the given key (or creates the key and sets the value to ``param`` if the key is empty). This will only append the value if the final concatenated value size is less than or equal to the maximum value size (i.e., if it fits). WARNING: No error is surfaced back to the user if the final value is too large because the mutation will not be applied until after the transaction has been committed. Therefore, it is only safe to use this mutation type if one can guarantee that one will keep the total value size under the maximum size.
func (t Transaction) AppendIfFits(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 9)
}
// Performs a little-endian comparison of byte strings. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The larger of the two values is then stored in the database.
func (t Transaction) Max(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 12)
}
// Performs a little-endian comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored in the database. If the existing value in the database is shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The smaller of the two values is then stored in the database.
func (t Transaction) Min(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 13)
}
// Transforms ``key`` using a versionstamp for the transaction. Sets the transformed key in the database to ``param``. The key is transformed by removing the final four bytes from the key and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the key from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the key is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the offset was computed from only the final two bytes rather than the final four bytes.
func (t Transaction) SetVersionstampedKey(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 14)
}
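// Illustrative sketch of building a key for SetVersionstampedKey (API version
// 520+ four-byte offset form; assumes a Transaction t; names are hypothetical):
//
//	prefix := []byte("vs/")
//	key := append([]byte{}, prefix...)
//	key = append(key, make([]byte, 10)...) // 10-byte placeholder the versionstamp overwrites
//	off := make([]byte, 4)
//	binary.LittleEndian.PutUint32(off, uint32(len(prefix))) // position of the placeholder
//	key = append(key, off...)
//	t.SetVersionstampedKey(fdb.Key(key), []byte("value"))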
// Transforms ``param`` using a versionstamp for the transaction. Sets the ``key`` given to the transformed ``param``. The parameter is transformed by removing the final four bytes from ``param`` and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the parameter from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the parameter is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the versionstamp was always placed at the beginning of the parameter rather than computing an offset.
func (t Transaction) SetVersionstampedValue(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 15)
}
// Performs lexicographic comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored. Otherwise the smaller of the two values is then stored in the database.
func (t Transaction) ByteMin(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 16)
}
// Performs lexicographic comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored. Otherwise the larger of the two values is then stored in the database.
func (t Transaction) ByteMax(key KeyConvertible, param []byte) {
t.atomicOp(key.FDBKey(), param, 17)
}
type conflictRangeType int
const (
// Used to add a read conflict range
conflictRangeTypeRead conflictRangeType = 0
// Used to add a write conflict range
conflictRangeTypeWrite conflictRangeType = 1
)
type ErrorPredicate int
const (
// Returns ``true`` if the error indicates the operations in the
// transactions should be retried because of transient error.
ErrorPredicateRetryable ErrorPredicate = 50000
// Returns ``true`` if the error indicates the transaction may have
// succeeded, though not in a way the system can verify.
ErrorPredicateMaybeCommitted ErrorPredicate = 50001
// Returns ``true`` if the error indicates the transaction has not
// committed, though in a way that can be retried.
ErrorPredicateRetryableNotCommitted ErrorPredicate = 50002
)

View File

@ -0,0 +1,74 @@
/*
* keyselector.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go API
package fdb
// A Selectable can be converted to a FoundationDB KeySelector. All functions in
// the FoundationDB API that resolve a key selector to a key accept Selectable.
type Selectable interface {
FDBKeySelector() KeySelector
}
// KeySelector represents a description of a key in a FoundationDB database. A
// KeySelector may be resolved to a specific key with the GetKey method, or used
// as the endpoints of a SelectorRange to be used with a GetRange function.
//
// The most common key selectors are constructed with the functions documented
// below. For details of how KeySelectors are specified and resolved, see
// https://apple.github.io/foundationdb/developer-guide.html#key-selectors.
type KeySelector struct {
Key KeyConvertible
OrEqual bool
Offset int
}
func (ks KeySelector) FDBKeySelector() KeySelector {
return ks
}
// LastLessThan returns the KeySelector specifying the lexicographically greatest
// key present in the database which is lexicographically strictly less than the
// given key.
func LastLessThan(key KeyConvertible) KeySelector {
return KeySelector{key, false, 0}
}
// LastLessOrEqual returns the KeySelector specifying the lexicographically
// greatest key present in the database which is lexicographically less than or
// equal to the given key.
func LastLessOrEqual(key KeyConvertible) KeySelector {
return KeySelector{key, true, 0}
}
// FirstGreaterThan returns the KeySelector specifying the lexicographically least
// key present in the database which is lexicographically strictly greater than
// the given key.
func FirstGreaterThan(key KeyConvertible) KeySelector {
return KeySelector{key, true, 1}
}
// FirstGreaterOrEqual returns the KeySelector specifying the lexicographically
// least key present in the database which is lexicographically greater than or
// equal to the given key.
func FirstGreaterOrEqual(key KeyConvertible) KeySelector {
return KeySelector{key, false, 1}
}
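// Illustrative sketch of resolving a key selector (assumes a ReadTransaction
// rt obtained elsewhere):
//
//	k, err := rt.GetKey(fdb.FirstGreaterOrEqual(fdb.Key("apple"))).Get()
//	// k is the least key present in the database that is >= "apple"
//	_ = k
//	_ = err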

View File

@ -0,0 +1,317 @@
/*
* range.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go API
package fdb
/*
#define FDB_API_VERSION 600
#include <foundationdb/fdb_c.h>
*/
import "C"
import (
"fmt"
)
// KeyValue represents a single key-value pair in the database.
type KeyValue struct {
Key Key
Value []byte
}
// RangeOptions specify how a database range read operation is carried
// out. RangeOptions objects are passed to GetRange methods of Database,
// Transaction and Snapshot.
//
// The zero value of RangeOptions represents the default range read
// configuration (no limit, lexicographic order, to be used as an iterator).
type RangeOptions struct {
// Limit restricts the number of key-value pairs returned as part of a range
// read. A value of 0 indicates no limit.
Limit int
// Mode sets the streaming mode of the range read, allowing the database to
// balance latency and bandwidth for this read.
Mode StreamingMode
// Reverse indicates that the read should be performed in lexicographic
// (false) or reverse lexicographic (true) order. When Reverse is true and
// Limit is non-zero, the last Limit key-value pairs in the range are
// returned.
Reverse bool
}
// A Range describes all keys between a begin (inclusive) and end (exclusive)
// key selector.
type Range interface {
// FDBRangeKeySelectors returns a pair of key selectors that describe the
// beginning and end of a range.
FDBRangeKeySelectors() (begin, end Selectable)
}
// An ExactRange describes all keys between a begin (inclusive) and end
// (exclusive) key. If you need to specify an ExactRange and you have only a
// Range, you must resolve the selectors returned by
// (Range).FDBRangeKeySelectors to keys using the (Transaction).GetKey method.
//
// Any object that implements ExactRange also implements Range, and may be used
// accordingly.
type ExactRange interface {
// FDBRangeKeys returns a pair of keys that describe the beginning and end
// of a range.
FDBRangeKeys() (begin, end KeyConvertible)
// An object that implements ExactRange must also implement Range
// (logically, by returning FirstGreaterOrEqual of the keys returned by
// FDBRangeKeys).
Range
}
// KeyRange is an ExactRange constructed from a pair of KeyConvertibles. Note
// that the default zero-value of KeyRange specifies an empty range before all
// keys in the database.
type KeyRange struct {
Begin, End KeyConvertible
}
// FDBRangeKeys allows KeyRange to satisfy the ExactRange interface.
func (kr KeyRange) FDBRangeKeys() (KeyConvertible, KeyConvertible) {
return kr.Begin, kr.End
}
// FDBRangeKeySelectors allows KeyRange to satisfy the Range interface.
func (kr KeyRange) FDBRangeKeySelectors() (Selectable, Selectable) {
return FirstGreaterOrEqual(kr.Begin), FirstGreaterOrEqual(kr.End)
}
// SelectorRange is a Range constructed directly from a pair of Selectable
// objects. Note that the default zero-value of SelectorRange specifies an empty
// range before all keys in the database.
type SelectorRange struct {
Begin, End Selectable
}
// FDBRangeKeySelectors allows SelectorRange to satisfy the Range interface.
func (sr SelectorRange) FDBRangeKeySelectors() (Selectable, Selectable) {
return sr.Begin, sr.End
}
// RangeResult is a handle to the asynchronous result of a range
// read. RangeResult is safe for concurrent use by multiple goroutines.
//
// A RangeResult should not be returned from a transactional function passed to
// the Transact method of a Transactor.
type RangeResult struct {
t *transaction
sr SelectorRange
options RangeOptions
snapshot bool
f *futureKeyValueArray
}
// GetSliceWithError returns a slice of KeyValue objects satisfying the range
// specified in the read that returned this RangeResult, or an error if any of
// the asynchronous operations associated with this result did not successfully
// complete. The current goroutine will be blocked until all reads have
// completed.
func (rr RangeResult) GetSliceWithError() ([]KeyValue, error) {
var ret []KeyValue
ri := rr.Iterator()
if rr.options.Limit != 0 {
ri.options.Mode = StreamingModeExact
} else {
ri.options.Mode = StreamingModeWantAll
}
for ri.Advance() {
if ri.err != nil {
return nil, ri.err
}
ret = append(ret, ri.kvs...)
ri.index = len(ri.kvs)
ri.fetchNextBatch()
}
return ret, nil
}
// GetSliceOrPanic returns a slice of KeyValue objects satisfying the range
// specified in the read that returned this RangeResult, or panics if any of the
// asynchronous operations associated with this result did not successfully
// complete. The current goroutine will be blocked until all reads have
// completed.
func (rr RangeResult) GetSliceOrPanic() []KeyValue {
kvs, e := rr.GetSliceWithError()
if e != nil {
panic(e)
}
return kvs
}
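// Illustrative sketch of a range read returning a slice (assumes a
// ReadTransaction tr obtained elsewhere):
//
//	rr := tr.GetRange(fdb.KeyRange{Begin: fdb.Key("a"), End: fdb.Key("b")},
//		fdb.RangeOptions{Limit: 10})
//	kvs, err := rr.GetSliceWithError()
//	_ = kvs
//	_ = err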
// Iterator returns a RangeIterator over the key-value pairs satisfying the
// range specified in the read that returned this RangeResult.
func (rr RangeResult) Iterator() *RangeIterator {
return &RangeIterator{
t: rr.t,
f: rr.f,
sr: rr.sr,
options: rr.options,
iteration: 1,
snapshot: rr.snapshot,
}
}
// RangeIterator returns the key-value pairs in the database (as KeyValue
// objects) satisfying the range specified in a range read. RangeIterator is
// constructed with the (RangeResult).Iterator method.
//
// You must call Advance and get a true result prior to calling Get or MustGet.
//
// RangeIterator should not be copied or used concurrently from multiple
// goroutines, but multiple RangeIterators may be constructed from a single
// RangeResult and used concurrently. RangeIterator should not be returned from
// a transactional function passed to the Transact method of a Transactor.
type RangeIterator struct {
t *transaction
f *futureKeyValueArray
sr SelectorRange
options RangeOptions
iteration int
done bool
more bool
kvs []KeyValue
index int
err error
snapshot bool
}
// Advance attempts to advance the iterator to the next key-value pair. Advance
// returns true if there are more key-value pairs satisfying the range, or false
// if the range has been exhausted. You must call this before every call to Get
// or MustGet.
func (ri *RangeIterator) Advance() bool {
if ri.done {
return false
}
if ri.f == nil {
return true
}
ri.kvs, ri.more, ri.err = ri.f.Get()
ri.index = 0
ri.f = nil
if ri.err != nil || len(ri.kvs) > 0 {
return true
}
return false
}
func (ri *RangeIterator) fetchNextBatch() {
if !ri.more || ri.index == ri.options.Limit {
ri.done = true
return
}
if ri.options.Limit > 0 {
// Not worried about this being zero, checked equality above
ri.options.Limit -= ri.index
}
if ri.options.Reverse {
ri.sr.End = FirstGreaterOrEqual(ri.kvs[ri.index-1].Key)
} else {
ri.sr.Begin = FirstGreaterThan(ri.kvs[ri.index-1].Key)
}
ri.iteration++
f := ri.t.doGetRange(ri.sr, ri.options, ri.snapshot, ri.iteration)
ri.f = &f
}
// Get returns the next KeyValue in a range read, or an error if one of the
// asynchronous operations associated with this range did not successfully
// complete. The Advance method of this RangeIterator must have returned true
// prior to calling Get.
func (ri *RangeIterator) Get() (kv KeyValue, e error) {
if ri.err != nil {
e = ri.err
return
}
kv = ri.kvs[ri.index]
ri.index++
if ri.index == len(ri.kvs) {
ri.fetchNextBatch()
}
return
}
// MustGet returns the next KeyValue in a range read, or panics if one of the
// asynchronous operations associated with this range did not successfully
// complete. The Advance method of this RangeIterator must have returned true
// prior to calling MustGet.
func (ri *RangeIterator) MustGet() KeyValue {
kv, e := ri.Get()
if e != nil {
panic(e)
}
return kv
}
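// Illustrative sketch of iterating over a range read (assumes a RangeResult
// rr as in the sketch above):
//
//	ri := rr.Iterator()
//	for ri.Advance() {
//		kv := ri.MustGet()
//		fmt.Printf("%s = %s\n", kv.Key, kv.Value)
//	}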
// Strinc returns the lexicographically least key that is strictly greater
// than every key having prefix as a prefix: the prefix with its last
// non-0xFF byte incremented and any trailing bytes dropped. It returns an
// error if prefix is empty or consists entirely of 0xFF bytes.
func Strinc(prefix []byte) ([]byte, error) {
for i := len(prefix) - 1; i >= 0; i-- {
if prefix[i] != 0xFF {
ret := make([]byte, i+1)
copy(ret, prefix[:i+1])
ret[i]++
return ret, nil
}
}
return nil, fmt.Errorf("Key must contain at least one byte not equal to 0xFF")
}
// PrefixRange returns the KeyRange describing the range of keys k such that
// bytes.HasPrefix(k, prefix) is true. PrefixRange returns an error if prefix is
// empty or entirely 0xFF bytes.
//
// Do not use PrefixRange on objects that already implement the Range or
// ExactRange interfaces. The prefix range of the byte representation of these
// objects may not correspond to their logical range.
func PrefixRange(prefix []byte) (KeyRange, error) {
begin := make([]byte, len(prefix))
copy(begin, prefix)
end, e := Strinc(begin)
if e != nil {
return KeyRange{}, e
}
return KeyRange{Key(begin), Key(end)}, nil
}
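// Illustrative sketch (the returned KeyRange covers every key that starts
// with the given prefix; tr is a ReadTransaction obtained elsewhere):
//
//	pr, err := fdb.PrefixRange([]byte("users/"))
//	if err == nil {
//		rr := tr.GetRange(pr, fdb.RangeOptions{})
//		_ = rr
//	}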

View File

@ -0,0 +1,88 @@
/*
* snapshot.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go API
package fdb
// Snapshot is a handle to a FoundationDB transaction snapshot, suitable for
// performing snapshot reads. Snapshot reads offer a more relaxed isolation
// level than FoundationDB's default serializable isolation, reducing
// transaction conflicts but making it harder to reason about concurrency.
//
// For more information on snapshot reads, see
// https://apple.github.io/foundationdb/developer-guide.html#snapshot-reads.
type Snapshot struct {
*transaction
}
// ReadTransact executes the caller-provided function, passing it the Snapshot
// receiver object (as a ReadTransaction).
//
// A panic of type Error during execution of the function will be recovered and
// returned to the caller as an error, but ReadTransact will not retry the
// function.
//
// By satisfying the ReadTransactor interface, Snapshot may be passed to a
// read-only transactional function from another (possibly read-only)
// transactional function, allowing composition.
//
// See the ReadTransactor interface for an example of using ReadTransact with
// Transaction, Snapshot and Database objects.
func (s Snapshot) ReadTransact(f func(ReadTransaction) (interface{}, error)) (r interface{}, e error) {
defer panicToError(&e)
r, e = f(s)
return
}
// Snapshot returns the receiver and allows Snapshot to satisfy the
// ReadTransaction interface.
func (s Snapshot) Snapshot() Snapshot {
return s
}
// Get is equivalent to (Transaction).Get, performed as a snapshot read.
func (s Snapshot) Get(key KeyConvertible) FutureByteSlice {
return s.get(key.FDBKey(), 1)
}
// GetKey is equivalent to (Transaction).GetKey, performed as a snapshot read.
func (s Snapshot) GetKey(sel Selectable) FutureKey {
return s.getKey(sel.FDBKeySelector(), 1)
}
// GetRange is equivalent to (Transaction).GetRange, performed as a snapshot
// read.
func (s Snapshot) GetRange(r Range, options RangeOptions) RangeResult {
return s.getRange(r, options, true)
}
// GetReadVersion is equivalent to (Transaction).GetReadVersion, performed as
// a snapshot read.
func (s Snapshot) GetReadVersion() FutureInt64 {
return s.getReadVersion()
}
// GetDatabase returns a handle to the database with which this snapshot is
// interacting.
func (s Snapshot) GetDatabase() Database {
return s.transaction.db
}
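// Illustrative sketch of a snapshot read inside a transactional function
// (assumes a Transaction tr):
//
//	v, err := tr.Snapshot().Get(fdb.Key("counter")).Get()
//	_ = v
//	_ = err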

View File

@ -0,0 +1,141 @@
/*
* subspace.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go Subspace Layer
// Package subspace provides a convenient way to use FoundationDB tuples to
// define namespaces for different categories of data. The namespace is
// specified by a prefix tuple which is prepended to all tuples packed by the
// subspace. When unpacking a key with the subspace, the prefix tuple will be
// removed from the result.
//
// As a best practice, API clients should use at least one subspace for
// application data. For general guidance on subspace usage, see the Subspaces
// section of the Developer Guide
// (https://apple.github.io/foundationdb/developer-guide.html#subspaces).
package subspace
import (
"bytes"
"errors"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)
// Subspace represents a well-defined region of keyspace in a FoundationDB
// database.
type Subspace interface {
// Sub returns a new Subspace whose prefix extends this Subspace with the
// encoding of the provided element(s). If any of the elements are not a
// valid tuple.TupleElement, Sub will panic.
Sub(el ...tuple.TupleElement) Subspace
// Bytes returns the literal bytes of the prefix of this Subspace.
Bytes() []byte
// Pack returns the key encoding the specified Tuple with the prefix of this
// Subspace prepended.
Pack(t tuple.Tuple) fdb.Key
// Unpack returns the Tuple encoded by the given key with the prefix of this
// Subspace removed. Unpack will return an error if the key is not in this
// Subspace or does not encode a well-formed Tuple.
Unpack(k fdb.KeyConvertible) (tuple.Tuple, error)
// Contains returns true if the provided key starts with the prefix of this
// Subspace, indicating that the Subspace logically contains the key.
Contains(k fdb.KeyConvertible) bool
// All Subspaces implement fdb.KeyConvertible and may be used as
// FoundationDB keys (corresponding to the prefix of this Subspace).
fdb.KeyConvertible
// All Subspaces implement fdb.ExactRange and fdb.Range, and describe all
// keys logically in this Subspace.
fdb.ExactRange
}
type subspace struct {
b []byte
}
// AllKeys returns the Subspace corresponding to all keys in a FoundationDB
// database.
func AllKeys() Subspace {
return subspace{}
}
// Sub returns a new Subspace whose prefix is the encoding of the provided
// element(s). If any of the elements are not a valid tuple.TupleElement, a
// runtime panic will occur.
func Sub(el ...tuple.TupleElement) Subspace {
return subspace{tuple.Tuple(el).Pack()}
}
// FromBytes returns a new Subspace from the provided bytes.
func FromBytes(b []byte) Subspace {
s := make([]byte, len(b))
copy(s, b)
return subspace{s}
}
func (s subspace) Sub(el ...tuple.TupleElement) Subspace {
return subspace{concat(s.Bytes(), tuple.Tuple(el).Pack()...)}
}
func (s subspace) Bytes() []byte {
return s.b
}
func (s subspace) Pack(t tuple.Tuple) fdb.Key {
return fdb.Key(concat(s.b, t.Pack()...))
}
func (s subspace) Unpack(k fdb.KeyConvertible) (tuple.Tuple, error) {
key := k.FDBKey()
if !bytes.HasPrefix(key, s.b) {
return nil, errors.New("key is not in subspace")
}
return tuple.Unpack(key[len(s.b):])
}
func (s subspace) Contains(k fdb.KeyConvertible) bool {
return bytes.HasPrefix(k.FDBKey(), s.b)
}
func (s subspace) FDBKey() fdb.Key {
return fdb.Key(s.b)
}
func (s subspace) FDBRangeKeys() (fdb.KeyConvertible, fdb.KeyConvertible) {
return fdb.Key(concat(s.b, 0x00)), fdb.Key(concat(s.b, 0xFF))
}
func (s subspace) FDBRangeKeySelectors() (fdb.Selectable, fdb.Selectable) {
begin, end := s.FDBRangeKeys()
return fdb.FirstGreaterOrEqual(begin), fdb.FirstGreaterOrEqual(end)
}
func concat(a []byte, b ...byte) []byte {
r := make([]byte, len(a)+len(b))
copy(r, a)
copy(r[len(a):], b)
return r
}
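// Illustrative sketch of namespacing keys with a Subspace, written from a
// client package's point of view (assumes the fdb and tuple imports above):
//
//	users := subspace.Sub("users")
//	k := users.Pack(tuple.Tuple{int64(42), "profile"})
//	t, err := users.Unpack(k) // t is tuple.Tuple{int64(42), "profile"}
//	_ = t
//	_ = err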

View File

@ -0,0 +1,458 @@
/*
* transaction.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go API
package fdb
/*
#define FDB_API_VERSION 600
#include <foundationdb/fdb_c.h>
*/
import "C"
// A ReadTransaction can asynchronously read from a FoundationDB
// database. Transaction and Snapshot both satisfy the ReadTransaction
// interface.
//
// All ReadTransactions satisfy the ReadTransactor interface and may be used
// with read-only transactional functions.
type ReadTransaction interface {
Get(key KeyConvertible) FutureByteSlice
GetKey(sel Selectable) FutureKey
GetRange(r Range, options RangeOptions) RangeResult
GetReadVersion() FutureInt64
GetDatabase() Database
Snapshot() Snapshot
ReadTransactor
}
// Transaction is a handle to a FoundationDB transaction. Transaction is a
// lightweight object that may be efficiently copied, and is safe for concurrent
// use by multiple goroutines.
//
// In FoundationDB, a transaction is a mutable snapshot of a database. All read
// and write operations on a transaction see and modify an otherwise-unchanging
// version of the database and only change the underlying database if and when
// the transaction is committed. Read operations do see the effects of previous
// write operations on the same transaction. Committing a transaction usually
// succeeds in the absence of conflicts.
//
// Transactions group operations into a unit with the properties of atomicity,
// isolation, and durability. Transactions also provide the ability to maintain
// an application's invariants or integrity constraints, supporting the property
// of consistency. Together these properties are known as ACID.
//
// Transactions are also causally consistent: once a transaction has been
// successfully committed, all subsequently created transactions will see the
// modifications made by it.
type Transaction struct {
*transaction
}
type transaction struct {
ptr *C.FDBTransaction
db Database
}
// TransactionOptions is a handle with which to set options that affect a
// Transaction object. A TransactionOptions instance should be obtained with the
// (Transaction).Options method.
type TransactionOptions struct {
transaction *transaction
}
func (opt TransactionOptions) setOpt(code int, param []byte) error {
return setOpt(func(p *C.uint8_t, pl C.int) C.fdb_error_t {
return C.fdb_transaction_set_option(opt.transaction.ptr, C.FDBTransactionOption(code), p, pl)
}, param)
}
func (t *transaction) destroy() {
C.fdb_transaction_destroy(t.ptr)
}
// GetDatabase returns a handle to the database with which this transaction is
// interacting.
func (t Transaction) GetDatabase() Database {
return t.transaction.db
}
// Transact executes the caller-provided function, passing it the Transaction
// receiver object.
//
// A panic of type Error during execution of the function will be recovered and
// returned to the caller as an error, but Transact will not retry the function
// or commit the Transaction after the caller-provided function completes.
//
// By satisfying the Transactor interface, Transaction may be passed to a
// transactional function from another transactional function, allowing
// composition. The outermost transactional function must have been provided a
// Database, or else the transaction will never be committed.
//
// See the Transactor interface for an example of using Transact with
// Transaction and Database objects.
func (t Transaction) Transact(f func(Transaction) (interface{}, error)) (r interface{}, e error) {
defer panicToError(&e)
r, e = f(t)
return
}
// ReadTransact executes the caller-provided function, passing it the
// Transaction receiver object (as a ReadTransaction).
//
// A panic of type Error during execution of the function will be recovered and
// returned to the caller as an error, but ReadTransact will not retry the
// function.
//
// By satisfying the ReadTransactor interface, Transaction may be passed to a
// read-only transactional function from another (possibly read-only)
// transactional function, allowing composition.
//
// See the ReadTransactor interface for an example of using ReadTransact with
// Transaction, Snapshot and Database objects.
func (t Transaction) ReadTransact(f func(ReadTransaction) (interface{}, error)) (r interface{}, e error) {
defer panicToError(&e)
r, e = f(t)
return
}
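// Composition sketch (illustrative, not part of the vendored source; the
// helper name is hypothetical): because Database and Transaction both satisfy
// the Transactor interface, a transactional helper can be called with either.
//
//	func setDefault(t Transactor, key KeyConvertible, value []byte) error {
//	    _, err := t.Transact(func(tr Transaction) (interface{}, error) {
//	        if existing := tr.Get(key).MustGet(); existing == nil {
//	            tr.Set(key, value) // only write if the key is currently unset
//	        }
//	        return nil, nil
//	    })
//	    return err
//	}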
// Cancel cancels a transaction. All pending or future uses of the transaction
// will encounter an error. The Transaction object may be reused after calling
// (Transaction).Reset.
//
// Be careful if you are using (Transaction).Reset and (Transaction).Cancel
// concurrently with the same transaction. Since they negate each other's
// effects, a race condition between these calls will leave the transaction in
// an unknown state.
//
// If your program attempts to cancel a transaction after (Transaction).Commit
// has been called but before it returns, unpredictable behavior will
// result. While it is guaranteed that the transaction will eventually end up in
// a cancelled state, the commit may or may not occur. Moreover, even if the
// call to (Transaction).Commit appears to return a transaction_cancelled
// error, the commit may have occurred or may occur in the future. This can make
// it more difficult to reason about the order in which transactions occur.
func (t Transaction) Cancel() {
C.fdb_transaction_cancel(t.ptr)
}
// (Infrequently used) SetReadVersion sets the database version that the transaction will read from
// the database. The database cannot guarantee causal consistency if this method
// is used (the transaction's reads will be causally consistent only if the
// provided read version has that property).
func (t Transaction) SetReadVersion(version int64) {
C.fdb_transaction_set_read_version(t.ptr, C.int64_t(version))
}
// Snapshot returns a Snapshot object, suitable for performing snapshot
// reads. Snapshot reads offer a more relaxed isolation level than
// FoundationDB's default serializable isolation, reducing transaction conflicts
// but making it harder to reason about concurrency.
//
// For more information on snapshot reads, see
// https://apple.github.io/foundationdb/developer-guide.html#snapshot-reads.
func (t Transaction) Snapshot() Snapshot {
return Snapshot{t.transaction}
}
// OnError determines whether an error returned by a Transaction method is
// retryable. Waiting on the returned future will return the same error when
// fatal, or return nil (after blocking the calling goroutine for a suitable
// delay) for retryable errors.
//
// Typical code will not use OnError directly. (Database).Transact uses
// OnError internally to implement a correct retry loop.
func (t Transaction) OnError(e Error) FutureNil {
return &futureNil{newFuture(C.fdb_transaction_on_error(t.ptr, C.fdb_error_t(e.Code)))}
}
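// Manual retry-loop sketch (illustrative only; (Database).Transact already
// implements this pattern and is the recommended way to run transactions):
//
//	for {
//	    // ... perform reads and writes on tr (hypothetical application work) ...
//	    err := tr.Commit().Get()
//	    if err == nil {
//	        break // committed successfully
//	    }
//	    fdbErr, ok := err.(Error)
//	    if !ok {
//	        return err // not a FoundationDB error; do not retry
//	    }
//	    if retryErr := tr.OnError(fdbErr).Get(); retryErr != nil {
//	        return retryErr // fatal error
//	    }
//	    // retryable: OnError has reset tr after a suitable delay; try again
//	}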
// Commit attempts to commit the modifications made in the transaction to the
// database. Waiting on the returned future will block the calling goroutine
// until the transaction has either been committed successfully or an error is
// encountered. Any error should be passed to (Transaction).OnError to determine
// if the error is retryable or not.
//
// As with other client/server databases, in some failure scenarios a client may
// be unable to determine whether a transaction succeeded. For more information,
// see
// https://apple.github.io/foundationdb/developer-guide.html#transactions-with-unknown-results.
func (t Transaction) Commit() FutureNil {
return &futureNil{newFuture(C.fdb_transaction_commit(t.ptr))}
}
// Watch creates a watch and returns a FutureNil that will become ready when the
// watch reports a change to the value of the specified key.
//
// A watch's behavior is relative to the transaction that created it. A watch
// will report a change in relation to the key's value as readable by that
// transaction. The initial value used for comparison is either that of the
// transaction's read version or the value as modified by the transaction itself
// prior to the creation of the watch. If the value changes and then changes
// back to its initial value, the watch might not report the change.
//
// Until the transaction that created it has been committed, a watch will not
// report changes made by other transactions. In contrast, a watch will
// immediately report changes made by the transaction itself. Watches cannot be
// created if the transaction has called SetReadYourWritesDisable on the
// Transaction options, and an attempt to do so will return a watches_disabled
// error.
//
// If the transaction used to create a watch encounters an error during commit,
// then the watch will be set with that error. A transaction whose commit
// result is unknown will set all of its watches with the commit_unknown_result
// error. If an uncommitted transaction is reset or destroyed, then any watches
// it created will be set with the transaction_cancelled error.
//
// By default, each database connection can have no more than 10,000 watches
// that have not yet reported a change. When this number is exceeded, an attempt
// to create a watch will return a too_many_watches error. This limit can be
// changed using SetMaxWatches on the Database. Because a watch outlives the
// transaction that creates it, any watch that is no longer needed should be
// cancelled by calling (FutureNil).Cancel on its returned future.
func (t Transaction) Watch(key KeyConvertible) FutureNil {
kb := key.FDBKey()
return &futureNil{newFuture(C.fdb_transaction_watch(t.ptr, byteSliceToPtr(kb), C.int(len(kb))))}
}
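// Watch usage sketch (illustrative; db and the key are hypothetical): create
// the watch inside a transaction, commit it, then block on the future.
//
//	w, _ := db.Transact(func(tr Transaction) (interface{}, error) {
//	    return tr.Watch(Key("config/flag")), nil
//	})
//	if err := w.(FutureNil).Get(); err == nil {
//	    // the value of "config/flag" changed relative to the transaction's view
//	}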
func (t *transaction) get(key []byte, snapshot int) FutureByteSlice {
return &futureByteSlice{future: newFuture(C.fdb_transaction_get(t.ptr, byteSliceToPtr(key), C.int(len(key)), C.fdb_bool_t(snapshot)))}
}
// Get returns the (future) value associated with the specified key. The read is
// performed asynchronously and does not block the calling goroutine. The future
// will become ready when the read is complete.
func (t Transaction) Get(key KeyConvertible) FutureByteSlice {
return t.get(key.FDBKey(), 0)
}
func (t *transaction) doGetRange(r Range, options RangeOptions, snapshot bool, iteration int) futureKeyValueArray {
begin, end := r.FDBRangeKeySelectors()
bsel := begin.FDBKeySelector()
esel := end.FDBKeySelector()
bkey := bsel.Key.FDBKey()
ekey := esel.Key.FDBKey()
return futureKeyValueArray{newFuture(C.fdb_transaction_get_range(t.ptr, byteSliceToPtr(bkey), C.int(len(bkey)), C.fdb_bool_t(boolToInt(bsel.OrEqual)), C.int(bsel.Offset), byteSliceToPtr(ekey), C.int(len(ekey)), C.fdb_bool_t(boolToInt(esel.OrEqual)), C.int(esel.Offset), C.int(options.Limit), C.int(0), C.FDBStreamingMode(options.Mode-1), C.int(iteration), C.fdb_bool_t(boolToInt(snapshot)), C.fdb_bool_t(boolToInt(options.Reverse))))}
}
func (t *transaction) getRange(r Range, options RangeOptions, snapshot bool) RangeResult {
f := t.doGetRange(r, options, snapshot, 1)
begin, end := r.FDBRangeKeySelectors()
return RangeResult{
t: t,
sr: SelectorRange{begin, end},
options: options,
snapshot: snapshot,
f: &f,
}
}
// GetRange performs a range read. The returned RangeResult represents all
// KeyValue objects kv where beginKey <= kv.Key < endKey, ordered by kv.Key
// (where beginKey and endKey are the keys described by the key selectors
// returned by r.FDBRangeKeySelectors). All reads performed as a result of GetRange
// are asynchronous and do not block the calling goroutine.
func (t Transaction) GetRange(r Range, options RangeOptions) RangeResult {
return t.getRange(r, options, false)
}
func (t *transaction) getReadVersion() FutureInt64 {
return &futureInt64{newFuture(C.fdb_transaction_get_read_version(t.ptr))}
}
// (Infrequently used) GetReadVersion returns the (future) transaction read version. The read is
// performed asynchronously and does not block the calling goroutine. The future
// will become ready when the read version is available.
func (t Transaction) GetReadVersion() FutureInt64 {
return t.getReadVersion()
}
// Set associates the given key and value, overwriting any previous association
// with key. Set returns immediately, having modified the snapshot of the
// database represented by the transaction.
func (t Transaction) Set(key KeyConvertible, value []byte) {
kb := key.FDBKey()
C.fdb_transaction_set(t.ptr, byteSliceToPtr(kb), C.int(len(kb)), byteSliceToPtr(value), C.int(len(value)))
}
// Clear removes the specified key (and any associated value), if it
// exists. Clear returns immediately, having modified the snapshot of the
// database represented by the transaction.
func (t Transaction) Clear(key KeyConvertible) {
kb := key.FDBKey()
C.fdb_transaction_clear(t.ptr, byteSliceToPtr(kb), C.int(len(kb)))
}
// ClearRange removes all keys k such that begin <= k < end, and their
// associated values. ClearRange returns immediately, having modified the
// snapshot of the database represented by the transaction.
func (t Transaction) ClearRange(er ExactRange) {
begin, end := er.FDBRangeKeys()
bkb := begin.FDBKey()
ekb := end.FDBKey()
C.fdb_transaction_clear_range(t.ptr, byteSliceToPtr(bkb), C.int(len(bkb)), byteSliceToPtr(ekb), C.int(len(ekb)))
}
// (Infrequently used) GetCommittedVersion returns the version number at which a
// successful commit modified the database. This must be called only after the
// successful (non-error) completion of a call to Commit on this Transaction, or
// the behavior is undefined. Read-only transactions do not modify the database
// when committed and will have a committed version of -1. Keep in mind that a
// transaction which reads keys and then sets them to their current values may
// be optimized to a read-only transaction.
func (t Transaction) GetCommittedVersion() (int64, error) {
var version C.int64_t
if err := C.fdb_transaction_get_committed_version(t.ptr, &version); err != 0 {
return 0, Error{int(err)}
}
return int64(version), nil
}
// (Infrequently used) GetVersionstamp returns a future that will contain the
// versionstamp used by any versionstamp operations in this transaction. The
// future will be ready only after the successful completion of a call to Commit
// on this Transaction. Read-only transactions do not modify the database when
// committed and will result in the future completing with an error. Keep in
// mind that a transaction which reads keys and then sets them to their current
// values may be optimized to a read-only transaction.
func (t Transaction) GetVersionstamp() FutureKey {
return &futureKey{future: newFuture(C.fdb_transaction_get_versionstamp(t.ptr))}
}
// Reset rolls back a transaction, completely resetting it to its initial
// state. This is logically equivalent to destroying the transaction and
// creating a new one.
func (t Transaction) Reset() {
C.fdb_transaction_reset(t.ptr)
}
func boolToInt(b bool) int {
if b {
return 1
}
return 0
}
func (t *transaction) getKey(sel KeySelector, snapshot int) FutureKey {
key := sel.Key.FDBKey()
return &futureKey{future: newFuture(C.fdb_transaction_get_key(t.ptr, byteSliceToPtr(key), C.int(len(key)), C.fdb_bool_t(boolToInt(sel.OrEqual)), C.int(sel.Offset), C.fdb_bool_t(snapshot)))}
}
// GetKey returns the future key referenced by the provided key selector. The
// read is performed asynchronously and does not block the calling
// goroutine. The future will become ready when the read is complete.
//
// By default, the key is cached for the duration of the transaction, providing
// a potential performance benefit. However, the value of the key is also
// retrieved, using network bandwidth. Invoking
// (TransactionOptions).SetReadYourWritesDisable will avoid both the caching and
// the increased network bandwidth.
func (t Transaction) GetKey(sel Selectable) FutureKey {
return t.getKey(sel.FDBKeySelector(), 0)
}
func (t Transaction) atomicOp(key []byte, param []byte, code int) {
C.fdb_transaction_atomic_op(t.ptr, byteSliceToPtr(key), C.int(len(key)), byteSliceToPtr(param), C.int(len(param)), C.FDBMutationType(code))
}
func addConflictRange(t *transaction, er ExactRange, crtype conflictRangeType) error {
begin, end := er.FDBRangeKeys()
bkb := begin.FDBKey()
ekb := end.FDBKey()
if err := C.fdb_transaction_add_conflict_range(t.ptr, byteSliceToPtr(bkb), C.int(len(bkb)), byteSliceToPtr(ekb), C.int(len(ekb)), C.FDBConflictRangeType(crtype)); err != 0 {
return Error{int(err)}
}
return nil
}
// AddReadConflictRange adds a range of keys to the transaction's read conflict
// ranges as if you had read the range. As a result, other transactions that
// write a key in this range could cause the transaction to fail with a
// conflict.
//
// For more information on conflict ranges, see
// https://apple.github.io/foundationdb/developer-guide.html#conflict-ranges.
func (t Transaction) AddReadConflictRange(er ExactRange) error {
return addConflictRange(t.transaction, er, conflictRangeTypeRead)
}
func copyAndAppend(orig []byte, b byte) []byte {
ret := make([]byte, len(orig)+1)
copy(ret, orig)
ret[len(orig)] = b
return ret
}
// AddReadConflictKey adds a key to the transaction's read conflict ranges as if
// you had read the key. As a result, other transactions that concurrently write
// this key could cause the transaction to fail with a conflict.
//
// For more information on conflict ranges, see
// https://apple.github.io/foundationdb/developer-guide.html#conflict-ranges.
func (t Transaction) AddReadConflictKey(key KeyConvertible) error {
return addConflictRange(t.transaction, KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))}, conflictRangeTypeRead)
}
// AddWriteConflictRange adds a range of keys to the transaction's write
// conflict ranges as if you had cleared the range. As a result, other
// transactions that concurrently read a key in this range could fail with a
// conflict.
//
// For more information on conflict ranges, see
// https://apple.github.io/foundationdb/developer-guide.html#conflict-ranges.
func (t Transaction) AddWriteConflictRange(er ExactRange) error {
return addConflictRange(t.transaction, er, conflictRangeTypeWrite)
}
// AddWriteConflictKey adds a key to the transaction's write conflict ranges as
// if you had written the key. As a result, other transactions that concurrently
// read this key could fail with a conflict.
//
// For more information on conflict ranges, see
// https://apple.github.io/foundationdb/developer-guide.html#conflict-ranges.
func (t Transaction) AddWriteConflictKey(key KeyConvertible) error {
return addConflictRange(t.transaction, KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))}, conflictRangeTypeWrite)
}
// Options returns a TransactionOptions instance suitable for setting options
// specific to this transaction.
func (t Transaction) Options() TransactionOptions {
return TransactionOptions{t.transaction}
}
func localityGetAddressesForKey(t *transaction, key KeyConvertible) FutureStringSlice {
kb := key.FDBKey()
return &futureStringSlice{newFuture(C.fdb_transaction_get_addresses_for_key(t.ptr, byteSliceToPtr(kb), C.int(len(kb))))}
}
// LocalityGetAddressesForKey returns the (future) public network addresses of
// each of the storage servers responsible for storing key and its associated
// value. The read is performed asynchronously and does not block the calling
// goroutine. The future will become ready when the read is complete.
func (t Transaction) LocalityGetAddressesForKey(key KeyConvertible) FutureStringSlice {
return localityGetAddressesForKey(t.transaction, key)
}

View File

@ -0,0 +1,438 @@
/*
* tuple.go
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FoundationDB Go Tuple Layer
// Package tuple provides a layer for encoding and decoding multi-element tuples
// into keys usable by FoundationDB. The encoded key maintains the same sort
// order as the original tuple: sorted first by the first element, then by the
// second element, etc. This makes the tuple layer ideal for building a variety
// of higher-level data models.
//
// For general guidance on tuple usage, see the Tuple section of Data Modeling
// (https://apple.github.io/foundationdb/data-modeling.html#tuples).
//
// FoundationDB tuples can currently encode byte and unicode strings, integers,
// floats, doubles, booleans, UUIDs, tuples, and NULL values. In Go these are
// represented as []byte (or fdb.KeyConvertible), string, int64 (or int),
// float32, float64, bool, UUID, Tuple, and nil.
package tuple
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"github.com/apple/foundationdb/bindings/go/src/fdb"
)
// A TupleElement is one of the types that may be encoded in FoundationDB
// tuples. Although the Go compiler cannot enforce this, it is a programming
// error to use an unsupported type as a TupleElement (and will typically
// result in a runtime panic).
//
// The valid types for TupleElement are []byte (or fdb.KeyConvertible), string,
// int64 (or int), float32, float64, bool, UUID, Tuple, and nil.
type TupleElement interface{}
// Tuple is a slice of objects that can be encoded as FoundationDB tuples. If
// any of the TupleElements are of unsupported types, a runtime panic will occur
// when the Tuple is packed.
//
// Given a Tuple T containing objects only of these types, then T will be
// identical to the Tuple returned by unpacking the byte slice obtained by
// packing T (modulo type normalization to []byte and int64).
type Tuple []TupleElement
// UUID wraps a basic byte array as a UUID. We do not provide any special
// methods for accessing or generating the UUID, but as Go does not provide
// a built-in UUID type, this simple wrapper allows for other libraries
// to write the output of their UUID type as a 16-byte array into
// an instance of this type.
type UUID [16]byte
// Type codes: These prefix the different elements in a packed Tuple
// to indicate what type they are.
const nilCode = 0x00
const bytesCode = 0x01
const stringCode = 0x02
const nestedCode = 0x05
const intZeroCode = 0x14
const posIntEnd = 0x1c
const negIntStart = 0x0c
const floatCode = 0x20
const doubleCode = 0x21
const falseCode = 0x26
const trueCode = 0x27
const uuidCode = 0x30
var sizeLimits = []uint64{
1<<(0*8) - 1,
1<<(1*8) - 1,
1<<(2*8) - 1,
1<<(3*8) - 1,
1<<(4*8) - 1,
1<<(5*8) - 1,
1<<(6*8) - 1,
1<<(7*8) - 1,
1<<(8*8) - 1,
}
func bisectLeft(u uint64) int {
var n int
for sizeLimits[n] < u {
n++
}
return n
}
func adjustFloatBytes(b []byte, encode bool) {
if (encode && b[0]&0x80 != 0x00) || (!encode && b[0]&0x80 == 0x00) {
// Negative numbers: flip all of the bytes.
for i := 0; i < len(b); i++ {
b[i] = b[i] ^ 0xff
}
} else {
// Positive number: flip just the sign bit.
b[0] = b[0] ^ 0x80
}
}
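// Worked example (derived from the transformation above): float32(1.0) has
// IEEE-754 bytes 3F 80 00 00; the sign bit is clear, so only that bit is
// flipped and the encoded bytes are BF 80 00 00. float32(-1.0) is
// BF 80 00 00; the sign bit is set, so every byte is inverted, giving
// 40 7F FF FF. Encoded floats therefore sort in the same order as the
// original values.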
type packer struct {
buf []byte
}
func (p *packer) putByte(b byte) {
p.buf = append(p.buf, b)
}
func (p *packer) putBytes(b []byte) {
p.buf = append(p.buf, b...)
}
func (p *packer) putBytesNil(b []byte, i int) {
for i >= 0 {
p.putBytes(b[:i+1])
p.putByte(0xFF)
b = b[i+1:]
i = bytes.IndexByte(b, 0x00)
}
p.putBytes(b)
}
func (p *packer) encodeBytes(code byte, b []byte) {
p.putByte(code)
if i := bytes.IndexByte(b, 0x00); i >= 0 {
p.putBytesNil(b, i)
} else {
p.putBytes(b)
}
p.putByte(0x00)
}
func (p *packer) encodeInt(i int64) {
if i == 0 {
p.putByte(0x14)
return
}
var n int
var scratch [8]byte
switch {
case i > 0:
n = bisectLeft(uint64(i))
p.putByte(byte(intZeroCode + n))
binary.BigEndian.PutUint64(scratch[:], uint64(i))
case i < 0:
n = bisectLeft(uint64(-i))
p.putByte(byte(0x14 - n))
offsetEncoded := int64(sizeLimits[n]) + i
binary.BigEndian.PutUint64(scratch[:], uint64(offsetEncoded))
}
p.putBytes(scratch[8-n:])
}
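// Worked example (following the code above): encoding int64(1000) gives
// bisectLeft(1000) == 2, so the type byte is intZeroCode+2 == 0x16 and the
// value is written big-endian in two bytes: 0x16 0x03 0xE8. For -1000 the
// type byte is 0x14-2 == 0x12 and the stored value is
// sizeLimits[2] + (-1000) == 65535 - 1000 == 64535, i.e. 0x12 0xFC 0x17.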
func (p *packer) encodeFloat(f float32) {
var scratch [4]byte
binary.BigEndian.PutUint32(scratch[:], math.Float32bits(f))
adjustFloatBytes(scratch[:], true)
p.putByte(floatCode)
p.putBytes(scratch[:])
}
func (p *packer) encodeDouble(d float64) {
var scratch [8]byte
binary.BigEndian.PutUint64(scratch[:], math.Float64bits(d))
adjustFloatBytes(scratch[:], true)
p.putByte(doubleCode)
p.putBytes(scratch[:])
}
func (p *packer) encodeUUID(u UUID) {
p.putByte(uuidCode)
p.putBytes(u[:])
}
func (p *packer) encodeTuple(t Tuple, nested bool) {
if nested {
p.putByte(nestedCode)
}
for i, e := range t {
switch e := e.(type) {
case Tuple:
p.encodeTuple(e, true)
case nil:
p.putByte(nilCode)
if nested {
p.putByte(0xff)
}
case int64:
p.encodeInt(e)
case int:
p.encodeInt(int64(e))
case []byte:
p.encodeBytes(bytesCode, e)
case fdb.KeyConvertible:
p.encodeBytes(bytesCode, []byte(e.FDBKey()))
case string:
p.encodeBytes(stringCode, []byte(e))
case float32:
p.encodeFloat(e)
case float64:
p.encodeDouble(e)
case bool:
if e {
p.putByte(trueCode)
} else {
p.putByte(falseCode)
}
case UUID:
p.encodeUUID(e)
default:
panic(fmt.Sprintf("unencodable element at index %d (%v, type %T)", i, t[i], t[i]))
}
}
if nested {
p.putByte(0x00)
}
}
// Pack returns a new byte slice encoding the provided tuple. Pack will panic if
// the tuple contains an element of any type other than []byte,
// fdb.KeyConvertible, string, int64, int, float32, float64, bool, tuple.UUID,
// nil, or a Tuple with elements of valid types.
//
// Tuple satisfies the fdb.KeyConvertible interface, so it is not necessary to
// call Pack when using a Tuple with a FoundationDB API function that requires a
// key.
func (t Tuple) Pack() []byte {
p := packer{buf: make([]byte, 0, 64)}
p.encodeTuple(t, false)
return p.buf
}
func findTerminator(b []byte) int {
bp := b
var length int
for {
idx := bytes.IndexByte(bp, 0x00)
length += idx
if idx+1 == len(bp) || bp[idx+1] != 0xFF {
break
}
length += 2
bp = bp[idx+2:]
}
return length
}
func decodeBytes(b []byte) ([]byte, int) {
idx := findTerminator(b[1:])
return bytes.Replace(b[1:idx+1], []byte{0x00, 0xFF}, []byte{0x00}, -1), idx + 2
}
func decodeString(b []byte) (string, int) {
bp, idx := decodeBytes(b)
return string(bp), idx
}
func decodeInt(b []byte) (int64, int) {
if b[0] == intZeroCode {
return 0, 1
}
var neg bool
n := int(b[0]) - intZeroCode
if n < 0 {
n = -n
neg = true
}
bp := make([]byte, 8)
copy(bp[8-n:], b[1:n+1])
var ret int64
binary.Read(bytes.NewBuffer(bp), binary.BigEndian, &ret)
if neg {
ret -= int64(sizeLimits[n])
}
return ret, n + 1
}
func decodeFloat(b []byte) (float32, int) {
bp := make([]byte, 4)
copy(bp, b[1:])
adjustFloatBytes(bp, false)
var ret float32
binary.Read(bytes.NewBuffer(bp), binary.BigEndian, &ret)
return ret, 5
}
func decodeDouble(b []byte) (float64, int) {
bp := make([]byte, 8)
copy(bp, b[1:])
adjustFloatBytes(bp, false)
var ret float64
binary.Read(bytes.NewBuffer(bp), binary.BigEndian, &ret)
return ret, 9
}
func decodeUUID(b []byte) (UUID, int) {
var u UUID
copy(u[:], b[1:])
return u, 17
}
func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
var t Tuple
var i int
for i < len(b) {
var el interface{}
var off int
switch {
case b[i] == nilCode:
if !nested {
el = nil
off = 1
} else if i+1 < len(b) && b[i+1] == 0xff {
el = nil
off = 2
} else {
return t, i + 1, nil
}
case b[i] == bytesCode:
el, off = decodeBytes(b[i:])
case b[i] == stringCode:
el, off = decodeString(b[i:])
case negIntStart <= b[i] && b[i] <= posIntEnd:
el, off = decodeInt(b[i:])
case b[i] == floatCode:
if i+5 > len(b) {
return nil, i, fmt.Errorf("insufficient bytes to decode float starting at position %d of byte array for tuple", i)
}
el, off = decodeFloat(b[i:])
case b[i] == doubleCode:
if i+9 > len(b) {
return nil, i, fmt.Errorf("insufficient bytes to decode double starting at position %d of byte array for tuple", i)
}
el, off = decodeDouble(b[i:])
case b[i] == trueCode:
el = true
off = 1
case b[i] == falseCode:
el = false
off = 1
case b[i] == uuidCode:
if i+17 > len(b) {
return nil, i, fmt.Errorf("insufficient bytes to decode UUID starting at position %d of byte array for tuple", i)
}
el, off = decodeUUID(b[i:])
case b[i] == nestedCode:
var err error
el, off, err = decodeTuple(b[i+1:], true)
if err != nil {
return nil, i, err
}
off++
default:
return nil, i, fmt.Errorf("unable to decode tuple element with unknown typecode %02x", b[i])
}
t = append(t, el)
i += off
}
return t, i, nil
}
// Unpack returns the tuple encoded by the provided byte slice, or an error if
// the key does not correctly encode a FoundationDB tuple.
func Unpack(b []byte) (Tuple, error) {
t, _, err := decodeTuple(b, false)
return t, err
}
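// Round-trip sketch (illustrative; the values are arbitrary):
//
//	t := Tuple{"user", int64(42), UUID{}, nil}
//	b := t.Pack()
//	u, err := Unpack(b) // err == nil; u is element-wise equal to t
//	// (integers always come back as int64; KeyConvertible elements come back as []byte)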
// FDBKey returns the packed representation of a Tuple, and allows Tuple to
// satisfy the fdb.KeyConvertible interface. FDBKey will panic in the same
// circumstances as Pack.
func (t Tuple) FDBKey() fdb.Key {
return t.Pack()
}
// FDBRangeKeys allows Tuple to satisfy the fdb.ExactRange interface. The range
// represents all keys that encode tuples strictly starting with a Tuple (that
// is, all tuples of greater length than the Tuple of which the Tuple is a
// prefix).
func (t Tuple) FDBRangeKeys() (fdb.KeyConvertible, fdb.KeyConvertible) {
p := t.Pack()
return fdb.Key(concat(p, 0x00)), fdb.Key(concat(p, 0xFF))
}
// FDBRangeKeySelectors allows Tuple to satisfy the fdb.Range interface. The
// range represents all keys that encode tuples strictly starting with a Tuple
// (that is, all tuples of greater length than the Tuple of which the Tuple is a
// prefix).
func (t Tuple) FDBRangeKeySelectors() (fdb.Selectable, fdb.Selectable) {
b, e := t.FDBRangeKeys()
return fdb.FirstGreaterOrEqual(b), fdb.FirstGreaterOrEqual(e)
}
func concat(a []byte, b ...byte) []byte {
r := make([]byte, len(a)+len(b))
copy(r, a)
copy(r[len(a):], b)
return r
}

View File

@ -1,4 +1,5 @@
// +build go1.3
package prometheus
import (
@ -100,7 +101,7 @@ func (p *PrometheusSink) Collect(c chan<- prometheus.Metric) {
}
}
var forbiddenChars = regexp.MustCompile("[ .=\\-]")
var forbiddenChars = regexp.MustCompile("[ .=\\-/]")
func (p *PrometheusSink) flattenKey(parts []string, labels []metrics.Label) (string, string) {
key := strings.Join(parts, "_")

1
vendor/github.com/armon/go-radix/go.mod generated vendored Normal file
View File

@ -0,0 +1 @@
module github.com/armon/go-radix

View File

@ -44,13 +44,13 @@ func (n *node) addEdge(e edge) {
n.edges.Sort()
}
func (n *node) replaceEdge(e edge) {
func (n *node) updateEdge(label byte, node *node) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= e.label
return n.edges[i].label >= label
})
if idx < num && n.edges[idx].label == e.label {
n.edges[idx].node = e.node
if idx < num && n.edges[idx].label == label {
n.edges[idx].node = node
return
}
panic("replacing missing edge")
@ -198,10 +198,7 @@ func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
child := &node{
prefix: search[:commonPrefix],
}
parent.replaceEdge(edge{
label: search[0],
node: child,
})
parent.updateEdge(search[0], child)
// Restore the existing node
child.addEdge(edge{

View File

@ -1,7 +1,7 @@
govalidator
===========
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors)
[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
@ -33,6 +33,8 @@ import (
#### Activate behavior to require all fields have a validation tag by default
`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to distinguish between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors.
```go
import "github.com/asaskevich/govalidator"
@ -488,3 +490,7 @@ Support this project by becoming a sponsor. Your logo will show up here with a l
<a href="https://opencollective.com/govalidator/sponsor/9/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/9/avatar.svg"></a>
## License
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)

View File

@ -26,11 +26,18 @@ type Error struct {
// Validator indicates the name of the validator that failed
Validator string
Path []string
}
func (e Error) Error() string {
if e.CustomErrorMessageExists {
return e.Err.Error()
}
return e.Name + ": " + e.Err.Error()
errName := e.Name
if len(e.Path) > 0 {
errName = strings.Join(append(e.Path, e.Name), ".")
}
return errName + ": " + e.Err.Error()
}

View File

@ -4,47 +4,49 @@ import "regexp"
// Basic regular expressions for validating strings
const (
Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
ISBN13 string = "^(?:[0-9]{13})$"
UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
Alpha string = "^[a-zA-Z]+$"
Alphanumeric string = "^[a-zA-Z0-9]+$"
Numeric string = "^[0-9]+$"
Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
Hexadecimal string = "^[0-9a-fA-F]+$"
Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
ASCII string = "^[\x00-\x7F]+$"
Multibyte string = "[^\x00-\x7F]"
FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
PrintableASCII string = "^[\x20-\x7E]+$"
DataURI string = "^data:.+\\/(.+);base64$"
Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
URLUsername string = `(\S+(:\S*)?@)`
URLPath string = `((\/|\?|#)[^\s]*)`
URLPort string = `(:(\d{1,5}))`
URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
URLSubdomain string = `((www\.)|([a-zA-Z0-9]([-\.][-\._a-zA-Z0-9]+)*))`
URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
UnixPath string = `^(/[^/\x00]*)+/?$`
Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
tagName string = "valid"
hasLowerCase string = ".*[[:lower:]]"
hasUpperCase string = ".*[[:upper:]]"
Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
ISBN13 string = "^(?:[0-9]{13})$"
UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
Alpha string = "^[a-zA-Z]+$"
Alphanumeric string = "^[a-zA-Z0-9]+$"
Numeric string = "^[0-9]+$"
Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
Hexadecimal string = "^[0-9a-fA-F]+$"
Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
ASCII string = "^[\x00-\x7F]+$"
Multibyte string = "[^\x00-\x7F]"
FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
PrintableASCII string = "^[\x20-\x7E]+$"
DataURI string = "^data:.+\\/(.+);base64$"
Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
URLUsername string = `(\S+(:\S*)?@)`
URLPath string = `((\/|\?|#)[^\s]*)`
URLPort string = `(:(\d{1,5}))`
URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
UnixPath string = `^(/[^/\x00]*)+/?$`
Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
tagName string = "valid"
hasLowerCase string = ".*[[:lower:]]"
hasUpperCase string = ".*[[:upper:]]"
hasWhitespace string = ".*[[:space:]]"
hasWhitespaceOnly string = "^[[:space:]]+$"
)
// Used by IsFilePath func
@ -58,40 +60,42 @@ const (
)
var (
userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
rxEmail = regexp.MustCompile(Email)
rxCreditCard = regexp.MustCompile(CreditCard)
rxISBN10 = regexp.MustCompile(ISBN10)
rxISBN13 = regexp.MustCompile(ISBN13)
rxUUID3 = regexp.MustCompile(UUID3)
rxUUID4 = regexp.MustCompile(UUID4)
rxUUID5 = regexp.MustCompile(UUID5)
rxUUID = regexp.MustCompile(UUID)
rxAlpha = regexp.MustCompile(Alpha)
rxAlphanumeric = regexp.MustCompile(Alphanumeric)
rxNumeric = regexp.MustCompile(Numeric)
rxInt = regexp.MustCompile(Int)
rxFloat = regexp.MustCompile(Float)
rxHexadecimal = regexp.MustCompile(Hexadecimal)
rxHexcolor = regexp.MustCompile(Hexcolor)
rxRGBcolor = regexp.MustCompile(RGBcolor)
rxASCII = regexp.MustCompile(ASCII)
rxPrintableASCII = regexp.MustCompile(PrintableASCII)
rxMultibyte = regexp.MustCompile(Multibyte)
rxFullWidth = regexp.MustCompile(FullWidth)
rxHalfWidth = regexp.MustCompile(HalfWidth)
rxBase64 = regexp.MustCompile(Base64)
rxDataURI = regexp.MustCompile(DataURI)
rxLatitude = regexp.MustCompile(Latitude)
rxLongitude = regexp.MustCompile(Longitude)
rxDNSName = regexp.MustCompile(DNSName)
rxURL = regexp.MustCompile(URL)
rxSSN = regexp.MustCompile(SSN)
rxWinPath = regexp.MustCompile(WinPath)
rxUnixPath = regexp.MustCompile(UnixPath)
rxSemver = regexp.MustCompile(Semver)
rxHasLowerCase = regexp.MustCompile(hasLowerCase)
rxHasUpperCase = regexp.MustCompile(hasUpperCase)
userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
rxEmail = regexp.MustCompile(Email)
rxCreditCard = regexp.MustCompile(CreditCard)
rxISBN10 = regexp.MustCompile(ISBN10)
rxISBN13 = regexp.MustCompile(ISBN13)
rxUUID3 = regexp.MustCompile(UUID3)
rxUUID4 = regexp.MustCompile(UUID4)
rxUUID5 = regexp.MustCompile(UUID5)
rxUUID = regexp.MustCompile(UUID)
rxAlpha = regexp.MustCompile(Alpha)
rxAlphanumeric = regexp.MustCompile(Alphanumeric)
rxNumeric = regexp.MustCompile(Numeric)
rxInt = regexp.MustCompile(Int)
rxFloat = regexp.MustCompile(Float)
rxHexadecimal = regexp.MustCompile(Hexadecimal)
rxHexcolor = regexp.MustCompile(Hexcolor)
rxRGBcolor = regexp.MustCompile(RGBcolor)
rxASCII = regexp.MustCompile(ASCII)
rxPrintableASCII = regexp.MustCompile(PrintableASCII)
rxMultibyte = regexp.MustCompile(Multibyte)
rxFullWidth = regexp.MustCompile(FullWidth)
rxHalfWidth = regexp.MustCompile(HalfWidth)
rxBase64 = regexp.MustCompile(Base64)
rxDataURI = regexp.MustCompile(DataURI)
rxLatitude = regexp.MustCompile(Latitude)
rxLongitude = regexp.MustCompile(Longitude)
rxDNSName = regexp.MustCompile(DNSName)
rxURL = regexp.MustCompile(URL)
rxSSN = regexp.MustCompile(SSN)
rxWinPath = regexp.MustCompile(WinPath)
rxUnixPath = regexp.MustCompile(UnixPath)
rxSemver = regexp.MustCompile(Semver)
rxHasLowerCase = regexp.MustCompile(hasLowerCase)
rxHasUpperCase = regexp.MustCompile(hasUpperCase)
rxHasWhitespace = regexp.MustCompile(hasWhitespace)
rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
)

View File

@ -3,6 +3,7 @@ package govalidator
import (
"reflect"
"regexp"
"sort"
"sync"
)
@ -15,7 +16,26 @@ type CustomTypeValidator func(i interface{}, o interface{}) bool
// ParamValidator is a wrapper for validator functions that accepts additional parameters.
type ParamValidator func(str string, params ...string) bool
type tagOptionsMap map[string]string
type tagOptionsMap map[string]tagOption
func (t tagOptionsMap) orderedKeys() []string {
var keys []string
for k := range t {
keys = append(keys, k)
}
sort.Slice(keys, func(a, b int) bool {
return t[keys[a]].order < t[keys[b]].order
})
return keys
}
type tagOption struct {
name string
customErrorMessage string
order int
}
// UnsupportedTypeError is a wrapper for reflect.Type
type UnsupportedTypeError struct {

View File

@ -262,3 +262,9 @@ func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight b
return leftSide + str + rightSide
}
// TruncatingErrorf calls fmt.Errorf with str, keeping only as many args as there are %s verbs and dropping the rest
func TruncatingErrorf(str string, args ...interface{}) error {
n := strings.Count(str, "%s")
return fmt.Errorf(str, args[:n]...)
}
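// Illustrative example: TruncatingErrorf("%s is not valid", "foo", "email")
// finds a single %s verb, drops the extra "email" argument, and returns the
// error "foo is not valid".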

View File

@ -24,9 +24,10 @@ import (
var (
fieldsRequiredByDefault bool
nilPtrAllowedByRequired = false
notNumberRegexp = regexp.MustCompile("[^0-9]+")
whiteSpacesAndMinus = regexp.MustCompile("[\\s-]+")
paramsRegexp = regexp.MustCompile("\\(.*\\)$")
whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`)
paramsRegexp = regexp.MustCompile(`\(.*\)$`)
)
const maxURLRuneCount = 2083
@ -51,6 +52,17 @@ func SetFieldsRequiredByDefault(value bool) {
fieldsRequiredByDefault = value
}
// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required.
// The validation will still reject ptr fields in their zero value state. Example with this enabled:
// type exampleStruct struct {
// Name *string `valid:"required"`
// }
// With `Name` set to "", this will be considered invalid input and will cause a validation error.
// With `Name` set to nil, this will be considered valid by validation.
// By default this is disabled.
func SetNilPtrAllowedByRequired(value bool) {
nilPtrAllowedByRequired = value
}
// IsEmail check if the string is an email.
func IsEmail(str string) bool {
// TODO uppercase letters are not supported
@ -94,7 +106,7 @@ func IsURL(str string) bool {
return false
}
strTemp := str
if strings.Index(str, ":") >= 0 && strings.Index(str, "://") == -1 {
if strings.Contains(str, ":") && !strings.Contains(str, "://") {
// support URLs that omit the scheme but include a colon for the port number
// http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString
strTemp = "http://" + str
@ -202,7 +214,7 @@ func IsUTFNumeric(str string) bool {
str = strings.TrimPrefix(str, "+")
}
for _, c := range str {
if unicode.IsNumber(c) == false { //numbers && minus sign are ok
if !unicode.IsNumber(c) { //numbers && minus sign are ok
return false
}
}
@ -309,6 +321,16 @@ func IsNull(str string) bool {
return len(str) == 0
}
// HasWhitespaceOnly checks if the string contains only whitespace
func HasWhitespaceOnly(str string) bool {
return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
}
// HasWhitespace checks if the string contains any whitespace
func HasWhitespace(str string) bool {
return len(str) > 0 && rxHasWhitespace.MatchString(str)
}
// IsByteLength check if the string's length (in bytes) falls in a range.
func IsByteLength(str string, min, max int) bool {
return len(str) >= min && len(str) <= max
@ -360,10 +382,7 @@ func IsCreditCard(str string) bool {
shouldDouble = !shouldDouble
}
if sum%10 == 0 {
return true
}
return false
return sum%10 == 0
}
// IsISBN10 check if the string is an ISBN version 10.
@ -406,10 +425,7 @@ func IsISBN(str string, version int) bool {
for i = 0; i < 12; i++ {
checksum += factor[i%2] * int32(sanitized[i]-'0')
}
if (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 {
return true
}
return false
return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
}
return IsISBN(str, 10) || IsISBN(str, 13)
}
@ -701,6 +717,22 @@ func toJSONName(tag string) string {
return name
}
func PrependPathToErrors(err error, path string) error {
switch err2 := err.(type) {
case Error:
err2.Path = append([]string{path}, err2.Path...)
return err2
case Errors:
errors := err2.Errors()
for i, err3 := range errors {
errors[i] = PrependPathToErrors(err3, path)
}
return err2
}
fmt.Println(err)
return err
}
// ValidateStruct use tags for fields.
// result will be equal to `false` if there are any errors.
func ValidateStruct(s interface{}) (bool, error) {
@ -725,12 +757,16 @@ func ValidateStruct(s interface{}) (bool, error) {
continue // Private field
}
structResult := true
if valueField.Kind() == reflect.Interface {
valueField = valueField.Elem()
}
if (valueField.Kind() == reflect.Struct ||
(valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
typeField.Tag.Get(tagName) != "-" {
var err error
structResult, err = ValidateStruct(valueField.Interface())
if err != nil {
err = PrependPathToErrors(err, typeField.Name)
errs = append(errs, err)
}
}
@ -772,7 +808,7 @@ func parseTagIntoMap(tag string) tagOptionsMap {
optionsMap := make(tagOptionsMap)
options := strings.Split(tag, ",")
for _, option := range options {
for i, option := range options {
option = strings.TrimSpace(option)
validationOptions := strings.Split(option, "~")
@ -780,9 +816,9 @@ func parseTagIntoMap(tag string) tagOptionsMap {
continue
}
if len(validationOptions) == 2 {
optionsMap[validationOptions[0]] = validationOptions[1]
optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i}
} else {
optionsMap[validationOptions[0]] = ""
optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i}
}
}
return optionsMap
@ -933,13 +969,20 @@ func IsIn(str string, params ...string) bool {
}
func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) {
if requiredOption, isRequired := options["required"]; isRequired {
if len(requiredOption) > 0 {
return false, Error{t.Name, fmt.Errorf(requiredOption), true, "required"}
if nilPtrAllowedByRequired {
k := v.Kind()
if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() {
return true, nil
}
return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required"}
}
if requiredOption, isRequired := options["required"]; isRequired {
if len(requiredOption.customErrorMessage) > 0 {
return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}}
}
return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}}
} else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional {
return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required"}
return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}}
}
// not required and empty is valid
return true, nil
@ -955,10 +998,12 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
// Check if the field should be ignored
switch tag {
case "":
if !fieldsRequiredByDefault {
return true, nil
if v.Kind() != reflect.Slice && v.Kind() != reflect.Map {
if !fieldsRequiredByDefault {
return true, nil
}
return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}}
}
return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required"}
case "-":
return true, nil
}
@ -971,17 +1016,23 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
if isEmptyValue(v) {
// an empty value is not validated, check only required
return checkRequired(v, t, options)
isValid, resultErr = checkRequired(v, t, options)
for key := range options {
delete(options, key)
}
return isValid, resultErr
}
var customTypeErrors Errors
for validatorName, customErrorMessage := range options {
optionsOrder := options.orderedKeys()
for _, validatorName := range optionsOrder {
validatorStruct := options[validatorName]
if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok {
delete(options, validatorName)
if result := validatefunc(v.Interface(), o.Interface()); !result {
if len(customErrorMessage) > 0 {
customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf(customErrorMessage), CustomErrorMessageExists: true, Validator: stripParams(validatorName)})
if len(validatorStruct.customErrorMessage) > 0 {
customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)})
continue
}
customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)})
@ -1000,10 +1051,11 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
delete(options, "required")
if isValid && resultErr == nil && len(options) != 0 {
for validator := range options {
optionsOrder := options.orderedKeys()
for _, validator := range optionsOrder {
isValid = false
resultErr = Error{t.Name, fmt.Errorf(
"The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator)}
"The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}}
return
}
}
@ -1017,10 +1069,11 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
reflect.Float32, reflect.Float64,
reflect.String:
// for each tag option check the map of validator functions
for validatorSpec, customErrorMessage := range options {
for _, validatorSpec := range optionsOrder {
validatorStruct := options[validatorSpec]
var negate bool
validator := validatorSpec
customMsgExists := len(customErrorMessage) > 0
customMsgExists := len(validatorStruct.customErrorMessage) > 0
// Check whether the tag looks like '!something' or 'something'
if validator[0] == '!' {
@ -1051,16 +1104,16 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
field := fmt.Sprint(v) // make value into string, then validate with regex
if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) {
if customMsgExists {
return false, Error{t.Name, fmt.Errorf(customErrorMessage), customMsgExists, stripParams(validatorSpec)}
return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
}
if negate {
return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec)}
return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
}
return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec)}
return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
}
default:
// type not yet supported, fail
return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec)}
return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}}
}
}
@ -1068,21 +1121,24 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
delete(options, validatorSpec)
switch v.Kind() {
case reflect.String:
case reflect.String,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Float32, reflect.Float64:
field := fmt.Sprint(v) // make value into string, then validate with regex
if result := validatefunc(field); !result && !negate || result && negate {
if customMsgExists {
return false, Error{t.Name, fmt.Errorf(customErrorMessage), customMsgExists, stripParams(validatorSpec)}
return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
}
if negate {
return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec)}
return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
}
return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec)}
return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
}
default:
//Not Yet Supported Types (Fail here!)
err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
return false, Error{t.Name, err, false, stripParams(validatorSpec)}
return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}}
}
}
}
@ -1095,7 +1151,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
sv = v.MapKeys()
sort.Sort(sv)
result := true
for _, k := range sv {
for i, k := range sv {
var resultItem bool
var err error
if v.MapIndex(k).Kind() != reflect.Struct {
@ -1106,6 +1162,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
} else {
resultItem, err = ValidateStruct(v.MapIndex(k).Interface())
if err != nil {
err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
return false, err
}
}
@ -1125,6 +1182,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options
} else {
resultItem, err = ValidateStruct(v.Index(i).Interface())
if err != nil {
err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
return false, err
}
}
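Both recursion branches now prefix nested errors with the path to the failing element (map key, or slice index via strconv.Itoa), so a caller can tell which entry broke. A hedged example of what that reads like:

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type Member struct {
	Email string `valid:"email"`
}

type Team struct {
	Members []Member
}

func main() {
	team := Team{Members: []Member{{Email: "ok@example.com"}, {Email: "not-an-email"}}}
	ok, err := govalidator.ValidateStruct(team)
	fmt.Println(ok)  // false
	fmt.Println(err) // something like: Members.1.Email: not-an-email does not validate as email
}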

View File

@ -18,7 +18,7 @@ const UseServiceDefaultRetries = -1
type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig tructure.
// all clients will use the defaults.DefaultConfig structure.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.
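The corrected comment describes sharing one configuration across several service clients. A minimal hedged sketch of that pattern using the SDK's public session and config APIs (retry count and region are placeholders):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// One Session with a shared MaxRetries setting, reused by several service clients.
	sess := session.Must(session.NewSession(aws.NewConfig().WithMaxRetries(3)))

	// Clients created from this Session inherit the retry setting; a per-client
	// Config can still override pieces of it (region value is a placeholder).
	_ = s3.New(sess)
	_ = dynamodb.New(sess, aws.NewConfig().WithRegion("us-west-2"))
}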

View File

@ -158,13 +158,14 @@ func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
// IsExpired returns if the credentials are expired.
func (e *Expiry) IsExpired() bool {
if e.CurrentTime == nil {
e.CurrentTime = time.Now
curTime := e.CurrentTime
if curTime == nil {
curTime = time.Now
}
return e.expiration.Before(e.CurrentTime())
return e.expiration.Before(curTime())
}
// A Credentials provides synchronous safe retrieval of AWS credentials Value.
// A Credentials provides concurrency safe retrieval of AWS credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
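The IsExpired change above stops the read path from writing back to e.CurrentTime, which is what makes the reworded "concurrency safe" claim accurate: concurrent expiry checks no longer race on that field. A hedged sketch of the concurrent usage the type is documented to support:

package main

import (
	"fmt"
	"sync"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Static credentials never expire; they stand in for any provider here.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if _, err := creds.Get(); err != nil { // cached after the first retrieval
				fmt.Println("get:", err)
			}
			_ = creds.IsExpired() // a read-only check after the fix above
		}()
	}
	wg.Wait()
}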

View File

@ -4,7 +4,6 @@ import (
"bufio"
"encoding/json"
"fmt"
"path"
"strings"
"time"
@ -12,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/internal/sdkuri"
)
// ProviderName provides a name of EC2Role provider
@ -125,7 +125,7 @@ type ec2RoleCredRespBody struct {
Message string
}
const iamSecurityCredsPath = "/iam/security-credentials"
const iamSecurityCredsPath = "iam/security-credentials/"
// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request
@ -153,7 +153,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
// If the credentials cannot be found, or there is an error reading the response
// and error will be returned.
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
if err != nil {
return ec2RoleCredRespBody{},
awserr.New("EC2RoleRequestError",

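The constant now keeps its trailing slash and the join moved from path.Join to sdkuri.PathJoin. The relevant difference, as far as can be inferred here, is that path.Join runs path.Clean and drops a trailing slash, while the SDK helper preserves it; below is a hedged sketch of a join with that behaviour, not the SDK's actual implementation.

package main

import (
	"fmt"
	"path"
	"strings"
)

// pathJoin keeps the trailing slash of the last element, which path.Join (via
// path.Clean) strips; this is assumed to be the behaviour sdkuri.PathJoin provides.
func pathJoin(elems ...string) string {
	if len(elems) == 0 {
		return ""
	}
	trailing := strings.HasSuffix(elems[len(elems)-1], "/")
	joined := path.Join(elems...)
	if trailing && !strings.HasSuffix(joined, "/") {
		joined += "/"
	}
	return joined
}

func main() {
	fmt.Println(path.Join("iam/security-credentials/")) // iam/security-credentials
	fmt.Println(pathJoin("iam/security-credentials/"))  // iam/security-credentials/
	fmt.Println(pathJoin("iam/security-credentials/", "role-name")) // iam/security-credentials/role-name
}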
Some files were not shown because too many files have changed in this diff.