vendor: Update to latest hcl2

* Includes fixes for handling null blocks: https://github.com/hashicorp/hcl2/pull/87
* Updates go-cty, since a newer version is required by hcl2

This commit is contained in:
parent f7102cd01d
commit b59ed9d6c9
vendor/github.com/hashicorp/hcl2/ext/userfunc/README.md (generated, vendored): 28 lines removed
@@ -1,28 +0,0 @@
# HCL User Functions Extension

This HCL extension allows a calling application to support user-defined
functions.

Functions are defined via a specific block type, like this:

```hcl
function "add" {
  params = [a, b]
  result = a + b
}

function "list" {
  params = []
  variadic_param = items
  result = items
}
```

The extension is implemented as a pre-processor for `cty.Body` objects. Given
a body that may contain functions, the `DecodeUserFunctions` function searches
for blocks that define functions and returns a functions map suitable for
inclusion in a `hcl.EvalContext`. It also returns a new `cty.Body` that
contains the remainder of the content from the given body, allowing for
further processing of remaining content.

For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl2/ext/userfunc).
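For orientation, the sketch below shows one way an application could consume the extension documented above, wiring `DecodeUserFunctions` into an `hcl.EvalContext`. It is an illustration only, not part of this commit; the configuration source, the `sum` attribute, and the use of the `hclparse` helper package are assumptions for the example.

```go
// Illustrative sketch (not vendored code): decode user functions and then
// evaluate the remaining configuration with them in scope.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/ext/userfunc"
	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclparse"
)

func main() {
	src := `
function "add" {
  params = [a, b]
  result = a + b
}

sum = add(2, 3)
`
	parser := hclparse.NewParser()
	f, diags := parser.ParseHCL([]byte(src), "example.hcl")
	if diags.HasErrors() {
		panic(diags)
	}

	// Decode the function blocks; "remain" holds everything else (here: sum).
	funcs, remain, diags := userfunc.DecodeUserFunctions(f.Body, "function", nil)
	if diags.HasErrors() {
		panic(diags)
	}

	// Evaluate the remaining body with the decoded functions in scope.
	ctx := &hcl.EvalContext{Functions: funcs}
	attrs, diags := remain.JustAttributes()
	if diags.HasErrors() {
		panic(diags)
	}
	val, diags := attrs["sum"].Expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(val.AsBigFloat()) // 5
}
```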
vendor/github.com/hashicorp/hcl2/ext/userfunc/decode.go (generated, vendored): 156 lines removed
@@ -1,156 +0,0 @@
package userfunc

import (
	"github.com/hashicorp/hcl2/hcl"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

var funcBodySchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name:     "params",
			Required: true,
		},
		{
			Name:     "variadic_param",
			Required: false,
		},
		{
			Name:     "result",
			Required: true,
		},
	},
}

func decodeUserFunctions(body hcl.Body, blockType string, contextFunc ContextFunc) (funcs map[string]function.Function, remain hcl.Body, diags hcl.Diagnostics) {
	schema := &hcl.BodySchema{
		Blocks: []hcl.BlockHeaderSchema{
			{
				Type:       blockType,
				LabelNames: []string{"name"},
			},
		},
	}

	content, remain, diags := body.PartialContent(schema)
	if diags.HasErrors() {
		return nil, remain, diags
	}

	// first call to getBaseCtx will populate context, and then the same
	// context will be used for all subsequent calls. It's assumed that
	// all functions in a given body should see an identical context.
	var baseCtx *hcl.EvalContext
	getBaseCtx := func() *hcl.EvalContext {
		if baseCtx == nil {
			if contextFunc != nil {
				baseCtx = contextFunc()
			}
		}
		// baseCtx might still be nil here, and that's okay
		return baseCtx
	}

	funcs = make(map[string]function.Function)
Blocks:
	for _, block := range content.Blocks {
		name := block.Labels[0]
		funcContent, funcDiags := block.Body.Content(funcBodySchema)
		diags = append(diags, funcDiags...)
		if funcDiags.HasErrors() {
			continue
		}

		paramsExpr := funcContent.Attributes["params"].Expr
		resultExpr := funcContent.Attributes["result"].Expr
		var varParamExpr hcl.Expression
		if funcContent.Attributes["variadic_param"] != nil {
			varParamExpr = funcContent.Attributes["variadic_param"].Expr
		}

		var params []string
		var varParam string

		paramExprs, paramsDiags := hcl.ExprList(paramsExpr)
		diags = append(diags, paramsDiags...)
		if paramsDiags.HasErrors() {
			continue
		}
		for _, paramExpr := range paramExprs {
			param := hcl.ExprAsKeyword(paramExpr)
			if param == "" {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid param element",
					Detail:   "Each parameter name must be an identifier.",
					Subject:  paramExpr.Range().Ptr(),
				})
				continue Blocks
			}
			params = append(params, param)
		}

		if varParamExpr != nil {
			varParam = hcl.ExprAsKeyword(varParamExpr)
			if varParam == "" {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid variadic_param",
					Detail:   "The variadic parameter name must be an identifier.",
					Subject:  varParamExpr.Range().Ptr(),
				})
				continue
			}
		}

		spec := &function.Spec{}
		for _, paramName := range params {
			spec.Params = append(spec.Params, function.Parameter{
				Name: paramName,
				Type: cty.DynamicPseudoType,
			})
		}
		if varParamExpr != nil {
			spec.VarParam = &function.Parameter{
				Name: varParam,
				Type: cty.DynamicPseudoType,
			}
		}
		impl := func(args []cty.Value) (cty.Value, error) {
			ctx := getBaseCtx()
			ctx = ctx.NewChild()
			ctx.Variables = make(map[string]cty.Value)

			// The cty function machinery guarantees that we have at least
			// enough args to fill all of our params.
			for i, paramName := range params {
				ctx.Variables[paramName] = args[i]
			}
			if spec.VarParam != nil {
				varArgs := args[len(params):]
				ctx.Variables[varParam] = cty.TupleVal(varArgs)
			}

			result, diags := resultExpr.Value(ctx)
			if diags.HasErrors() {
				// Smuggle the diagnostics out via the error channel, since
				// a diagnostics sequence implements error. Caller can
				// type-assert this to recover the individual diagnostics
				// if desired.
				return cty.DynamicVal, diags
			}
			return result, nil
		}
		spec.Type = func(args []cty.Value) (cty.Type, error) {
			val, err := impl(args)
			return val.Type(), err
		}
		spec.Impl = func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			return impl(args)
		}
		funcs[name] = function.New(spec)
	}

	return funcs, remain, diags
}
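The comment inside `impl` above notes that evaluation errors are smuggled out through the error return as an `hcl.Diagnostics` value. As a hedged illustration (not vendored code), a caller could recover them as sketched below; the package name, helper name, and argument values are assumptions.

```go
// Illustrative sketch: recover hcl.Diagnostics from a failed call to a
// function produced by DecodeUserFunctions.
package userfuncdemo

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

// callUserFunc calls one decoded function and, on failure, type-asserts the
// error back to hcl.Diagnostics per the "smuggling" convention noted above.
func callUserFunc(funcs map[string]function.Function, name string) {
	f, ok := funcs[name]
	if !ok {
		fmt.Printf("no function named %q\n", name)
		return
	}
	result, err := f.Call([]cty.Value{cty.NumberIntVal(2), cty.NumberIntVal(3)})
	if err != nil {
		if diags, ok := err.(hcl.Diagnostics); ok {
			for _, diag := range diags {
				fmt.Println("call failed:", diag.Error())
			}
			return
		}
		fmt.Println("call failed:", err)
		return
	}
	fmt.Println("result:", result.GoString())
}
```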
vendor/github.com/hashicorp/hcl2/ext/userfunc/doc.go (generated, vendored): 22 lines removed
@@ -1,22 +0,0 @@
// Package userfunc implements a HCL extension that allows user-defined
// functions in HCL configuration.
//
// Using this extension requires some integration effort on the part of the
// calling application, to pass any declared functions into a HCL evaluation
// context after processing.
//
// The function declaration syntax looks like this:
//
//     function "foo" {
//       params = ["name"]
//       result = "Hello, ${name}!"
//     }
//
// When a user-defined function is called, the expression given for the "result"
// attribute is evaluated in an isolated evaluation context that defines variables
// named after the given parameter names.
//
// The block name "function" may be overridden by the calling application, if
// that default name conflicts with an existing block or attribute name in
// the application.
package userfunc
vendor/github.com/hashicorp/hcl2/ext/userfunc/public.go (generated, vendored): 42 lines removed
@@ -1,42 +0,0 @@
package userfunc

import (
	"github.com/hashicorp/hcl2/hcl"
	"github.com/zclconf/go-cty/cty/function"
)

// A ContextFunc is a callback used to produce the base EvalContext for
// running a particular set of functions.
//
// This is a function rather than an EvalContext directly to allow functions
// to be decoded before their context is complete. This will be true, for
// example, for applications that wish to allow functions to refer to themselves.
//
// The simplest use of a ContextFunc is to give user functions access to the
// same global variables and functions available elsewhere in an application's
// configuration language, but more complex applications may use different
// contexts to support lexical scoping depending on where in a configuration
// structure a function declaration is found, etc.
type ContextFunc func() *hcl.EvalContext

// DecodeUserFunctions looks for blocks of the given type in the given body
// and, for each one found, interprets it as a custom function definition.
//
// On success, the result is a mapping of function names to implementations,
// along with a new body that represents the remaining content of the given
// body which can be used for further processing.
//
// The result expression of each function is parsed during decoding but not
// evaluated until the function is called.
//
// If the given ContextFunc is non-nil, it will be called to obtain the
// context in which the function result expressions will be evaluated. If nil,
// or if it returns nil, the result expression will have access only to
// variables named after the declared parameters. A non-nil context turns
// the returned functions into closures, bound to the given context.
//
// If the returned diagnostics set has errors then the function map and
// remain body may be nil or incomplete.
func DecodeUserFunctions(body hcl.Body, blockType string, context ContextFunc) (funcs map[string]function.Function, remain hcl.Body, diags hcl.Diagnostics) {
	return decodeUserFunctions(body, blockType, context)
}
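The ContextFunc documentation above suggests supplying a base context so that the decoded user functions become closures over application-level values. A minimal sketch of that pattern follows; the `config` package name and the `app_name` variable are invented for illustration.

```go
// Illustrative sketch (not vendored code) of the ContextFunc pattern.
package config

import (
	"github.com/hashicorp/hcl2/ext/userfunc"
	"github.com/hashicorp/hcl2/hcl"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

// decodeWithBaseContext decodes user functions whose result expressions can
// also see an application-provided variable, per the ContextFunc docs above.
func decodeWithBaseContext(body hcl.Body) (map[string]function.Function, hcl.Body, hcl.Diagnostics) {
	baseCtx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			// Hypothetical application-wide value visible inside every
			// user function's result expression.
			"app_name": cty.StringVal("example"),
		},
	}
	return userfunc.DecodeUserFunctions(body, "function", func() *hcl.EvalContext {
		return baseCtx
	})
}
```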
vendor/github.com/hashicorp/hcl2/gohcl/doc.go (generated, vendored): 4 lines changed
@@ -40,6 +40,10 @@
 // present then any attributes or blocks not matched by another valid tag
 // will cause an error diagnostic.
 //
+// Only a subset of this tagging/typing vocabulary is supported for the
+// "Encode" family of functions. See the EncodeIntoBody docs for full details
+// on the constraints there.
+//
 // Broadly-speaking this package deals with two types of error. The first is
 // errors in the configuration itself, which are returned as diagnostics
 // written with the configuration author as the target audience. The second
vendor/github.com/hashicorp/hcl2/gohcl/encode.go (generated, vendored): new file, 191 lines added
@@ -0,0 +1,191 @@
|
|||
package gohcl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"github.com/hashicorp/hcl2/hclwrite"
|
||||
"github.com/zclconf/go-cty/cty/gocty"
|
||||
)
|
||||
|
||||
// EncodeIntoBody replaces the contents of the given hclwrite Body with
|
||||
// attributes and blocks derived from the given value, which must be a
|
||||
// struct value or a pointer to a struct value with the struct tags defined
|
||||
// in this package.
|
||||
//
|
||||
// This function can work only with fully-decoded data. It will ignore any
|
||||
// fields tagged as "remain", any fields that decode attributes into either
|
||||
// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
|
||||
// into hcl.Attributes values. This function does not have enough information
|
||||
// to complete the decoding of these types.
|
||||
//
|
||||
// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
|
||||
// to produce a whole hclwrite.Block including block labels.
|
||||
//
|
||||
// As long as a suitable value is given to encode and the destination body
|
||||
// is non-nil, this function will always complete. It will panic in case of
|
||||
// any errors in the calling program, such as passing an inappropriate type
|
||||
// or a nil body.
|
||||
//
|
||||
// The layout of the resulting HCL source is derived from the ordering of
|
||||
// the struct fields, with blank lines around nested blocks of different types.
|
||||
// Fields representing attributes should usually precede those representing
|
||||
// blocks so that the attributes can group togather in the result. For more
|
||||
// control, use the hclwrite API directly.
|
||||
func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
|
||||
rv := reflect.ValueOf(val)
|
||||
ty := rv.Type()
|
||||
if ty.Kind() == reflect.Ptr {
|
||||
rv = rv.Elem()
|
||||
ty = rv.Type()
|
||||
}
|
||||
if ty.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
|
||||
}
|
||||
|
||||
tags := getFieldTags(ty)
|
||||
populateBody(rv, ty, tags, dst)
|
||||
}
|
||||
|
||||
// EncodeAsBlock creates a new hclwrite.Block populated with the data from
|
||||
// the given value, which must be a struct or pointer to struct with the
|
||||
// struct tags defined in this package.
|
||||
//
|
||||
// If the given struct type has fields tagged with "label" tags then they
|
||||
// will be used in order to annotate the created block with labels.
|
||||
//
|
||||
// This function has the same constraints as EncodeIntoBody and will panic
|
||||
// if they are violated.
|
||||
func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block {
|
||||
rv := reflect.ValueOf(val)
|
||||
ty := rv.Type()
|
||||
if ty.Kind() == reflect.Ptr {
|
||||
rv = rv.Elem()
|
||||
ty = rv.Type()
|
||||
}
|
||||
if ty.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
|
||||
}
|
||||
|
||||
tags := getFieldTags(ty)
|
||||
labels := make([]string, len(tags.Labels))
|
||||
for i, lf := range tags.Labels {
|
||||
lv := rv.Field(lf.FieldIndex)
|
||||
// We just stringify whatever we find. It should always be a string
|
||||
// but if not then we'll still do something reasonable.
|
||||
labels[i] = fmt.Sprintf("%s", lv.Interface())
|
||||
}
|
||||
|
||||
block := hclwrite.NewBlock(blockType, labels)
|
||||
populateBody(rv, ty, tags, block.Body())
|
||||
return block
|
||||
}
|
||||
|
||||
func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) {
|
||||
nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks))
|
||||
namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks))
|
||||
for n, i := range tags.Attributes {
|
||||
nameIdxs[n] = i
|
||||
namesOrder = append(namesOrder, n)
|
||||
}
|
||||
for n, i := range tags.Blocks {
|
||||
nameIdxs[n] = i
|
||||
namesOrder = append(namesOrder, n)
|
||||
}
|
||||
sort.SliceStable(namesOrder, func(i, j int) bool {
|
||||
ni, nj := namesOrder[i], namesOrder[j]
|
||||
return nameIdxs[ni] < nameIdxs[nj]
|
||||
})
|
||||
|
||||
dst.Clear()
|
||||
|
||||
prevWasBlock := false
|
||||
for _, name := range namesOrder {
|
||||
fieldIdx := nameIdxs[name]
|
||||
field := ty.Field(fieldIdx)
|
||||
fieldTy := field.Type
|
||||
fieldVal := rv.Field(fieldIdx)
|
||||
|
||||
if fieldTy.Kind() == reflect.Ptr {
|
||||
fieldTy = fieldTy.Elem()
|
||||
fieldVal = fieldVal.Elem()
|
||||
}
|
||||
|
||||
if _, isAttr := tags.Attributes[name]; isAttr {
|
||||
|
||||
if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) {
|
||||
continue // ignore undecoded fields
|
||||
}
|
||||
if !fieldVal.IsValid() {
|
||||
continue // ignore (field value is nil pointer)
|
||||
}
|
||||
if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
|
||||
continue // ignore
|
||||
}
|
||||
if prevWasBlock {
|
||||
dst.AppendNewline()
|
||||
prevWasBlock = false
|
||||
}
|
||||
|
||||
valTy, err := gocty.ImpliedType(fieldVal.Interface())
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err))
|
||||
}
|
||||
|
||||
val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy)
|
||||
if err != nil {
|
||||
// This should never happen, since we should always be able
|
||||
// to decode into the implied type.
|
||||
panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err))
|
||||
}
|
||||
|
||||
dst.SetAttributeValue(name, val)
|
||||
|
||||
} else { // must be a block, then
|
||||
elemTy := fieldTy
|
||||
isSeq := false
|
||||
if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array {
|
||||
isSeq = true
|
||||
elemTy = elemTy.Elem()
|
||||
}
|
||||
|
||||
if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) {
|
||||
continue // ignore undecoded fields
|
||||
}
|
||||
prevWasBlock = false
|
||||
|
||||
if isSeq {
|
||||
l := fieldVal.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
elemVal := fieldVal.Index(i)
|
||||
if !elemVal.IsValid() {
|
||||
continue // ignore (elem value is nil pointer)
|
||||
}
|
||||
if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() {
|
||||
continue // ignore
|
||||
}
|
||||
block := EncodeAsBlock(elemVal.Interface(), name)
|
||||
if !prevWasBlock {
|
||||
dst.AppendNewline()
|
||||
prevWasBlock = true
|
||||
}
|
||||
dst.AppendBlock(block)
|
||||
}
|
||||
} else {
|
||||
if !fieldVal.IsValid() {
|
||||
continue // ignore (field value is nil pointer)
|
||||
}
|
||||
if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
|
||||
continue // ignore
|
||||
}
|
||||
block := EncodeAsBlock(fieldVal.Interface(), name)
|
||||
if !prevWasBlock {
|
||||
dst.AppendNewline()
|
||||
prevWasBlock = true
|
||||
}
|
||||
dst.AppendBlock(block)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
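The doc comments on `EncodeIntoBody` and `EncodeAsBlock` in the new file above describe struct-tag-driven encoding into an `hclwrite.Body`. The sketch below shows the intended usage under assumed struct shapes (`Config`, `Service`) and field names invented for illustration; only the `gohcl` and `hclwrite` calls come from the vendored packages.

```go
// Illustrative sketch (not vendored code): encode a tagged Go struct as HCL.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/gohcl"
	"github.com/hashicorp/hcl2/hclwrite"
)

type Service struct {
	Name  string `hcl:"name,label"`
	Image string `hcl:"image"`
	Port  int    `hcl:"port"`
}

type Config struct {
	Region   string    `hcl:"region"`
	Services []Service `hcl:"service,block"`
}

func main() {
	cfg := Config{
		Region: "us-east-1",
		Services: []Service{
			{Name: "web", Image: "nginx:latest", Port: 8080},
		},
	}

	f := hclwrite.NewEmptyFile()
	gohcl.EncodeIntoBody(&cfg, f.Body())
	fmt.Printf("%s", f.Bytes())
	// Expected shape of the output (attributes first, then blocks):
	//
	//   region = "us-east-1"
	//
	//   service "web" {
	//     image = "nginx:latest"
	//     port  = 8080
	//   }
}
```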
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go (generated, vendored): 154 lines changed
@@ -132,7 +132,7 @@ type RelativeTraversalExpr struct {
|
|||
}
|
||||
|
||||
func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) {
|
||||
// Scope traversals have no child nodes
|
||||
w(e.Source)
|
||||
}
|
||||
|
||||
func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
|
@ -181,8 +181,8 @@ type FunctionCallExpr struct {
|
|||
}
|
||||
|
||||
func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) {
|
||||
for i, arg := range e.Args {
|
||||
e.Args[i] = w(arg).(Expression)
|
||||
for _, arg := range e.Args {
|
||||
w(arg)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -463,9 +463,9 @@ type ConditionalExpr struct {
|
|||
}
|
||||
|
||||
func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) {
|
||||
e.Condition = w(e.Condition).(Expression)
|
||||
e.TrueResult = w(e.TrueResult).(Expression)
|
||||
e.FalseResult = w(e.FalseResult).(Expression)
|
||||
w(e.Condition)
|
||||
w(e.TrueResult)
|
||||
w(e.FalseResult)
|
||||
}
|
||||
|
||||
func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
|
@ -593,8 +593,8 @@ type IndexExpr struct {
|
|||
}
|
||||
|
||||
func (e *IndexExpr) walkChildNodes(w internalWalkFunc) {
|
||||
e.Collection = w(e.Collection).(Expression)
|
||||
e.Key = w(e.Key).(Expression)
|
||||
w(e.Collection)
|
||||
w(e.Key)
|
||||
}
|
||||
|
||||
func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
|
@ -604,8 +604,9 @@ func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
|||
diags = append(diags, collDiags...)
|
||||
diags = append(diags, keyDiags...)
|
||||
|
||||
val, diags := hcl.Index(coll, key, &e.SrcRange)
|
||||
setDiagEvalContext(diags, e, ctx)
|
||||
val, indexDiags := hcl.Index(coll, key, &e.SrcRange)
|
||||
setDiagEvalContext(indexDiags, e, ctx)
|
||||
diags = append(diags, indexDiags...)
|
||||
return val, diags
|
||||
}
|
||||
|
||||
|
@ -625,8 +626,8 @@ type TupleConsExpr struct {
|
|||
}
|
||||
|
||||
func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) {
|
||||
for i, expr := range e.Exprs {
|
||||
e.Exprs[i] = w(expr).(Expression)
|
||||
for _, expr := range e.Exprs {
|
||||
w(expr)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -674,9 +675,9 @@ type ObjectConsItem struct {
|
|||
}
|
||||
|
||||
func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) {
|
||||
for i, item := range e.Items {
|
||||
e.Items[i].KeyExpr = w(item.KeyExpr).(Expression)
|
||||
e.Items[i].ValueExpr = w(item.ValueExpr).(Expression)
|
||||
for _, item := range e.Items {
|
||||
w(item.KeyExpr)
|
||||
w(item.ValueExpr)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -727,8 +728,8 @@ func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics
|
|||
Severity: hcl.DiagError,
|
||||
Summary: "Incorrect key type",
|
||||
Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()),
|
||||
Subject: item.ValueExpr.Range().Ptr(),
|
||||
Expression: item.ValueExpr,
|
||||
Subject: item.KeyExpr.Range().Ptr(),
|
||||
Expression: item.KeyExpr,
|
||||
EvalContext: ctx,
|
||||
})
|
||||
known = false
|
||||
|
@ -792,11 +793,31 @@ func (e *ObjectConsKeyExpr) walkChildNodes(w internalWalkFunc) {
|
|||
// We only treat our wrapped expression as a real expression if we're
|
||||
// not going to interpret it as a literal.
|
||||
if e.literalName() == "" {
|
||||
e.Wrapped = w(e.Wrapped).(Expression)
|
||||
w(e.Wrapped)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
// Because we accept a naked identifier as a literal key rather than a
|
||||
// reference, it's confusing to accept a traversal containing periods
|
||||
// here since we can't tell if the user intends to create a key with
|
||||
// periods or actually reference something. To avoid confusing downstream
|
||||
// errors we'll just prohibit a naked multi-step traversal here and
|
||||
// require the user to state their intent more clearly.
|
||||
// (This is handled at evaluation time rather than parse time because
|
||||
// an application using static analysis _can_ accept a naked multi-step
|
||||
// traversal here, if desired.)
|
||||
if travExpr, isTraversal := e.Wrapped.(*ScopeTraversalExpr); isTraversal && len(travExpr.Traversal) > 1 {
|
||||
var diags hcl.Diagnostics
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Ambiguous attribute key",
|
||||
Detail: "If this expression is intended to be a reference, wrap it in parentheses. If it's instead intended as a literal name containing periods, wrap it in quotes to create a string literal.",
|
||||
Subject: e.Range().Ptr(),
|
||||
})
|
||||
return cty.DynamicVal, diags
|
||||
}
|
||||
|
||||
if ln := e.literalName(); ln != "" {
|
||||
return cty.StringVal(ln), nil
|
||||
}
|
||||
|
@ -1157,7 +1178,7 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
|||
}
|
||||
|
||||
func (e *ForExpr) walkChildNodes(w internalWalkFunc) {
|
||||
e.CollExpr = w(e.CollExpr).(Expression)
|
||||
w(e.CollExpr)
|
||||
|
||||
scopeNames := map[string]struct{}{}
|
||||
if e.KeyVar != "" {
|
||||
|
@ -1170,17 +1191,17 @@ func (e *ForExpr) walkChildNodes(w internalWalkFunc) {
|
|||
if e.KeyExpr != nil {
|
||||
w(ChildScope{
|
||||
LocalNames: scopeNames,
|
||||
Expr: &e.KeyExpr,
|
||||
Expr: e.KeyExpr,
|
||||
})
|
||||
}
|
||||
w(ChildScope{
|
||||
LocalNames: scopeNames,
|
||||
Expr: &e.ValExpr,
|
||||
Expr: e.ValExpr,
|
||||
})
|
||||
if e.CondExpr != nil {
|
||||
w(ChildScope{
|
||||
LocalNames: scopeNames,
|
||||
Expr: &e.CondExpr,
|
||||
Expr: e.CondExpr,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -1214,11 +1235,28 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
|||
return cty.DynamicVal, diags
|
||||
}
|
||||
|
||||
sourceTy := sourceVal.Type()
|
||||
if sourceTy == cty.DynamicPseudoType {
|
||||
// If we don't even know the _type_ of our source value yet then
|
||||
// we'll need to defer all processing, since we can't decide our
|
||||
// result type either.
|
||||
return cty.DynamicVal, diags
|
||||
}
|
||||
|
||||
// A "special power" of splat expressions is that they can be applied
|
||||
// both to tuples/lists and to other values, and in the latter case
|
||||
// the value will be treated as an implicit single-item tuple, or as
|
||||
// an empty tuple if the value is null.
|
||||
autoUpgrade := !(sourceTy.IsTupleType() || sourceTy.IsListType() || sourceTy.IsSetType())
|
||||
|
||||
if sourceVal.IsNull() {
|
||||
if autoUpgrade {
|
||||
return cty.EmptyTupleVal, diags
|
||||
}
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Splat of null value",
|
||||
Detail: "Splat expressions (with the * symbol) cannot be applied to null values.",
|
||||
Detail: "Splat expressions (with the * symbol) cannot be applied to null sequences.",
|
||||
Subject: e.Source.Range().Ptr(),
|
||||
Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(),
|
||||
Expression: e.Source,
|
||||
|
@ -1226,16 +1264,49 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
|||
})
|
||||
return cty.DynamicVal, diags
|
||||
}
|
||||
if !sourceVal.IsKnown() {
|
||||
return cty.DynamicVal, diags
|
||||
|
||||
if autoUpgrade {
|
||||
sourceVal = cty.TupleVal([]cty.Value{sourceVal})
|
||||
sourceTy = sourceVal.Type()
|
||||
}
|
||||
|
||||
// A "special power" of splat expressions is that they can be applied
|
||||
// both to tuples/lists and to other values, and in the latter case
|
||||
// the value will be treated as an implicit single-value list. We'll
|
||||
// deal with that here first.
|
||||
if !(sourceVal.Type().IsTupleType() || sourceVal.Type().IsListType() || sourceVal.Type().IsSetType()) {
|
||||
sourceVal = cty.ListVal([]cty.Value{sourceVal})
|
||||
// We'll compute our result type lazily if we need it. In the normal case
|
||||
// it's inferred automatically from the value we construct.
|
||||
resultTy := func() (cty.Type, hcl.Diagnostics) {
|
||||
chiCtx := ctx.NewChild()
|
||||
var diags hcl.Diagnostics
|
||||
switch {
|
||||
case sourceTy.IsListType() || sourceTy.IsSetType():
|
||||
ety := sourceTy.ElementType()
|
||||
e.Item.setValue(chiCtx, cty.UnknownVal(ety))
|
||||
val, itemDiags := e.Each.Value(chiCtx)
|
||||
diags = append(diags, itemDiags...)
|
||||
e.Item.clearValue(chiCtx) // clean up our temporary value
|
||||
return cty.List(val.Type()), diags
|
||||
case sourceTy.IsTupleType():
|
||||
etys := sourceTy.TupleElementTypes()
|
||||
resultTys := make([]cty.Type, 0, len(etys))
|
||||
for _, ety := range etys {
|
||||
e.Item.setValue(chiCtx, cty.UnknownVal(ety))
|
||||
val, itemDiags := e.Each.Value(chiCtx)
|
||||
diags = append(diags, itemDiags...)
|
||||
e.Item.clearValue(chiCtx) // clean up our temporary value
|
||||
resultTys = append(resultTys, val.Type())
|
||||
}
|
||||
return cty.Tuple(resultTys), diags
|
||||
default:
|
||||
// Should never happen because of our promotion to list above.
|
||||
return cty.DynamicPseudoType, diags
|
||||
}
|
||||
}
|
||||
|
||||
if !sourceVal.IsKnown() {
|
||||
// We can't produce a known result in this case, but we'll still
|
||||
// indicate what the result type would be, allowing any downstream type
|
||||
// checking to proceed.
|
||||
ty, tyDiags := resultTy()
|
||||
diags = append(diags, tyDiags...)
|
||||
return cty.UnknownVal(ty), diags
|
||||
}
|
||||
|
||||
vals := make([]cty.Value, 0, sourceVal.LengthInt())
|
||||
|
@ -1259,15 +1330,28 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
|||
e.Item.clearValue(ctx) // clean up our temporary value
|
||||
|
||||
if !isKnown {
|
||||
return cty.DynamicVal, diags
|
||||
// We'll ingore the resultTy diagnostics in this case since they
|
||||
// will just be the same errors we saw while iterating above.
|
||||
ty, _ := resultTy()
|
||||
return cty.UnknownVal(ty), diags
|
||||
}
|
||||
|
||||
return cty.TupleVal(vals), diags
|
||||
switch {
|
||||
case sourceTy.IsListType() || sourceTy.IsSetType():
|
||||
if len(vals) == 0 {
|
||||
ty, tyDiags := resultTy()
|
||||
diags = append(diags, tyDiags...)
|
||||
return cty.ListValEmpty(ty.ElementType()), diags
|
||||
}
|
||||
return cty.ListVal(vals), diags
|
||||
default:
|
||||
return cty.TupleVal(vals), diags
|
||||
}
|
||||
}
|
||||
|
||||
func (e *SplatExpr) walkChildNodes(w internalWalkFunc) {
|
||||
e.Source = w(e.Source).(Expression)
|
||||
e.Each = w(e.Each).(Expression)
|
||||
w(e.Source)
|
||||
w(e.Each)
|
||||
}
|
||||
|
||||
func (e *SplatExpr) Range() hcl.Range {
|
||||
|
|
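The SplatExpr changes in this file give splat expressions a defined result for null and non-collection sources: a null source auto-upgrades to an empty tuple, and a single non-list value is treated as an implicit one-element tuple. A small sketch of that behavior follows; the variable name `foo` and the parsed expression are illustrative assumptions.

```go
// Illustrative sketch (not part of the diff): evaluate a full splat against
// a null value and against a single object value.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func evalSplat(foo cty.Value) cty.Value {
	expr, diags := hclsyntax.ParseExpression([]byte("foo[*].id"), "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags)
	}
	ctx := &hcl.EvalContext{Variables: map[string]cty.Value{"foo": foo}}
	val, diags := expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags)
	}
	return val
}

func main() {
	objTy := cty.Object(map[string]cty.Type{"id": cty.String})

	// Null source: the auto-upgrade rule yields an empty tuple instead of an error.
	fmt.Println(evalSplat(cty.NullVal(objTy)).GoString()) // expected: cty.EmptyTupleVal

	// Single object (not a list): treated as an implicit one-element tuple.
	obj := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("a")})
	fmt.Println(evalSplat(obj).GoString()) // expected: one-element tuple containing "a"
}
```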
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go (generated, vendored): 6 lines changed
@@ -129,8 +129,8 @@ type BinaryOpExpr struct {
 }
 
 func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) {
-	e.LHS = w(e.LHS).(Expression)
-	e.RHS = w(e.RHS).(Expression)
+	w(e.LHS)
+	w(e.RHS)
 }
 
 func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {

@@ -212,7 +212,7 @@ type UnaryOpExpr struct {
 }
 
 func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) {
-	e.Val = w(e.Val).(Expression)
+	w(e.Val)
 }
 
 func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go (generated, vendored): 8 lines changed
@@ -16,8 +16,8 @@ type TemplateExpr struct {
 }
 
 func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) {
-	for i, part := range e.Parts {
-		e.Parts[i] = w(part).(Expression)
+	for _, part := range e.Parts {
+		w(part)
 	}
 }
 
@@ -98,7 +98,7 @@ type TemplateJoinExpr struct {
 }
 
 func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) {
-	e.Tuple = w(e.Tuple).(Expression)
+	w(e.Tuple)
 }
 
 func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {

@@ -184,7 +184,7 @@ type TemplateWrapExpr struct {
 }
 
 func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) {
-	e.Wrapped = w(e.Wrapped).(Expression)
+	w(e.Wrapped)
 }
 
 func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go (generated, vendored): 18 lines changed
@@ -3,6 +3,8 @@ package hclsyntax
 import (
 	"bytes"
 	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
 )
 
 type navigation struct {

@@ -39,3 +41,19 @@ func (n navigation) ContextString(offset int) string {
 	}
 	return buf.String()
 }
+
+func (n navigation) ContextDefRange(offset int) hcl.Range {
+	var block *Block
+	for _, candidate := range n.root.Blocks {
+		if candidate.Range().ContainsOffset(offset) {
+			block = candidate
+			break
+		}
+	}
+
+	if block == nil {
+		return hcl.Range{}
+	}
+
+	return block.DefRange()
+}
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go (generated, vendored): 2 lines changed
@@ -19,4 +19,4 @@ type Node interface {
 	Range() hcl.Range
 }
 
-type internalWalkFunc func(Node) Node
+type internalWalkFunc func(Node)
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go (generated, vendored): 333 lines changed
@@ -9,7 +9,6 @@ import (
|
|||
"github.com/apparentlymart/go-textseg/textseg"
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/convert"
|
||||
)
|
||||
|
||||
type parser struct {
|
||||
|
@ -131,7 +130,7 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) {
|
|||
|
||||
switch next.Type {
|
||||
case TokenEqual:
|
||||
return p.finishParsingBodyAttribute(ident)
|
||||
return p.finishParsingBodyAttribute(ident, false)
|
||||
case TokenOQuote, TokenOBrace, TokenIdent:
|
||||
return p.finishParsingBodyBlock(ident)
|
||||
default:
|
||||
|
@ -149,7 +148,72 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) {
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics) {
|
||||
// parseSingleAttrBody is a weird variant of ParseBody that deals with the
|
||||
// body of a nested block containing only one attribute value all on a single
|
||||
// line, like foo { bar = baz } . It expects to find a single attribute item
|
||||
// immediately followed by the end token type with no intervening newlines.
|
||||
func (p *parser) parseSingleAttrBody(end TokenType) (*Body, hcl.Diagnostics) {
|
||||
ident := p.Read()
|
||||
if ident.Type != TokenIdent {
|
||||
p.recoverAfterBodyItem()
|
||||
return nil, hcl.Diagnostics{
|
||||
{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Argument or block definition required",
|
||||
Detail: "An argument or block definition is required here.",
|
||||
Subject: &ident.Range,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var attr *Attribute
|
||||
var diags hcl.Diagnostics
|
||||
|
||||
next := p.Peek()
|
||||
|
||||
switch next.Type {
|
||||
case TokenEqual:
|
||||
node, attrDiags := p.finishParsingBodyAttribute(ident, true)
|
||||
diags = append(diags, attrDiags...)
|
||||
attr = node.(*Attribute)
|
||||
case TokenOQuote, TokenOBrace, TokenIdent:
|
||||
p.recoverAfterBodyItem()
|
||||
return nil, hcl.Diagnostics{
|
||||
{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Argument definition required",
|
||||
Detail: fmt.Sprintf("A single-line block definition can contain only a single argument. If you meant to define argument %q, use an equals sign to assign it a value. To define a nested block, place it on a line of its own within its parent block.", ident.Bytes),
|
||||
Subject: hcl.RangeBetween(ident.Range, next.Range).Ptr(),
|
||||
},
|
||||
}
|
||||
default:
|
||||
p.recoverAfterBodyItem()
|
||||
return nil, hcl.Diagnostics{
|
||||
{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Argument or block definition required",
|
||||
Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.",
|
||||
Subject: &ident.Range,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return &Body{
|
||||
Attributes: Attributes{
|
||||
string(ident.Bytes): attr,
|
||||
},
|
||||
|
||||
SrcRange: attr.SrcRange,
|
||||
EndRange: hcl.Range{
|
||||
Filename: attr.SrcRange.Filename,
|
||||
Start: attr.SrcRange.End,
|
||||
End: attr.SrcRange.End,
|
||||
},
|
||||
}, diags
|
||||
|
||||
}
|
||||
|
||||
func (p *parser) finishParsingBodyAttribute(ident Token, singleLine bool) (Node, hcl.Diagnostics) {
|
||||
eqTok := p.Read() // eat equals token
|
||||
if eqTok.Type != TokenEqual {
|
||||
// should never happen if caller behaves
|
||||
|
@ -166,22 +230,33 @@ func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics)
|
|||
endRange = p.PrevRange()
|
||||
p.recoverAfterBodyItem()
|
||||
} else {
|
||||
end := p.Peek()
|
||||
if end.Type != TokenNewline && end.Type != TokenEOF {
|
||||
if !p.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Missing newline after argument",
|
||||
Detail: "An argument definition must end with a newline.",
|
||||
Subject: &end.Range,
|
||||
Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(),
|
||||
})
|
||||
endRange = p.PrevRange()
|
||||
if !singleLine {
|
||||
end := p.Peek()
|
||||
if end.Type != TokenNewline && end.Type != TokenEOF {
|
||||
if !p.recovery {
|
||||
summary := "Missing newline after argument"
|
||||
detail := "An argument definition must end with a newline."
|
||||
|
||||
if end.Type == TokenComma {
|
||||
summary = "Unexpected comma after argument"
|
||||
detail = "Argument definitions must be separated by newlines, not commas. " + detail
|
||||
}
|
||||
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: summary,
|
||||
Detail: detail,
|
||||
Subject: &end.Range,
|
||||
Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(),
|
||||
})
|
||||
}
|
||||
endRange = p.PrevRange()
|
||||
p.recoverAfterBodyItem()
|
||||
} else {
|
||||
endRange = p.PrevRange()
|
||||
p.Read() // eat newline
|
||||
}
|
||||
endRange = p.PrevRange()
|
||||
p.recoverAfterBodyItem()
|
||||
} else {
|
||||
endRange = p.PrevRange()
|
||||
p.Read() // eat newline
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -218,19 +293,9 @@ Token:
|
|||
diags = append(diags, labelDiags...)
|
||||
labels = append(labels, label)
|
||||
labelRanges = append(labelRanges, labelRange)
|
||||
if labelDiags.HasErrors() {
|
||||
p.recoverAfterBodyItem()
|
||||
return &Block{
|
||||
Type: blockType,
|
||||
Labels: labels,
|
||||
Body: nil,
|
||||
|
||||
TypeRange: ident.Range,
|
||||
LabelRanges: labelRanges,
|
||||
OpenBraceRange: ident.Range, // placeholder
|
||||
CloseBraceRange: ident.Range, // placeholder
|
||||
}, diags
|
||||
}
|
||||
// parseQuoteStringLiteral recovers up to the closing quote
|
||||
// if it encounters problems, so we can continue looking for
|
||||
// more labels and eventually the block body even.
|
||||
|
||||
case TokenIdent:
|
||||
tok = p.Read() // eat token
|
||||
|
@ -273,7 +338,10 @@ Token:
|
|||
return &Block{
|
||||
Type: blockType,
|
||||
Labels: labels,
|
||||
Body: nil,
|
||||
Body: &Body{
|
||||
SrcRange: ident.Range,
|
||||
EndRange: ident.Range,
|
||||
},
|
||||
|
||||
TypeRange: ident.Range,
|
||||
LabelRanges: labelRanges,
|
||||
|
@ -285,7 +353,51 @@ Token:
|
|||
|
||||
// Once we fall out here, the peeker is pointed just after our opening
|
||||
// brace, so we can begin our nested body parsing.
|
||||
body, bodyDiags := p.ParseBody(TokenCBrace)
|
||||
var body *Body
|
||||
var bodyDiags hcl.Diagnostics
|
||||
switch p.Peek().Type {
|
||||
case TokenNewline, TokenEOF, TokenCBrace:
|
||||
body, bodyDiags = p.ParseBody(TokenCBrace)
|
||||
default:
|
||||
// Special one-line, single-attribute block parsing mode.
|
||||
body, bodyDiags = p.parseSingleAttrBody(TokenCBrace)
|
||||
switch p.Peek().Type {
|
||||
case TokenCBrace:
|
||||
p.Read() // the happy path - just consume the closing brace
|
||||
case TokenComma:
|
||||
// User seems to be trying to use the object-constructor
|
||||
// comma-separated style, which isn't permitted for blocks.
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid single-argument block definition",
|
||||
Detail: "Single-line block syntax can include only one argument definition. To define multiple arguments, use the multi-line block syntax with one argument definition per line.",
|
||||
Subject: p.Peek().Range.Ptr(),
|
||||
})
|
||||
p.recover(TokenCBrace)
|
||||
case TokenNewline:
|
||||
// We don't allow weird mixtures of single and multi-line syntax.
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid single-argument block definition",
|
||||
Detail: "An argument definition on the same line as its containing block creates a single-line block definition, which must also be closed on the same line. Place the block's closing brace immediately after the argument definition.",
|
||||
Subject: p.Peek().Range.Ptr(),
|
||||
})
|
||||
p.recover(TokenCBrace)
|
||||
default:
|
||||
// Some other weird thing is going on. Since we can't guess a likely
|
||||
// user intent for this one, we'll skip it if we're already in
|
||||
// recovery mode.
|
||||
if !p.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid single-argument block definition",
|
||||
Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.",
|
||||
Subject: p.Peek().Range.Ptr(),
|
||||
})
|
||||
}
|
||||
p.recover(TokenCBrace)
|
||||
}
|
||||
}
|
||||
diags = append(diags, bodyDiags...)
|
||||
cBraceRange := p.PrevRange()
|
||||
|
||||
|
@ -459,7 +571,14 @@ func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl
|
|||
|
||||
func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) {
|
||||
term, diags := p.parseExpressionTerm()
|
||||
ret := term
|
||||
ret, moreDiags := p.parseExpressionTraversals(term)
|
||||
diags = append(diags, moreDiags...)
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
func (p *parser) parseExpressionTraversals(from Expression) (Expression, hcl.Diagnostics) {
|
||||
var diags hcl.Diagnostics
|
||||
ret := from
|
||||
|
||||
Traversal:
|
||||
for {
|
||||
|
@ -657,44 +776,81 @@ Traversal:
|
|||
// the key value is something constant.
|
||||
|
||||
open := p.Read()
|
||||
// TODO: If we have a TokenStar inside our brackets, parse as
|
||||
// a Splat expression: foo[*].baz[0].
|
||||
var close Token
|
||||
p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets
|
||||
keyExpr, keyDiags := p.ParseExpression()
|
||||
diags = append(diags, keyDiags...)
|
||||
if p.recovery && keyDiags.HasErrors() {
|
||||
close = p.recover(TokenCBrack)
|
||||
} else {
|
||||
close = p.Read()
|
||||
switch p.Peek().Type {
|
||||
case TokenStar:
|
||||
// This is a full splat expression, like foo[*], which consumes
|
||||
// the rest of the traversal steps after it using a recursive
|
||||
// call to this function.
|
||||
p.Read() // consume star
|
||||
close := p.Read()
|
||||
if close.Type != TokenCBrack && !p.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Missing close bracket on index",
|
||||
Detail: "The index operator must end with a closing bracket (\"]\").",
|
||||
Summary: "Missing close bracket on splat index",
|
||||
Detail: "The star for a full splat operator must be immediately followed by a closing bracket (\"]\").",
|
||||
Subject: &close.Range,
|
||||
})
|
||||
close = p.recover(TokenCBrack)
|
||||
}
|
||||
}
|
||||
p.PopIncludeNewlines()
|
||||
|
||||
if lit, isLit := keyExpr.(*LiteralValueExpr); isLit {
|
||||
litKey, _ := lit.Value(nil)
|
||||
rng := hcl.RangeBetween(open.Range, close.Range)
|
||||
step := hcl.TraverseIndex{
|
||||
Key: litKey,
|
||||
SrcRange: rng,
|
||||
// Splat expressions use a special "anonymous symbol" as a
|
||||
// placeholder in an expression to be evaluated once for each
|
||||
// item in the source expression.
|
||||
itemExpr := &AnonSymbolExpr{
|
||||
SrcRange: hcl.RangeBetween(open.Range, close.Range),
|
||||
}
|
||||
ret = makeRelativeTraversal(ret, step, rng)
|
||||
} else {
|
||||
rng := hcl.RangeBetween(open.Range, close.Range)
|
||||
ret = &IndexExpr{
|
||||
Collection: ret,
|
||||
Key: keyExpr,
|
||||
// Now we'll recursively call this same function to eat any
|
||||
// remaining traversal steps against the anonymous symbol.
|
||||
travExpr, nestedDiags := p.parseExpressionTraversals(itemExpr)
|
||||
diags = append(diags, nestedDiags...)
|
||||
|
||||
SrcRange: rng,
|
||||
OpenRange: open.Range,
|
||||
ret = &SplatExpr{
|
||||
Source: ret,
|
||||
Each: travExpr,
|
||||
Item: itemExpr,
|
||||
|
||||
SrcRange: hcl.RangeBetween(open.Range, travExpr.Range()),
|
||||
MarkerRange: hcl.RangeBetween(open.Range, close.Range),
|
||||
}
|
||||
|
||||
default:
|
||||
|
||||
var close Token
|
||||
p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets
|
||||
keyExpr, keyDiags := p.ParseExpression()
|
||||
diags = append(diags, keyDiags...)
|
||||
if p.recovery && keyDiags.HasErrors() {
|
||||
close = p.recover(TokenCBrack)
|
||||
} else {
|
||||
close = p.Read()
|
||||
if close.Type != TokenCBrack && !p.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Missing close bracket on index",
|
||||
Detail: "The index operator must end with a closing bracket (\"]\").",
|
||||
Subject: &close.Range,
|
||||
})
|
||||
close = p.recover(TokenCBrack)
|
||||
}
|
||||
}
|
||||
p.PopIncludeNewlines()
|
||||
|
||||
if lit, isLit := keyExpr.(*LiteralValueExpr); isLit {
|
||||
litKey, _ := lit.Value(nil)
|
||||
rng := hcl.RangeBetween(open.Range, close.Range)
|
||||
step := hcl.TraverseIndex{
|
||||
Key: litKey,
|
||||
SrcRange: rng,
|
||||
}
|
||||
ret = makeRelativeTraversal(ret, step, rng)
|
||||
} else {
|
||||
rng := hcl.RangeBetween(open.Range, close.Range)
|
||||
ret = &IndexExpr{
|
||||
Collection: ret,
|
||||
Key: keyExpr,
|
||||
|
||||
SrcRange: rng,
|
||||
OpenRange: open.Range,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -813,7 +969,7 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) {
|
|||
case TokenOQuote, TokenOHeredoc:
|
||||
open := p.Read() // eat opening marker
|
||||
closer := p.oppositeBracket(open.Type)
|
||||
exprs, passthru, _, diags := p.parseTemplateInner(closer)
|
||||
exprs, passthru, _, diags := p.parseTemplateInner(closer, tokenOpensFlushHeredoc(open))
|
||||
|
||||
closeRange := p.PrevRange()
|
||||
|
||||
|
@ -891,11 +1047,10 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) {
|
|||
}
|
||||
|
||||
func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) {
|
||||
// We'll lean on the cty converter to do the conversion, to ensure that
|
||||
// the behavior is the same as what would happen if converting a
|
||||
// non-literal string to a number.
|
||||
numStrVal := cty.StringVal(string(tok.Bytes))
|
||||
numVal, err := convert.Convert(numStrVal, cty.Number)
|
||||
// The cty.ParseNumberVal is always the same behavior as converting a
|
||||
// string to a number, ensuring we always interpret decimal numbers in
|
||||
// the same way.
|
||||
numVal, err := cty.ParseNumberVal(string(tok.Bytes))
|
||||
if err != nil {
|
||||
ret := cty.UnknownVal(cty.Number)
|
||||
return ret, hcl.Diagnostics{
|
||||
|
@ -1087,13 +1242,13 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
|
|||
panic("parseObjectCons called without peeker pointing to open brace")
|
||||
}
|
||||
|
||||
p.PushIncludeNewlines(true)
|
||||
defer p.PopIncludeNewlines()
|
||||
|
||||
if forKeyword.TokenMatches(p.Peek()) {
|
||||
return p.finishParsingForExpr(open)
|
||||
}
|
||||
|
||||
p.PushIncludeNewlines(true)
|
||||
defer p.PopIncludeNewlines()
|
||||
|
||||
var close Token
|
||||
|
||||
var diags hcl.Diagnostics
|
||||
|
@ -1132,7 +1287,8 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
|
|||
next = p.Peek()
|
||||
if next.Type != TokenEqual && next.Type != TokenColon {
|
||||
if !p.recovery {
|
||||
if next.Type == TokenNewline || next.Type == TokenComma {
|
||||
switch next.Type {
|
||||
case TokenNewline, TokenComma:
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Missing attribute value",
|
||||
|
@ -1140,7 +1296,23 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
|
|||
Subject: &next.Range,
|
||||
Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
|
||||
})
|
||||
} else {
|
||||
case TokenIdent:
|
||||
// Although this might just be a plain old missing equals
|
||||
// sign before a reference, one way to get here is to try
|
||||
// to write an attribute name containing a period followed
|
||||
// by a digit, which was valid in HCL1, like this:
|
||||
// foo1.2_bar = "baz"
|
||||
// We can't know exactly what the user intended here, but
|
||||
// we'll augment our message with an extra hint in this case
|
||||
// in case it is helpful.
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Missing key/value separator",
|
||||
Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value. If you intended to given an attribute name containing periods or spaces, write the name in quotes to create a string literal.",
|
||||
Subject: &next.Range,
|
||||
Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
|
||||
})
|
||||
default:
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Missing key/value separator",
|
||||
|
@ -1472,7 +1644,16 @@ Token:
|
|||
Subject: &tok.Range,
|
||||
Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(),
|
||||
})
|
||||
p.recover(TokenTemplateSeqEnd)
|
||||
|
||||
// Now that we're returning an error callers won't attempt to use
|
||||
// the result for any real operations, but they might try to use
|
||||
// the partial AST for other analyses, so we'll leave a marker
|
||||
// to indicate that there was something invalid in the string to
|
||||
// help avoid misinterpretation of the partial result
|
||||
ret.WriteString(which)
|
||||
ret.WriteString("{ ... }")
|
||||
|
||||
p.recover(TokenTemplateSeqEnd) // we'll try to keep parsing after the sequence ends
|
||||
|
||||
case TokenEOF:
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
|
@ -1493,7 +1674,7 @@ Token:
|
|||
Subject: &tok.Range,
|
||||
Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(),
|
||||
})
|
||||
p.recover(TokenOQuote)
|
||||
p.recover(TokenCQuote)
|
||||
break Token
|
||||
|
||||
}
|
||||
|
|
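Among the parser changes above is a single-line block mode (`parseSingleAttrBody`): a block may carry exactly one argument and its closing brace on the header line, while comma-separated arguments are rejected. A brief sketch follows; the block type, label, and attribute names are made up for the example.

```go
// Illustrative sketch (not vendored code): exercise the single-line block
// syntax accepted by the updated parser, and a form it rejects.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// Accepted: a single-line block with exactly one argument.
	ok := []byte(`variable "name" { default = "hello" }` + "\n")

	// Rejected: comma-separated arguments inside a single-line block.
	bad := []byte(`variable "name" { default = "hello", sensitive = true }` + "\n")

	for _, src := range [][]byte{ok, bad} {
		_, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1})
		fmt.Printf("%q -> errors: %v\n", src, diags.HasErrors())
	}
	// Expected: the first parses cleanly; the second reports
	// "Invalid single-argument block definition".
}
```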
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go (generated, vendored): 79 lines changed
@@ -2,6 +2,7 @@ package hclsyntax
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/apparentlymart/go-textseg/textseg"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
|
@ -10,11 +11,11 @@ import (
|
|||
)
|
||||
|
||||
func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) {
|
||||
return p.parseTemplate(TokenEOF)
|
||||
return p.parseTemplate(TokenEOF, false)
|
||||
}
|
||||
|
||||
func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) {
|
||||
exprs, passthru, rng, diags := p.parseTemplateInner(end)
|
||||
func (p *parser) parseTemplate(end TokenType, flushHeredoc bool) (Expression, hcl.Diagnostics) {
|
||||
exprs, passthru, rng, diags := p.parseTemplateInner(end, flushHeredoc)
|
||||
|
||||
if passthru {
|
||||
if len(exprs) != 1 {
|
||||
|
@ -32,8 +33,11 @@ func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) {
|
|||
}, diags
|
||||
}
|
||||
|
||||
func (p *parser) parseTemplateInner(end TokenType) ([]Expression, bool, hcl.Range, hcl.Diagnostics) {
|
||||
func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Expression, bool, hcl.Range, hcl.Diagnostics) {
|
||||
parts, diags := p.parseTemplateParts(end)
|
||||
if flushHeredoc {
|
||||
flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec
|
||||
}
|
||||
tp := templateParser{
|
||||
Tokens: parts.Tokens,
|
||||
SrcRange: parts.SrcRange,
|
||||
|
@ -649,6 +653,73 @@ Token:
|
|||
return ret, diags
|
||||
}
|
||||
|
||||
// flushHeredocTemplateParts modifies in-place the line-leading literal strings
|
||||
// to apply the flush heredoc processing rule: find the line with the smallest
|
||||
// number of whitespace characters as prefix and then trim that number of
|
||||
// characters from all of the lines.
|
||||
//
|
||||
// This rule is applied to static tokens rather than to the rendered result,
|
||||
// so interpolating a string with leading whitespace cannot affect the chosen
|
||||
// prefix length.
|
||||
func flushHeredocTemplateParts(parts *templateParts) {
|
||||
if len(parts.Tokens) == 0 {
|
||||
// Nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
const maxInt = int((^uint(0)) >> 1)
|
||||
|
||||
minSpaces := maxInt
|
||||
newline := true
|
||||
var adjust []*templateLiteralToken
|
||||
for _, ttok := range parts.Tokens {
|
||||
if newline {
|
||||
newline = false
|
||||
var spaces int
|
||||
if lit, ok := ttok.(*templateLiteralToken); ok {
|
||||
orig := lit.Val
|
||||
trimmed := strings.TrimLeftFunc(orig, unicode.IsSpace)
|
||||
// If a token is entirely spaces and ends with a newline
|
||||
// then it's a "blank line" and thus not considered for
|
||||
// space-prefix-counting purposes.
|
||||
if len(trimmed) == 0 && strings.HasSuffix(orig, "\n") {
|
||||
spaces = maxInt
|
||||
} else {
|
||||
spaceBytes := len(lit.Val) - len(trimmed)
|
||||
spaces, _ = textseg.TokenCount([]byte(orig[:spaceBytes]), textseg.ScanGraphemeClusters)
|
||||
adjust = append(adjust, lit)
|
||||
}
|
||||
} else if _, ok := ttok.(*templateEndToken); ok {
|
||||
break // don't process the end token since it never has spaces before it
|
||||
}
|
||||
if spaces < minSpaces {
|
||||
minSpaces = spaces
|
||||
}
|
||||
}
|
||||
if lit, ok := ttok.(*templateLiteralToken); ok {
|
||||
if strings.HasSuffix(lit.Val, "\n") {
|
||||
newline = true // The following token, if any, begins a new line
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, lit := range adjust {
|
||||
// Since we want to count space _characters_ rather than space _bytes_,
|
||||
// we can't just do a straightforward slice operation here and instead
|
||||
// need to hunt for the split point with a scanner.
|
||||
valBytes := []byte(lit.Val)
|
||||
spaceByteCount := 0
|
||||
for i := 0; i < minSpaces; i++ {
|
||||
adv, _, _ := textseg.ScanGraphemeClusters(valBytes, true)
|
||||
spaceByteCount += adv
|
||||
valBytes = valBytes[adv:]
|
||||
}
|
||||
lit.Val = lit.Val[spaceByteCount:]
|
||||
lit.SrcRange.Start.Column += minSpaces
|
||||
lit.SrcRange.Start.Byte += spaceByteCount
|
||||
}
|
||||
}
|
||||
|
||||
type templateParts struct {
|
||||
Tokens []templateToken
|
||||
SrcRange hcl.Range
|
||||
|
|
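`flushHeredocTemplateParts` above trims the smallest leading-whitespace prefix from the literal lines of a `<<-` ("flush") heredoc before template parsing. The sketch below illustrates the resulting value for an assumed `message` attribute; the file name and source are illustrative.

```go
// Illustrative sketch (not vendored code): parse a flush heredoc and print
// its value with the common leading whitespace removed.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := []byte("message = <<-EOT\n" +
		"    Hello,\n" +
		"      world!\n" +
		"    EOT\n")

	f, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags)
	}

	attrs, diags := f.Body.JustAttributes()
	if diags.HasErrors() {
		panic(diags)
	}
	val, diags := attrs["message"].Expr.Value(nil)
	if diags.HasErrors() {
		panic(diags)
	}
	// Expected: the four-space minimum prefix is removed from every line,
	// leaving "Hello,\n  world!\n".
	fmt.Printf("%q\n", val.AsString())
}
```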
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go (generated, vendored): 24 lines changed
@@ -1,10 +1,10 @@
|
|||
// line 1 "scan_string_lit.rl"
|
||||
//line scan_string_lit.rl:1
|
||||
|
||||
package hclsyntax
|
||||
|
||||
// This file is generated from scan_string_lit.rl. DO NOT EDIT.
|
||||
|
||||
// line 9 "scan_string_lit.go"
|
||||
//line scan_string_lit.go:9
|
||||
var _hclstrtok_actions []byte = []byte{
|
||||
0, 1, 0, 1, 1, 2, 1, 0,
|
||||
}
|
||||
|
@ -114,12 +114,12 @@ const hclstrtok_error int = 0
|
|||
const hclstrtok_en_quoted int = 10
|
||||
const hclstrtok_en_unquoted int = 4
|
||||
|
||||
// line 10 "scan_string_lit.rl"
|
||||
//line scan_string_lit.rl:10
|
||||
|
||||
func scanStringLit(data []byte, quoted bool) [][]byte {
|
||||
var ret [][]byte
|
||||
|
||||
// line 61 "scan_string_lit.rl"
|
||||
//line scan_string_lit.rl:61
|
||||
|
||||
// Ragel state
|
||||
p := 0 // "Pointer" into data
|
||||
|
@ -144,11 +144,11 @@ func scanStringLit(data []byte, quoted bool) [][]byte {
|
|||
ret = append(ret, data[ts:te])
|
||||
}*/
|
||||
|
||||
// line 154 "scan_string_lit.go"
|
||||
//line scan_string_lit.go:154
|
||||
{
|
||||
}
|
||||
|
||||
// line 158 "scan_string_lit.go"
|
||||
//line scan_string_lit.go:158
|
||||
{
|
||||
var _klen int
|
||||
var _trans int
|
||||
|
@ -229,7 +229,7 @@ func scanStringLit(data []byte, quoted bool) [][]byte {
|
|||
_acts++
|
||||
switch _hclstrtok_actions[_acts-1] {
|
||||
case 0:
|
||||
// line 40 "scan_string_lit.rl"
|
||||
//line scan_string_lit.rl:40
|
||||
|
||||
// If te is behind p then we've skipped over some literal
|
||||
// characters which we must now return.
|
||||
|
@ -239,12 +239,12 @@ func scanStringLit(data []byte, quoted bool) [][]byte {
|
|||
ts = p
|
||||
|
||||
case 1:
|
||||
// line 48 "scan_string_lit.rl"
|
||||
//line scan_string_lit.rl:48
|
||||
|
||||
te = p
|
||||
ret = append(ret, data[ts:te])
|
||||
|
||||
// line 255 "scan_string_lit.go"
|
||||
//line scan_string_lit.go:253
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -267,12 +267,12 @@ func scanStringLit(data []byte, quoted bool) [][]byte {
|
|||
__acts++
|
||||
switch _hclstrtok_actions[__acts-1] {
|
||||
case 1:
|
||||
// line 48 "scan_string_lit.rl"
|
||||
//line scan_string_lit.rl:48
|
||||
|
||||
te = p
|
||||
ret = append(ret, data[ts:te])
|
||||
|
||||
// line 281 "scan_string_lit.go"
|
||||
//line scan_string_lit.go:278
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -282,7 +282,7 @@ func scanStringLit(data []byte, quoted bool) [][]byte {
|
|||
}
|
||||
}
|
||||
|
||||
// line 89 "scan_string_lit.rl"
|
||||
//line scan_string_lit.rl:89
|
||||
|
||||
if te < p {
|
||||
// Collect any leftover literal characters at the end of the input
|
||||
|
|
3783
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
generated
vendored
3783
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
generated
vendored
File diff suppressed because it is too large
17
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl
generated
vendored
|
@ -9,17 +9,22 @@ import (
|
|||
|
||||
// This file is generated from scan_tokens.rl. DO NOT EDIT.
|
||||
%%{
|
||||
# (except you are actually in scan_tokens.rl here, so edit away!)
|
||||
# (except when you are actually in scan_tokens.rl here, so edit away!)
|
||||
|
||||
machine hcltok;
|
||||
write data;
|
||||
}%%
|
||||
|
||||
func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
|
||||
stripData := stripUTF8BOM(data)
|
||||
start.Byte += len(data) - len(stripData)
|
||||
data = stripData
|
||||
|
||||
f := &tokenAccum{
|
||||
Filename: filename,
|
||||
Bytes: data,
|
||||
Pos: start,
|
||||
Filename: filename,
|
||||
Bytes: data,
|
||||
Pos: start,
|
||||
StartByte: start.Byte,
|
||||
}
|
||||
|
||||
%%{
|
||||
|
@ -39,7 +44,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
|
|||
Ident = (ID_Start | '_') (ID_Continue | '-')*;
|
||||
|
||||
# Symbols that just represent themselves are handled as a single rule.
|
||||
SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`";
|
||||
SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`" | "'";
|
||||
|
||||
EqualOp = "==";
|
||||
NotEqual = "!=";
|
||||
|
@ -60,7 +65,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
|
|||
Comment = (
|
||||
("#" (any - EndOfLine)* EndOfLine) |
|
||||
("//" (any - EndOfLine)* EndOfLine) |
|
||||
("/*" any* "*/")
|
||||
("/*" any* :>> "*/")
|
||||
);
|
||||
|
||||
# Note: hclwrite assumes that only ASCII spaces appear between tokens,
|
||||
|
|
137
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
generated
vendored
|
@ -9,13 +9,13 @@ generation of configuration.
|
|||
|
||||
The language consists of three integrated sub-languages:
|
||||
|
||||
* The _structural_ language defines the overall hierarchical configuration
|
||||
- The _structural_ language defines the overall hierarchical configuration
|
||||
structure, and is a serialization of HCL bodies, blocks and attributes.
|
||||
|
||||
* The _expression_ language is used to express attribute values, either as
|
||||
- The _expression_ language is used to express attribute values, either as
|
||||
literals or as derivations of other values.
|
||||
|
||||
* The _template_ language is used to compose values together into strings,
|
||||
- The _template_ language is used to compose values together into strings,
|
||||
as one of several types of expression in the expression language.
|
||||
|
||||
In normal use these three sub-languages are used together within configuration
|
||||
|
@ -30,19 +30,19 @@ Within this specification a semi-formal notation is used to illustrate the
|
|||
details of syntax. This notation is intended for human consumption rather
|
||||
than machine consumption, with the following conventions:
|
||||
|
||||
* A naked name starting with an uppercase letter is a global production,
|
||||
- A naked name starting with an uppercase letter is a global production,
|
||||
common to all of the syntax specifications in this document.
|
||||
* A naked name starting with a lowercase letter is a local production,
|
||||
- A naked name starting with a lowercase letter is a local production,
|
||||
meaningful only within the specification where it is defined.
|
||||
* Double and single quotes (`"` and `'`) are used to mark literal character
|
||||
- Double and single quotes (`"` and `'`) are used to mark literal character
|
||||
sequences, which may be either punctuation markers or keywords.
|
||||
* The default operator for combining items, which has no punctuation,
|
||||
- The default operator for combining items, which has no punctuation,
|
||||
is concatenation.
|
||||
* The symbol `|` indicates that any one of its left and right operands may
|
||||
- The symbol `|` indicates that any one of its left and right operands may
|
||||
be present.
|
||||
* The `*` symbol indicates zero or more repetitions of the item to its left.
|
||||
* The `?` symbol indicates zero or one of the item to its left.
|
||||
* Parentheses (`(` and `)`) are used to group items together to apply
|
||||
- The `*` symbol indicates zero or more repetitions of the item to its left.
|
||||
- The `?` symbol indicates zero or one of the item to its left.
|
||||
- Parentheses (`(` and `)`) are used to group items together to apply
|
||||
the `|`, `*` and `?` operators to them collectively.
|
||||
|
||||
The grammar notation does not fully describe the language. The prose may
|
||||
|
@ -77,11 +77,11 @@ are not valid within HCL native syntax.
|
|||
|
||||
Comments serve as program documentation and come in two forms:
|
||||
|
||||
* _Line comments_ start with either the `//` or `#` sequences and end with
|
||||
- _Line comments_ start with either the `//` or `#` sequences and end with
|
||||
the next newline sequence. A line comment is considered equivalent to a
|
||||
newline sequence.
|
||||
|
||||
* _Inline comments_ start with the `/*` sequence and end with the `*/`
|
||||
- _Inline comments_ start with the `/*` sequence and end with the `*/`
|
||||
sequence, and may have any characters within except the ending sequence.
|
||||
An inline comment is considered equivalent to a whitespace sequence.
|
||||
|
||||
|
@ -91,7 +91,7 @@ template literals except inside an interpolation sequence or template directive.
|
|||
### Identifiers
|
||||
|
||||
Identifiers name entities such as blocks, attributes and expression variables.
|
||||
Identifiers are interpreted as per [UAX #31][UAX31] Section 2. Specifically,
|
||||
Identifiers are interpreted as per [UAX #31][uax31] Section 2. Specifically,
|
||||
their syntax is defined in terms of the `ID_Start` and `ID_Continue`
|
||||
character properties as follows:
|
||||
|
||||
|
@ -109,7 +109,7 @@ that is not part of the unicode `ID_Continue` definition. This is to allow
|
|||
attribute names and block type names to contain dashes, although underscores
|
||||
as word separators are considered the idiomatic usage.
|
||||
|
||||
[UAX31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"
|
||||
[uax31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"
|
||||
|
||||
### Keywords
|
||||
|
||||
|
@ -150,18 +150,19 @@ expmark = ('e' | 'E') ("+" | "-")?;
|
|||
The structural language consists of syntax representing the following
|
||||
constructs:
|
||||
|
||||
* _Attributes_, which assign a value to a specified name.
|
||||
* _Blocks_, which create a child body annotated by a type and optional labels.
|
||||
* _Body Content_, which consists of a collection of attributes and blocks.
|
||||
- _Attributes_, which assign a value to a specified name.
|
||||
- _Blocks_, which create a child body annotated by a type and optional labels.
|
||||
- _Body Content_, which consists of a collection of attributes and blocks.
|
||||
|
||||
These constructs correspond to the similarly-named concepts in the
|
||||
language-agnostic HCL information model.
|
||||
|
||||
```ebnf
|
||||
ConfigFile = Body;
|
||||
Body = (Attribute | Block)*;
|
||||
Attribute = Identifier "=" Expression Newline;
|
||||
Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
|
||||
ConfigFile = Body;
|
||||
Body = (Attribute | Block | OneLineBlock)*;
|
||||
Attribute = Identifier "=" Expression Newline;
|
||||
Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
|
||||
OneLineBlock = Identifier (StringLit|Identifier)* "{" (Identifier "=" Expression)? "}" Newline;
|
||||
```
|
||||
|
||||
### Configuration Files
|
||||
|
@ -252,9 +253,9 @@ LiteralValue = (
|
|||
);
|
||||
```
|
||||
|
||||
* Numeric literals represent values of type _number_.
|
||||
* The `true` and `false` keywords represent values of type _bool_.
|
||||
* The `null` keyword represents a null value of the dynamic pseudo-type.
|
||||
- Numeric literals represent values of type _number_.
|
||||
- The `true` and `false` keywords represent values of type _bool_.
|
||||
- The `null` keyword represents a null value of the dynamic pseudo-type.
|
||||
|
||||
String literals are not directly available in the expression sub-language, but
|
||||
are available via the template sub-language, which can in turn be incorporated
|
||||
|
@ -285,8 +286,8 @@ When specifying an object element, an identifier is interpreted as a literal
|
|||
attribute name as opposed to a variable reference. To populate an item key
|
||||
from a variable, use parentheses to disambiguate:
|
||||
|
||||
* `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
|
||||
* `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
|
||||
- `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
|
||||
- `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
|
||||
from the variable named `foo`.
|
||||
|
||||
Between the open and closing delimiters of these sequences, newline sequences
|
||||
|
@ -298,12 +299,12 @@ _for expression_ interpretation has priority, so to produce a tuple whose
|
|||
first element is the value of a variable named `for`, or an object with a
|
||||
key named `for`, use parentheses to disambiguate:
|
||||
|
||||
* `[for, foo, baz]` is a syntax error.
|
||||
* `[(for), foo, baz]` is a tuple whose first element is the value of variable
|
||||
- `[for, foo, baz]` is a syntax error.
|
||||
- `[(for), foo, baz]` is a tuple whose first element is the value of variable
|
||||
`for`.
|
||||
* `{for: 1, baz: 2}` is a syntax error.
|
||||
* `{(for): 1, baz: 2}` is an object with an attribute literally named `for`.
|
||||
* `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the
|
||||
- `{for: 1, baz: 2}` is a syntax error.
|
||||
- `{(for): 1, baz: 2}` is an object with an attribute literally named `for`.
|
||||
- `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the
|
||||
ambiguity by reordering.
|
||||
|
||||
### Template Expressions
|
||||
|
@ -311,9 +312,9 @@ key named `for`, use parentheses to disambiguate:
|
|||
A _template expression_ embeds a program written in the template sub-language
|
||||
as an expression. Template expressions come in two forms:
|
||||
|
||||
* A _quoted_ template expression is delimited by quote characters (`"`) and
|
||||
- A _quoted_ template expression is delimited by quote characters (`"`) and
|
||||
defines a template as a single-line expression with escape characters.
|
||||
* A _heredoc_ template expression is introduced by a `<<` sequence and
|
||||
- A _heredoc_ template expression is introduced by a `<<` sequence and
|
||||
defines a template via a multi-line sequence terminated by a user-chosen
|
||||
delimiter.
|
||||
|
||||
|
@ -321,7 +322,7 @@ In both cases the template interpolation and directive syntax is available for
|
|||
use within the delimiters, and any text outside of these special sequences is
|
||||
interpreted as a literal string.
|
||||
|
||||
In _quoted_ template expressions any literal string sequences within the
|
||||
In _quoted_ template expressions any literal string sequences within the
|
||||
template behave in a special way: literal newline sequences are not permitted
|
||||
and instead _escape sequences_ can be included, starting with the
|
||||
backslash `\`:
|
||||
|
@ -457,14 +458,14 @@ are provided, the first is the key and the second is the value.
|
|||
Tuple, object, list, map, and set types are iterable. The type of collection
|
||||
used defines how the key and value variables are populated:
|
||||
|
||||
* For tuple and list types, the _key_ is the zero-based index into the
|
||||
- For tuple and list types, the _key_ is the zero-based index into the
|
||||
sequence for each element, and the _value_ is the element value. The
|
||||
elements are visited in index order.
|
||||
* For object and map types, the _key_ is the string attribute name or element
|
||||
- For object and map types, the _key_ is the string attribute name or element
|
||||
key, and the _value_ is the attribute or element value. The elements are
|
||||
visited in the order defined by a lexicographic sort of the attribute names
|
||||
or keys.
|
||||
* For set types, the _key_ and _value_ are both the element value. The elements
|
||||
- For set types, the _key_ and _value_ are both the element value. The elements
|
||||
are visited in an undefined but consistent order.
|
||||
|
||||
The expression after the colon and (in the case of object `for`) the expression
|
||||
|
@ -486,12 +487,12 @@ immediately after the value expression, this activates the grouping mode in
|
|||
which each value in the resulting object is a _tuple_ of all of the values
|
||||
that were produced against each distinct key.
|
||||
|
||||
* `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
|
||||
* `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
|
||||
* `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
|
||||
* `{for i, v in ["a", "a", "b"]: k => v}` produces an error, because attribute
|
||||
- `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
|
||||
- `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
|
||||
- `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
|
||||
- `{for i, v in ["a", "a", "b"]: k => v}` produces an error, because attribute
|
||||
`a` is defined twice.
|
||||
* `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
|
||||
- `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
|
||||
|
||||
If the `if` keyword is used after the element expression(s), it applies an
|
||||
additional predicate that can be used to conditionally filter elements from
|
||||
|
@ -501,7 +502,7 @@ element expression(s). It must evaluate to a boolean value; if `true`, the
|
|||
element will be evaluated as normal, while if `false` the element will be
|
||||
skipped.
|
||||
|
||||
* `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`.
|
||||
- `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`.
|
||||
|
||||
If the collection value, element expression(s) or condition expression return
|
||||
unknown values that are otherwise type-valid, the result is a value of the
|
||||
|
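The for-expression examples above can be exercised directly through the hclsyntax package. The following is a minimal sketch (assuming the hcl2 import paths used elsewhere in this vendor tree); it parses the grouping-mode example and evaluates it with an empty evaluation context:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := `{for i, v in ["a", "a", "b"]: v => i...}`

	// Parse the expression using the native syntax parser.
	expr, diags := hclsyntax.ParseExpression([]byte(src), "example.hcl", hcl.Pos{Line: 1, Column: 1, Byte: 0})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// No variables or functions are needed for this literal example.
	val, diags := expr.Value(&hcl.EvalContext{})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// Grouping mode collects the indices per distinct key:
	// {a = [0, 1], b = [2]}
	fmt.Println(val.GoString())
}
```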
@ -566,10 +567,10 @@ elements in a tuple, list, or set value.
|
|||
|
||||
There are two kinds of "splat" operator:
|
||||
|
||||
* The _attribute-only_ splat operator supports only attribute lookups into
|
||||
- The _attribute-only_ splat operator supports only attribute lookups into
|
||||
the elements from a list, but supports an arbitrary number of them.
|
||||
|
||||
* The _full_ splat operator additionally supports indexing into the elements
|
||||
- The _full_ splat operator additionally supports indexing into the elements
|
||||
from a list, and allows any combination of attribute access and index
|
||||
operations.
|
||||
|
||||
|
@ -582,9 +583,9 @@ fullSplat = "[" "*" "]" (GetAttr | Index)*;
|
|||
The splat operators can be thought of as shorthands for common operations that
|
||||
could otherwise be performed using _for expressions_:
|
||||
|
||||
* `tuple.*.foo.bar[0]` is approximately equivalent to
|
||||
- `tuple.*.foo.bar[0]` is approximately equivalent to
|
||||
`[for v in tuple: v.foo.bar][0]`.
|
||||
* `tuple[*].foo.bar[0]` is approximately equivalent to
|
||||
- `tuple[*].foo.bar[0]` is approximately equivalent to
|
||||
`[for v in tuple: v.foo.bar[0]]`
|
||||
|
||||
Note the difference in how the trailing index operator is interpreted in
|
||||
|
@ -596,13 +597,15 @@ _for expressions_ shown above: if a splat operator is applied to a value that
|
|||
is _not_ of tuple, list, or set type, the value is coerced automatically into
|
||||
a single-value list of the value type:
|
||||
|
||||
* `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object`
|
||||
- `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object`
|
||||
is a single object.
|
||||
* `any_number.*` is equivalent to `[any_number]`, assuming that `any_number`
|
||||
- `any_number.*` is equivalent to `[any_number]`, assuming that `any_number`
|
||||
is a single number.
|
||||
|
||||
If the left operand of a splat operator is an unknown value of any type, the
|
||||
result is a value of the dynamic pseudo-type.
|
||||
If applied to a null value that is not tuple, list, or set, the result is always
|
||||
an empty tuple, which allows conveniently converting a possibly-null scalar
|
||||
value into a tuple of zero or one elements. It is illegal to apply a splat
|
||||
operator to a null value of tuple, list, or set type.
|
||||
|
||||
### Operations
|
||||
|
||||
|
@ -683,7 +686,7 @@ Arithmetic operations are considered to be performed in an arbitrary-precision
|
|||
number space.
|
||||
|
||||
If either operand of an arithmetic operator is an unknown number or a value
|
||||
of the dynamic pseudo-type, the result is an unknown number.
|
||||
of the dynamic pseudo-type, the result is an unknown number.
|
||||
|
||||
### Logic Operators
|
||||
|
||||
|
@ -708,7 +711,7 @@ the outcome of a boolean expression.
|
|||
Conditional = Expression "?" Expression ":" Expression;
|
||||
```
|
||||
|
||||
The first expression is the _predicate_, which is evaluated and must produce
|
||||
The first expression is the _predicate_, which is evaluated and must produce
|
||||
a boolean result. If the predicate value is `true`, the result of the second
|
||||
expression is the result of the conditional. If the predicate value is
|
||||
`false`, the result of the third expression is the result of the conditional.
|
||||
|
@ -776,8 +779,8 @@ interpolations or directives that are adjacent to it. A strip marker is
|
|||
a tilde (`~`) placed immediately after the opening `{` or before the closing
|
||||
`}` of a template sequence:
|
||||
|
||||
* `hello ${~ "world" }` produces `"helloworld"`.
|
||||
* `%{ if true ~} hello %{~ endif }` produces `"hello"`.
|
||||
- `hello ${~ "world" }` produces `"helloworld"`.
|
||||
- `%{ if true ~} hello %{~ endif }` produces `"hello"`.
|
||||
|
||||
When a strip marker is present, any spaces adjacent to it in the corresponding
|
||||
string literal (if any) are removed before producing the final value. Space
|
||||
|
@ -786,7 +789,7 @@ characters are interpreted as per Unicode's definition.
|
|||
Stripping is done at syntax level rather than value level. Values returned
|
||||
by interpolations or directives are not subject to stripping:
|
||||
|
||||
* `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`,
|
||||
- `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`,
|
||||
because the space is not in a template literal directly adjacent to the
|
||||
strip marker.
|
||||
|
||||
|
@ -824,9 +827,9 @@ TemplateIf = (
|
|||
The evaluation of the `if` directive is equivalent to the conditional
|
||||
expression, with the following exceptions:
|
||||
|
||||
* The two sub-templates always produce strings, and thus the result value is
|
||||
- The two sub-templates always produce strings, and thus the result value is
|
||||
also always a string.
|
||||
* The `else` clause may be omitted, in which case the conditional's third
|
||||
- The `else` clause may be omitted, in which case the conditional's third
|
||||
expression result is implied to be the empty string.
|
||||
|
||||
### Template For Directive
|
||||
|
@ -846,9 +849,9 @@ TemplateFor = (
|
|||
The evaluation of the `for` directive is equivalent to the _for expression_
|
||||
when producing a tuple, with the following exceptions:
|
||||
|
||||
* The sub-template always produces a string.
|
||||
* There is no equivalent of the "if" clause on the for expression.
|
||||
* The elements of the resulting tuple are all converted to strings and
|
||||
- The sub-template always produces a string.
|
||||
- There is no equivalent of the "if" clause on the for expression.
|
||||
- The elements of the resulting tuple are all converted to strings and
|
||||
concatenated to produce a flat string result.
|
||||
|
||||
### Template Interpolation Unwrapping
|
||||
|
@ -864,13 +867,13 @@ template or expression syntax. Unwrapping allows arbitrary expressions to be
|
|||
used to populate attributes when strings in such languages are interpreted
|
||||
as templates.
|
||||
|
||||
* `${true}` produces the boolean value `true`
|
||||
* `${"${true}"}` produces the boolean value `true`, because both the inner
|
||||
- `${true}` produces the boolean value `true`
|
||||
- `${"${true}"}` produces the boolean value `true`, because both the inner
|
||||
and outer interpolations are subject to unwrapping.
|
||||
* `hello ${true}` produces the string `"hello true"`
|
||||
* `${""}${true}` produces the string `"true"` because there are two
|
||||
- `hello ${true}` produces the string `"hello true"`
|
||||
- `${""}${true}` produces the string `"true"` because there are two
|
||||
interpolation sequences, even though one produces an empty result.
|
||||
* `%{ for v in [true] }${v}%{ endif }` produces the string `true` because
|
||||
- `%{ for v in [true] }${v}%{ endif }` produces the string `true` because
|
||||
the presence of the `for` directive circumvents the unwrapping even though
|
||||
the final result is a single value.
|
||||
|
||||
|
|
34
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
generated
vendored
|
@ -47,8 +47,8 @@ type Body struct {
|
|||
var assertBodyImplBody hcl.Body = &Body{}
|
||||
|
||||
func (b *Body) walkChildNodes(w internalWalkFunc) {
|
||||
b.Attributes = w(b.Attributes).(Attributes)
|
||||
b.Blocks = w(b.Blocks).(Blocks)
|
||||
w(b.Attributes)
|
||||
w(b.Blocks)
|
||||
}
|
||||
|
||||
func (b *Body) Range() hcl.Range {
|
||||
|
@ -86,8 +86,8 @@ func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostic
|
|||
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unsupported attribute",
|
||||
Detail: fmt.Sprintf("An attribute named %q is not expected here.%s", name, suggestion),
|
||||
Summary: "Unsupported argument",
|
||||
Detail: fmt.Sprintf("An argument named %q is not expected here.%s", name, suggestion),
|
||||
Subject: &attr.NameRange,
|
||||
})
|
||||
}
|
||||
|
@ -107,7 +107,7 @@ func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostic
|
|||
// Is there an attribute of the same name?
|
||||
for _, attrS := range schema.Attributes {
|
||||
if attrS.Name == blockTy {
|
||||
suggestion = fmt.Sprintf(" Did you mean to define attribute %q?", blockTy)
|
||||
suggestion = fmt.Sprintf(" Did you mean to define argument %q? If so, use the equals sign to assign it a value.", blockTy)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@ -151,8 +151,8 @@ func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Bod
|
|||
if attrS.Required {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Missing required attribute",
|
||||
Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name),
|
||||
Summary: "Missing required argument",
|
||||
Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name),
|
||||
Subject: b.MissingItemRange().Ptr(),
|
||||
})
|
||||
}
|
||||
|
@ -255,9 +255,9 @@ func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
|
|||
example := b.Blocks[0]
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Unexpected %s block", example.Type),
|
||||
Summary: fmt.Sprintf("Unexpected %q block", example.Type),
|
||||
Detail: "Blocks are not allowed here.",
|
||||
Context: &example.TypeRange,
|
||||
Subject: &example.TypeRange,
|
||||
})
|
||||
// we will continue processing anyway, and return the attributes
|
||||
// we are able to find so that certain analyses can still be done
|
||||
|
@ -286,8 +286,8 @@ func (b *Body) MissingItemRange() hcl.Range {
|
|||
type Attributes map[string]*Attribute
|
||||
|
||||
func (a Attributes) walkChildNodes(w internalWalkFunc) {
|
||||
for k, attr := range a {
|
||||
a[k] = w(attr).(*Attribute)
|
||||
for _, attr := range a {
|
||||
w(attr)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -321,7 +321,7 @@ type Attribute struct {
|
|||
}
|
||||
|
||||
func (a *Attribute) walkChildNodes(w internalWalkFunc) {
|
||||
a.Expr = w(a.Expr).(Expression)
|
||||
w(a.Expr)
|
||||
}
|
||||
|
||||
func (a *Attribute) Range() hcl.Range {
|
||||
|
@ -346,8 +346,8 @@ func (a *Attribute) AsHCLAttribute() *hcl.Attribute {
|
|||
type Blocks []*Block
|
||||
|
||||
func (bs Blocks) walkChildNodes(w internalWalkFunc) {
|
||||
for i, block := range bs {
|
||||
bs[i] = w(block).(*Block)
|
||||
for _, block := range bs {
|
||||
w(block)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -378,9 +378,13 @@ type Block struct {
|
|||
}
|
||||
|
||||
func (b *Block) walkChildNodes(w internalWalkFunc) {
|
||||
b.Body = w(b.Body).(*Body)
|
||||
w(b.Body)
|
||||
}
|
||||
|
||||
func (b *Block) Range() hcl.Range {
|
||||
return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange)
|
||||
}
|
||||
|
||||
func (b *Block) DefRange() hcl.Range {
|
||||
return hcl.RangeBetween(b.TypeRange, b.OpenBraceRange)
|
||||
}
|
||||
|
|
62
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go
generated
vendored
|
@ -1,6 +1,7 @@
|
|||
package hclsyntax
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/apparentlymart/go-textseg/textseg"
|
||||
|
@ -89,6 +90,7 @@ const (
|
|||
TokenBitwiseNot TokenType = '~'
|
||||
TokenBitwiseXor TokenType = '^'
|
||||
TokenStarStar TokenType = '➚'
|
||||
TokenApostrophe TokenType = '\''
|
||||
TokenBacktick TokenType = '`'
|
||||
TokenSemicolon TokenType = ';'
|
||||
TokenTabs TokenType = '␉'
|
||||
|
@ -114,10 +116,11 @@ const (
|
|||
)
|
||||
|
||||
type tokenAccum struct {
|
||||
Filename string
|
||||
Bytes []byte
|
||||
Pos hcl.Pos
|
||||
Tokens []Token
|
||||
Filename string
|
||||
Bytes []byte
|
||||
Pos hcl.Pos
|
||||
Tokens []Token
|
||||
StartByte int
|
||||
}
|
||||
|
||||
func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
|
||||
|
@ -125,11 +128,11 @@ func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
|
|||
// the start pos to get our end pos.
|
||||
|
||||
start := f.Pos
|
||||
start.Column += startOfs - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
|
||||
start.Byte = startOfs
|
||||
start.Column += startOfs + f.StartByte - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
|
||||
start.Byte = startOfs + f.StartByte
|
||||
|
||||
end := start
|
||||
end.Byte = endOfs
|
||||
end.Byte = endOfs + f.StartByte
|
||||
b := f.Bytes[startOfs:endOfs]
|
||||
for len(b) > 0 {
|
||||
advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
|
||||
|
@ -160,6 +163,13 @@ type heredocInProgress struct {
|
|||
StartOfLine bool
|
||||
}
|
||||
|
||||
func tokenOpensFlushHeredoc(tok Token) bool {
|
||||
if tok.Type != TokenOHeredoc {
|
||||
return false
|
||||
}
|
||||
return bytes.HasPrefix(tok.Bytes, []byte{'<', '<', '-'})
|
||||
}
|
||||
|
||||
// checkInvalidTokens does a simple pass across the given tokens and generates
|
||||
// diagnostics for tokens that should _never_ appear in HCL source. This
|
||||
// is intended to avoid the need for the parser to have special support
|
||||
|
@ -174,11 +184,15 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
|
|||
toldBitwise := 0
|
||||
toldExponent := 0
|
||||
toldBacktick := 0
|
||||
toldApostrophe := 0
|
||||
toldSemicolon := 0
|
||||
toldTabs := 0
|
||||
toldBadUTF8 := 0
|
||||
|
||||
for _, tok := range tokens {
|
||||
// copy token so it's safe to point to it
|
||||
tok := tok
|
||||
|
||||
switch tok.Type {
|
||||
case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot:
|
||||
if toldBitwise < 4 {
|
||||
|
@ -214,16 +228,30 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
|
|||
case TokenBacktick:
|
||||
// Only report for alternating (even) backticks, so we won't report both start and ends of the same
|
||||
// backtick-quoted string.
|
||||
if toldExponent < 4 && (toldExponent%2) == 0 {
|
||||
if (toldBacktick % 2) == 0 {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid character",
|
||||
Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"<<EOT\".",
|
||||
Subject: &tok.Range,
|
||||
})
|
||||
|
||||
}
|
||||
if toldBacktick <= 2 {
|
||||
toldBacktick++
|
||||
}
|
||||
case TokenApostrophe:
|
||||
if (toldApostrophe % 2) == 0 {
|
||||
newDiag := &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid character",
|
||||
Detail: "Single quotes are not valid. Use double quotes (\") to enclose strings.",
|
||||
Subject: &tok.Range,
|
||||
}
|
||||
diags = append(diags, newDiag)
|
||||
}
|
||||
if toldApostrophe <= 2 {
|
||||
toldApostrophe++
|
||||
}
|
||||
case TokenSemicolon:
|
||||
if toldSemicolon < 1 {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
|
@ -264,9 +292,21 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
|
|||
Detail: "This character is not used within the language.",
|
||||
Subject: &tok.Range,
|
||||
})
|
||||
|
||||
toldTabs++
|
||||
}
|
||||
}
|
||||
return diags
|
||||
}
|
||||
|
||||
var utf8BOM = []byte{0xef, 0xbb, 0xbf}
|
||||
|
||||
// stripUTF8BOM checks whether the given buffer begins with a UTF-8 byte order
|
||||
// mark (0xEF 0xBB 0xBF) and, if so, returns a truncated slice with the same
|
||||
// backing array but with the BOM skipped.
|
||||
//
|
||||
// If there is no BOM present, the given slice is returned verbatim.
|
||||
func stripUTF8BOM(src []byte) []byte {
|
||||
if bytes.HasPrefix(src, utf8BOM) {
|
||||
return src[3:]
|
||||
}
|
||||
return src
|
||||
}
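The changes above strip a leading UTF-8 byte order mark before scanning and then fold the skipped length back into every token position via the new StartByte field, so reported byte offsets still refer to the original buffer. A minimal standalone sketch of that bookkeeping (illustrative only, not the vendored implementation; the helper below is hypothetical):

```go
package main

import (
	"bytes"
	"fmt"
)

var utf8BOM = []byte{0xef, 0xbb, 0xbf}

// stripBOM returns the input without a leading UTF-8 BOM, plus the number of
// bytes that were skipped so callers can offset positions back into the
// original buffer.
func stripBOM(src []byte) ([]byte, int) {
	if bytes.HasPrefix(src, utf8BOM) {
		return src[3:], 3
	}
	return src, 0
}

func main() {
	raw := append(append([]byte{}, utf8BOM...), []byte(`a = 1`)...)

	data, skipped := stripBOM(raw)

	// A token found at offset 0 of the stripped data really starts at
	// offset `skipped` of the original file, which is what the StartByte
	// field accounts for when positions are emitted.
	tokenOffsetInStripped := 0
	fmt.Println(string(data), tokenOffsetInStripped+skipped) // "a = 1" 3
}
```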
|
||||
|
|
97
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go
generated
vendored
|
@ -4,7 +4,7 @@ package hclsyntax
|
|||
|
||||
import "strconv"
|
||||
|
||||
const _TokenType_name = "TokenNilTokenNewlineTokenBangTokenPercentTokenBitwiseAndTokenOParenTokenCParenTokenStarTokenPlusTokenCommaTokenMinusTokenDotTokenSlashTokenColonTokenSemicolonTokenLessThanTokenEqualTokenGreaterThanTokenQuestionTokenCommentTokenOHeredocTokenIdentTokenNumberLitTokenQuotedLitTokenStringLitTokenOBrackTokenCBrackTokenBitwiseXorTokenBacktickTokenCHeredocTokenOBraceTokenBitwiseOrTokenCBraceTokenBitwiseNotTokenOQuoteTokenCQuoteTokenTemplateControlTokenEllipsisTokenFatArrowTokenTemplateSeqEndTokenAndTokenOrTokenTemplateInterpTokenEqualOpTokenNotEqualTokenLessThanEqTokenGreaterThanEqTokenEOFTokenTabsTokenStarStarTokenInvalidTokenBadUTF8"
|
||||
const _TokenType_name = "TokenNilTokenNewlineTokenBangTokenPercentTokenBitwiseAndTokenApostropheTokenOParenTokenCParenTokenStarTokenPlusTokenCommaTokenMinusTokenDotTokenSlashTokenColonTokenSemicolonTokenLessThanTokenEqualTokenGreaterThanTokenQuestionTokenCommentTokenOHeredocTokenIdentTokenNumberLitTokenQuotedLitTokenStringLitTokenOBrackTokenCBrackTokenBitwiseXorTokenBacktickTokenCHeredocTokenOBraceTokenBitwiseOrTokenCBraceTokenBitwiseNotTokenOQuoteTokenCQuoteTokenTemplateControlTokenEllipsisTokenFatArrowTokenTemplateSeqEndTokenAndTokenOrTokenTemplateInterpTokenEqualOpTokenNotEqualTokenLessThanEqTokenGreaterThanEqTokenEOFTokenTabsTokenStarStarTokenInvalidTokenBadUTF8"
|
||||
|
||||
var _TokenType_map = map[TokenType]string{
|
||||
0: _TokenType_name[0:8],
|
||||
|
@ -12,53 +12,54 @@ var _TokenType_map = map[TokenType]string{
|
|||
33: _TokenType_name[20:29],
|
||||
37: _TokenType_name[29:41],
|
||||
38: _TokenType_name[41:56],
|
||||
40: _TokenType_name[56:67],
|
||||
41: _TokenType_name[67:78],
|
||||
42: _TokenType_name[78:87],
|
||||
43: _TokenType_name[87:96],
|
||||
44: _TokenType_name[96:106],
|
||||
45: _TokenType_name[106:116],
|
||||
46: _TokenType_name[116:124],
|
||||
47: _TokenType_name[124:134],
|
||||
58: _TokenType_name[134:144],
|
||||
59: _TokenType_name[144:158],
|
||||
60: _TokenType_name[158:171],
|
||||
61: _TokenType_name[171:181],
|
||||
62: _TokenType_name[181:197],
|
||||
63: _TokenType_name[197:210],
|
||||
67: _TokenType_name[210:222],
|
||||
72: _TokenType_name[222:235],
|
||||
73: _TokenType_name[235:245],
|
||||
78: _TokenType_name[245:259],
|
||||
81: _TokenType_name[259:273],
|
||||
83: _TokenType_name[273:287],
|
||||
91: _TokenType_name[287:298],
|
||||
93: _TokenType_name[298:309],
|
||||
94: _TokenType_name[309:324],
|
||||
96: _TokenType_name[324:337],
|
||||
104: _TokenType_name[337:350],
|
||||
123: _TokenType_name[350:361],
|
||||
124: _TokenType_name[361:375],
|
||||
125: _TokenType_name[375:386],
|
||||
126: _TokenType_name[386:401],
|
||||
171: _TokenType_name[401:412],
|
||||
187: _TokenType_name[412:423],
|
||||
955: _TokenType_name[423:443],
|
||||
8230: _TokenType_name[443:456],
|
||||
8658: _TokenType_name[456:469],
|
||||
8718: _TokenType_name[469:488],
|
||||
8743: _TokenType_name[488:496],
|
||||
8744: _TokenType_name[496:503],
|
||||
8747: _TokenType_name[503:522],
|
||||
8788: _TokenType_name[522:534],
|
||||
8800: _TokenType_name[534:547],
|
||||
8804: _TokenType_name[547:562],
|
||||
8805: _TokenType_name[562:580],
|
||||
9220: _TokenType_name[580:588],
|
||||
9225: _TokenType_name[588:597],
|
||||
10138: _TokenType_name[597:610],
|
||||
65533: _TokenType_name[610:622],
|
||||
128169: _TokenType_name[622:634],
|
||||
39: _TokenType_name[56:71],
|
||||
40: _TokenType_name[71:82],
|
||||
41: _TokenType_name[82:93],
|
||||
42: _TokenType_name[93:102],
|
||||
43: _TokenType_name[102:111],
|
||||
44: _TokenType_name[111:121],
|
||||
45: _TokenType_name[121:131],
|
||||
46: _TokenType_name[131:139],
|
||||
47: _TokenType_name[139:149],
|
||||
58: _TokenType_name[149:159],
|
||||
59: _TokenType_name[159:173],
|
||||
60: _TokenType_name[173:186],
|
||||
61: _TokenType_name[186:196],
|
||||
62: _TokenType_name[196:212],
|
||||
63: _TokenType_name[212:225],
|
||||
67: _TokenType_name[225:237],
|
||||
72: _TokenType_name[237:250],
|
||||
73: _TokenType_name[250:260],
|
||||
78: _TokenType_name[260:274],
|
||||
81: _TokenType_name[274:288],
|
||||
83: _TokenType_name[288:302],
|
||||
91: _TokenType_name[302:313],
|
||||
93: _TokenType_name[313:324],
|
||||
94: _TokenType_name[324:339],
|
||||
96: _TokenType_name[339:352],
|
||||
104: _TokenType_name[352:365],
|
||||
123: _TokenType_name[365:376],
|
||||
124: _TokenType_name[376:390],
|
||||
125: _TokenType_name[390:401],
|
||||
126: _TokenType_name[401:416],
|
||||
171: _TokenType_name[416:427],
|
||||
187: _TokenType_name[427:438],
|
||||
955: _TokenType_name[438:458],
|
||||
8230: _TokenType_name[458:471],
|
||||
8658: _TokenType_name[471:484],
|
||||
8718: _TokenType_name[484:503],
|
||||
8743: _TokenType_name[503:511],
|
||||
8744: _TokenType_name[511:518],
|
||||
8747: _TokenType_name[518:537],
|
||||
8788: _TokenType_name[537:549],
|
||||
8800: _TokenType_name[549:562],
|
||||
8804: _TokenType_name[562:577],
|
||||
8805: _TokenType_name[577:595],
|
||||
9220: _TokenType_name[595:603],
|
||||
9225: _TokenType_name[603:612],
|
||||
10138: _TokenType_name[612:625],
|
||||
65533: _TokenType_name[625:637],
|
||||
128169: _TokenType_name[637:649],
|
||||
}
|
||||
|
||||
func (i TokenType) String() string {
|
||||
|
|
6
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go
generated
vendored
|
@ -72,15 +72,15 @@ func (w *variablesWalker) Exit(n Node) hcl.Diagnostics {
 // that the child scope struct wraps.
 type ChildScope struct {
 	LocalNames map[string]struct{}
-	Expr       *Expression // pointer because it can be replaced on walk
+	Expr       Expression
 }
 
 func (e ChildScope) walkChildNodes(w internalWalkFunc) {
-	*(e.Expr) = w(*(e.Expr)).(Expression)
+	w(e.Expr)
 }
 
 // Range returns the range of the expression that the ChildScope is
 // encapsulating. It isn't really very useful to call Range on a ChildScope.
 func (e ChildScope) Range() hcl.Range {
-	return (*e.Expr).Range()
+	return e.Expr.Range()
 }
|
|
44
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go
generated
vendored
|
@ -15,9 +15,8 @@ type VisitFunc func(node Node) hcl.Diagnostics
|
|||
// and returned as a single set.
|
||||
func VisitAll(node Node, f VisitFunc) hcl.Diagnostics {
|
||||
diags := f(node)
|
||||
node.walkChildNodes(func(node Node) Node {
|
||||
node.walkChildNodes(func(node Node) {
|
||||
diags = append(diags, VisitAll(node, f)...)
|
||||
return node
|
||||
})
|
||||
return diags
|
||||
}
|
||||
|
@ -33,45 +32,10 @@ type Walker interface {
|
|||
// Enter and Exit functions.
|
||||
func Walk(node Node, w Walker) hcl.Diagnostics {
|
||||
diags := w.Enter(node)
|
||||
node.walkChildNodes(func(node Node) Node {
|
||||
node.walkChildNodes(func(node Node) {
|
||||
diags = append(diags, Walk(node, w)...)
|
||||
return node
|
||||
})
|
||||
moreDiags := w.Exit(node)
|
||||
diags = append(diags, moreDiags...)
|
||||
return diags
|
||||
}
|
||||
|
||||
// Transformer is an interface used with Transform
|
||||
type Transformer interface {
|
||||
// Transform accepts a node and returns a replacement node along with
|
||||
// a flag for whether to also visit child nodes. If the flag is false,
|
||||
// none of the child nodes will be visited and the TransformExit method
|
||||
// will not be called for the node.
|
||||
//
|
||||
// It is acceptable and appropriate for Transform to return the same node
|
||||
// it was given, for situations where no transform is needed.
|
||||
Transform(node Node) (Node, bool, hcl.Diagnostics)
|
||||
|
||||
// TransformExit signals the end of transformations of child nodes of the
|
||||
// given node. If Transform returned a new node, the given node is the
|
||||
// node that was returned, rather than the node that was originally
|
||||
// encountered.
|
||||
TransformExit(node Node) hcl.Diagnostics
|
||||
}
|
||||
|
||||
// Transform allows for in-place transformations of an AST starting with a
|
||||
// particular node. The provider Transformer implementation drives the
|
||||
// transformation process. The return value is the node that replaced the
|
||||
// given top-level node.
|
||||
func Transform(node Node, t Transformer) (Node, hcl.Diagnostics) {
|
||||
newNode, descend, diags := t.Transform(node)
|
||||
if !descend {
|
||||
return newNode, diags
|
||||
}
|
||||
node.walkChildNodes(func(node Node) Node {
|
||||
newNode, newDiags := Transform(node, t)
|
||||
diags = append(diags, newDiags...)
|
||||
return newNode
|
||||
})
|
||||
diags = append(diags, t.TransformExit(newNode)...)
|
||||
return newNode, diags
|
||||
}
|
||||
|
|
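With the callback change above, child walks no longer return replacement nodes; visitors only observe the tree. A short usage sketch of the exported VisitAll entry point under that model (assuming the hcl2 import paths used in this vendor tree; illustrative only):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := []byte("a = 1 + 2\n")

	file, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1, Byte: 0})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// VisitAll walks every node; the visitor can inspect but not replace
	// nodes, matching the new walkChildNodes signature.
	count := 0
	hclsyntax.VisitAll(file.Body.(*hclsyntax.Body), func(_ hclsyntax.Node) hcl.Diagnostics {
		count++
		return nil
	})
	fmt.Println("visited", count, "nodes")
}
```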
13
vendor/github.com/hashicorp/hcl2/hcl/json/parser.go
generated
vendored
|
@ -3,9 +3,9 @@ package json
 import (
 	"encoding/json"
 	"fmt"
-	"math/big"
 
 	"github.com/hashicorp/hcl2/hcl"
 	"github.com/zclconf/go-cty/cty"
 )
 
 func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) {
@ -370,10 +370,15 @@ func parseNumber(p *peeker) (node, hcl.Diagnostics) {
 		}
 	}
 
-	f, _, err := big.ParseFloat(string(num), 10, 512, big.ToNearestEven)
+	// We want to guarantee that we parse numbers the same way as cty (and thus
+	// native syntax HCL) would here, so we'll use the cty parser even though
+	// in most other cases we don't actually introduce cty concepts until
+	// decoding time. We'll unwrap the parsed float immediately afterwards, so
+	// the cty value is just a temporary helper.
+	nv, err := cty.ParseNumberVal(string(num))
 	if err != nil {
 		// Should never happen if above passed, since JSON numbers are a subset
-		// of what big.Float can parse...
+		// of what cty can parse...
 		return nil, hcl.Diagnostics{
 			{
 				Severity: hcl.DiagError,
@ -385,7 +390,7 @@ func parseNumber(p *peeker) (node, hcl.Diagnostics) {
 	}
 
 	return &numberVal{
-		Value:    f,
+		Value:    nv.AsBigFloat(),
 		SrcRange: tok.Range,
 	}, nil
 }
|
|
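The parser.go change above swaps big.ParseFloat for cty's own number parser so that JSON and native syntax agree on numeric precision, and then immediately unwraps the resulting big.Float for the JSON AST. The round trip looks roughly like this (a standalone sketch, not the vendored code):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Parse the number exactly the way cty (and therefore native-syntax
	// HCL) would.
	nv, err := cty.ParseNumberVal("0.1")
	if err != nil {
		panic(err)
	}

	// The unwrapped big.Float is what ends up stored in the JSON AST node.
	f := nv.AsBigFloat()
	fmt.Println(f.Text('g', 10))
}
```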
14
vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
generated
vendored
|
@ -18,11 +18,11 @@ _Parsing_ such JSON has some additional constraints not beyond what is normally
|
|||
supported by JSON parsers, so a specialized parser may be required that
|
||||
is able to:
|
||||
|
||||
* Preserve the relative ordering of properties defined in an object.
|
||||
* Preserve multiple definitions of the same property name.
|
||||
* Preserve numeric values to the precision required by the number type
|
||||
- Preserve the relative ordering of properties defined in an object.
|
||||
- Preserve multiple definitions of the same property name.
|
||||
- Preserve numeric values to the precision required by the number type
|
||||
in [the HCL syntax-agnostic information model](../spec.md).
|
||||
* Retain source location information for parsed tokens/constructs in order
|
||||
- Retain source location information for parsed tokens/constructs in order
|
||||
to produce good error messages.
|
||||
|
||||
## Structural Elements
|
||||
|
@ -118,6 +118,7 @@ type:
|
|||
]
|
||||
}
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"foo": []
|
||||
|
@ -147,7 +148,7 @@ the following examples:
|
|||
"boz": {
|
||||
"baz": {
|
||||
"child_attr": "baz"
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -189,7 +190,7 @@ the following examples:
|
|||
"boz": {
|
||||
"child_attr": "baz"
|
||||
}
|
||||
},
|
||||
}
|
||||
},
|
||||
{
|
||||
"bar": {
|
||||
|
@ -402,4 +403,3 @@ to that expression.
|
|||
|
||||
If the original expression is not a string or its contents cannot be parsed
|
||||
as a native syntax expression then static call analysis is not supported.
|
||||
|
||||
|
|
12
vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
generated
vendored
|
@ -266,6 +266,9 @@ func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labels
|
|||
copy(labelR, labelRanges)
|
||||
|
||||
switch tv := v.(type) {
|
||||
case *nullVal:
|
||||
// There is no block content, e.g the value is null.
|
||||
return
|
||||
case *objectVal:
|
||||
// Single instance of the block
|
||||
*blocks = append(*blocks, &hcl.Block{
|
||||
|
@ -324,6 +327,8 @@ func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.D
|
|||
var attrs []*objectAttr
|
||||
|
||||
switch tv := v.(type) {
|
||||
case *nullVal:
|
||||
// If a value is null, then we don't return any attributes or return an error.
|
||||
|
||||
case *objectVal:
|
||||
attrs = append(attrs, tv.Attrs...)
|
||||
|
@ -424,7 +429,7 @@ func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
|||
known := true
|
||||
for _, jsonAttr := range v.Attrs {
|
||||
// In this one context we allow keys to contain interpolation
|
||||
// experessions too, assuming we're evaluating in interpolation
|
||||
// expressions too, assuming we're evaluating in interpolation
|
||||
// mode. This achieves parity with the native syntax where
|
||||
// object expressions can have dynamic keys, while block contents
|
||||
// may not.
|
||||
|
@ -533,6 +538,11 @@ func (e *expression) Variables() []hcl.Traversal {
|
|||
}
|
||||
case *objectVal:
|
||||
for _, jsonAttr := range v.Attrs {
|
||||
keyExpr := &stringVal{ // we're going to treat key as an expression in this context
|
||||
Value: jsonAttr.Name,
|
||||
SrcRange: jsonAttr.NameRange,
|
||||
}
|
||||
vars = append(vars, (&expression{src: keyExpr}).Variables()...)
|
||||
vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...)
|
||||
}
|
||||
}
|
||||
|
|
2
vendor/github.com/hashicorp/hcl2/hcl/merged.go
generated
vendored
|
@ -171,7 +171,7 @@ func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyCon
 	}
 
 	if thisLeftovers != nil {
-		mergedLeftovers = append(mergedLeftovers)
+		mergedLeftovers = append(mergedLeftovers, thisLeftovers)
 	}
 	if len(thisDiags) != 0 {
 		diags = append(diags, thisDiags...)
|
|
119
vendor/github.com/hashicorp/hcl2/hcl/ops.go
generated
vendored
|
@ -145,3 +145,122 @@ func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics)
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
// GetAttr is a helper function that performs the same operation as the
|
||||
// attribute access in the HCL expression language. That is, the result is the
|
||||
// same as it would be for obj.attr in a configuration expression.
|
||||
//
|
||||
// This is exported so that applications can access attributes in a manner
|
||||
// consistent with how the language does it, including handling of null and
|
||||
// unknown values, etc.
|
||||
//
|
||||
// Diagnostics are produced if the given combination of values is not valid.
|
||||
// Therefore a pointer to a source range must be provided to use in diagnostics,
|
||||
// though nil can be provided if the calling application is going to
|
||||
// ignore the subject of the returned diagnostics anyway.
|
||||
func GetAttr(obj cty.Value, attrName string, srcRange *Range) (cty.Value, Diagnostics) {
|
||||
if obj.IsNull() {
|
||||
return cty.DynamicVal, Diagnostics{
|
||||
{
|
||||
Severity: DiagError,
|
||||
Summary: "Attempt to get attribute from null value",
|
||||
Detail: "This value is null, so it does not have any attributes.",
|
||||
Subject: srcRange,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
ty := obj.Type()
|
||||
switch {
|
||||
case ty.IsObjectType():
|
||||
if !ty.HasAttribute(attrName) {
|
||||
return cty.DynamicVal, Diagnostics{
|
||||
{
|
||||
Severity: DiagError,
|
||||
Summary: "Unsupported attribute",
|
||||
Detail: fmt.Sprintf("This object does not have an attribute named %q.", attrName),
|
||||
Subject: srcRange,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if !obj.IsKnown() {
|
||||
return cty.UnknownVal(ty.AttributeType(attrName)), nil
|
||||
}
|
||||
|
||||
return obj.GetAttr(attrName), nil
|
||||
case ty.IsMapType():
|
||||
if !obj.IsKnown() {
|
||||
return cty.UnknownVal(ty.ElementType()), nil
|
||||
}
|
||||
|
||||
idx := cty.StringVal(attrName)
|
||||
if obj.HasIndex(idx).False() {
|
||||
return cty.DynamicVal, Diagnostics{
|
||||
{
|
||||
Severity: DiagError,
|
||||
Summary: "Missing map element",
|
||||
Detail: fmt.Sprintf("This map does not have an element with the key %q.", attrName),
|
||||
Subject: srcRange,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return obj.Index(idx), nil
|
||||
case ty == cty.DynamicPseudoType:
|
||||
return cty.DynamicVal, nil
|
||||
default:
|
||||
return cty.DynamicVal, Diagnostics{
|
||||
{
|
||||
Severity: DiagError,
|
||||
Summary: "Unsupported attribute",
|
||||
Detail: "This value does not have any attributes.",
|
||||
Subject: srcRange,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// ApplyPath is a helper function that applies a cty.Path to a value using the
|
||||
// indexing and attribute access operations from HCL.
|
||||
//
|
||||
// This is similar to calling the path's own Apply method, but ApplyPath uses
|
||||
// the more relaxed typing rules that apply to these operations in HCL, rather
|
||||
// than cty's relatively-strict rules. ApplyPath is implemented in terms of
|
||||
// Index and GetAttr, and so it has the same behavior for individual steps
|
||||
// but will stop and return any errors returned by intermediate steps.
|
||||
//
|
||||
// Diagnostics are produced if the given path cannot be applied to the given
|
||||
// value. Therefore a pointer to a source range must be provided to use in
|
||||
// diagnostics, though nil can be provided if the calling application is going
|
||||
// to ignore the subject of the returned diagnostics anyway.
|
||||
func ApplyPath(val cty.Value, path cty.Path, srcRange *Range) (cty.Value, Diagnostics) {
|
||||
var diags Diagnostics
|
||||
|
||||
for _, step := range path {
|
||||
var stepDiags Diagnostics
|
||||
switch ts := step.(type) {
|
||||
case cty.IndexStep:
|
||||
val, stepDiags = Index(val, ts.Key, srcRange)
|
||||
case cty.GetAttrStep:
|
||||
val, stepDiags = GetAttr(val, ts.Name, srcRange)
|
||||
default:
|
||||
// Should never happen because the above are all of the step types.
|
||||
diags = diags.Append(&Diagnostic{
|
||||
Severity: DiagError,
|
||||
Summary: "Invalid path step",
|
||||
Detail: fmt.Sprintf("Go type %T is not a valid path step. This is a bug in this program.", step),
|
||||
Subject: srcRange,
|
||||
})
|
||||
return cty.DynamicVal, diags
|
||||
}
|
||||
|
||||
diags = append(diags, stepDiags...)
|
||||
if stepDiags.HasErrors() {
|
||||
return cty.DynamicVal, diags
|
||||
}
|
||||
}
|
||||
|
||||
return val, diags
|
||||
}
|
||||
|
|
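The GetAttr and ApplyPath helpers added above let applications traverse values with HCL's relaxed rules (null and unknown handling, map access via attribute syntax) rather than cty's stricter ones. A short usage sketch of the new API:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	obj := cty.ObjectVal(map[string]cty.Value{
		"items": cty.TupleVal([]cty.Value{cty.StringVal("first"), cty.StringVal("second")}),
	})

	// Equivalent to the HCL expression obj.items[1]; a nil range is fine
	// when the caller does not care about diagnostic positions.
	path := cty.Path{
		cty.GetAttrStep{Name: "items"},
		cty.IndexStep{Key: cty.NumberIntVal(1)},
	}

	val, diags := hcl.ApplyPath(obj, path, nil)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(val.AsString()) // second
}
```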
90
vendor/github.com/hashicorp/hcl2/hcl/spec.md
generated
vendored
|
@ -57,10 +57,10 @@ access to the specific attributes and blocks requested.
|
|||
A _body schema_ consists of a list of _attribute schemata_ and
|
||||
_block header schemata_:
|
||||
|
||||
* An _attribute schema_ provides the name of an attribute and whether its
|
||||
- An _attribute schema_ provides the name of an attribute and whether its
|
||||
presence is required.
|
||||
|
||||
* A _block header schema_ provides a block type name and the semantic names
|
||||
- A _block header schema_ provides a block type name and the semantic names
|
||||
assigned to each of the labels of that block type, if any.
|
||||
|
||||
Within a schema, it is an error to request the same attribute name twice or
|
||||
|
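The schema-driven decoding described above maps directly onto the hcl package API: an hcl.BodySchema lists the attributes and block headers an application expects, and Content or PartialContent applies it to a body. A minimal sketch (assuming the hcl2 import paths used in this vendor tree):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := []byte("region = \"us-east-1\"\n\nrule \"allow\" {\n}\n")

	file, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1, Byte: 0})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// One attribute schema and one block header schema, as described above.
	schema := &hcl.BodySchema{
		Attributes: []hcl.AttributeSchema{
			{Name: "region", Required: true},
		},
		Blocks: []hcl.BlockHeaderSchema{
			{Type: "rule", LabelNames: []string{"name"}},
		},
	}

	content, moreDiags := file.Body.Content(schema)
	if moreDiags.HasErrors() {
		panic(moreDiags.Error())
	}
	fmt.Println(len(content.Attributes), len(content.Blocks)) // 1 1
}
```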
@ -72,11 +72,11 @@ a block whose type name is identical to the attribute name.
|
|||
The result of applying a body schema to a body is _body content_, which
|
||||
consists of an _attribute map_ and a _block sequence_:
|
||||
|
||||
* The _attribute map_ is a map data structure whose keys are attribute names
|
||||
- The _attribute map_ is a map data structure whose keys are attribute names
|
||||
and whose values are _expressions_ that represent the corresponding attribute
|
||||
values.
|
||||
|
||||
* The _block sequence_ is an ordered sequence of blocks, with each specifying
|
||||
- The _block sequence_ is an ordered sequence of blocks, with each specifying
|
||||
a block _type name_, the sequence of _labels_ specified for the block,
|
||||
and the body object (not body _content_) representing the block's own body.
|
||||
|
||||
|
@ -132,13 +132,13 @@ the schema has been processed.
|
|||
|
||||
Specifically:
|
||||
|
||||
* Any attribute whose name is specified in the schema is returned in body
|
||||
- Any attribute whose name is specified in the schema is returned in body
|
||||
content and elided from the new body.
|
||||
|
||||
* Any block whose type is specified in the schema is returned in body content
|
||||
- Any block whose type is specified in the schema is returned in body content
|
||||
and elided from the new body.
|
||||
|
||||
* Any attribute or block _not_ meeting the above conditions is placed into
|
||||
- Any attribute or block _not_ meeting the above conditions is placed into
|
||||
the new body, unmodified.
|
||||
|
||||
The new body can then be recursively processed using any of the body
|
||||
|
@ -168,20 +168,20 @@ In order to obtain a concrete value, each expression must be _evaluated_.
|
|||
Evaluation is performed in terms of an evaluation context, which
|
||||
consists of the following:
|
||||
|
||||
* An _evaluation mode_, which is defined below.
|
||||
* A _variable scope_, which provides a set of named variables for use in
|
||||
- An _evaluation mode_, which is defined below.
|
||||
- A _variable scope_, which provides a set of named variables for use in
|
||||
expressions.
|
||||
* A _function table_, which provides a set of named functions for use in
|
||||
- A _function table_, which provides a set of named functions for use in
|
||||
expressions.
|
||||
|
||||
The _evaluation mode_ allows for two different interpretations of an
|
||||
expression:
|
||||
|
||||
* In _literal-only mode_, variables and functions are not available and it
|
||||
- In _literal-only mode_, variables and functions are not available and it
|
||||
is assumed that the calling application's intent is to treat the attribute
|
||||
value as a literal.
|
||||
|
||||
* In _full expression mode_, variables and functions are defined and it is
|
||||
- In _full expression mode_, variables and functions are defined and it is
|
||||
assumed that the calling application wishes to provide a full expression
|
||||
language for definition of the attribute value.
|
||||
|
||||
|
@ -235,15 +235,15 @@ for interpretation into any suitable number representation. An implementation
|
|||
may in practice implement numbers with limited precision so long as the
|
||||
following constraints are met:
|
||||
|
||||
* Integers are represented with at least 256 bits.
|
||||
* Non-integer numbers are represented as floating point values with a
|
||||
- Integers are represented with at least 256 bits.
|
||||
- Non-integer numbers are represented as floating point values with a
|
||||
mantissa of at least 256 bits and a signed binary exponent of at least
|
||||
16 bits.
|
||||
* An error is produced if an integer value given in source cannot be
|
||||
- An error is produced if an integer value given in source cannot be
|
||||
represented precisely.
|
||||
* An error is produced if a non-integer value cannot be represented due to
|
||||
- An error is produced if a non-integer value cannot be represented due to
|
||||
overflow.
|
||||
* A non-integer number is rounded to the nearest possible value when a
|
||||
- A non-integer number is rounded to the nearest possible value when a
|
||||
value is of too high a precision to be represented.
|
||||
|
||||
The _number_ type also requires representation of both positive and negative
|
||||
|
@ -265,11 +265,11 @@ _Structural types_ are types that are constructed by combining other types.
|
|||
Each distinct combination of other types is itself a distinct type. There
|
||||
are two structural type _kinds_:
|
||||
|
||||
* _Object types_ are constructed of a set of named attributes, each of which
|
||||
- _Object types_ are constructed of a set of named attributes, each of which
|
||||
has a type. Attribute names are always strings. (_Object_ attributes are a
|
||||
distinct idea from _body_ attributes, though calling applications
|
||||
may choose to blur the distinction by use of common naming schemes.)
|
||||
* _Tuple types_ are constructed of a sequence of elements, each of which
|
||||
- _Tuple types_ are constructed of a sequence of elements, each of which
|
||||
has a type.
|
||||
|
||||
Values of structural types are compared for equality in terms of their
|
||||
|
@ -284,9 +284,9 @@ have attributes or elements with identical types.
|
|||
_Collection types_ are types that combine together an arbitrary number of
|
||||
values of some other single type. There are three collection type _kinds_:
|
||||
|
||||
* _List types_ represent ordered sequences of values of their element type.
|
||||
* _Map types_ represent values of their element type accessed via string keys.
|
||||
* _Set types_ represent unordered sets of distinct values of their element type.
|
||||
- _List types_ represent ordered sequences of values of their element type.
|
||||
- _Map types_ represent values of their element type accessed via string keys.
|
||||
- _Set types_ represent unordered sets of distinct values of their element type.
|
||||
|
||||
For each of these kinds and each distinct element type there is a distinct
|
||||
collection type. For example, "list of string" is a distinct type from
|
||||
|
@@ -376,9 +376,9 @@ a type has a non-commutative _matches_ relationship with a _type specification_.
A type specification is, in practice, just a different interpretation of a
type such that:

* Any type _matches_ any type that it is identical to.

* Any type _matches_ the dynamic pseudo-type.

For example, given a type specification "list of dynamic pseudo-type", the
concrete types "list of string" and "list of map" match, but the
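A minimal sketch of the _matches_ relationship in Go, assuming go-cty types. It is deliberately simplified (identity, the dynamic pseudo-type, and list element types only) and is not the library's own implementation.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// matches reports whether ty conforms to the type specification spec:
// identical types match, and the dynamic pseudo-type matches any type.
func matches(ty, spec cty.Type) bool {
	if spec == cty.DynamicPseudoType {
		return true
	}
	if ty.IsListType() && spec.IsListType() {
		return matches(ty.ElementType(), spec.ElementType())
	}
	return ty.Equals(spec)
}

func main() {
	spec := cty.List(cty.DynamicPseudoType) // "list of dynamic pseudo-type"
	fmt.Println(matches(cty.List(cty.String), spec)) // true
	fmt.Println(matches(cty.Map(cty.String), spec))  // false
}
```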
@@ -397,51 +397,51 @@ applications to provide functions that are interoperable with all syntaxes.

A _function_ is defined from the following elements:

* Zero or more _positional parameters_, each with a name used for documentation,
  a type specification for expected argument values, and a flag for whether
  each of null values, unknown values, and values of the dynamic pseudo-type
  are accepted.

* Zero or one _variadic parameters_, with the same structure as the _positional_
  parameters, which if present collects any additional arguments provided at
  the function call site.

* A _result type definition_, which specifies the value type returned for each
  valid sequence of argument values.

* A _result value definition_, which specifies the value returned for each
  valid sequence of argument values.

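These elements line up with the vendored go-cty function package: positional parameters, an optional variadic parameter, a result type definition (`Type`), and a result value definition (`Impl`). The sketch below is illustrative; the function itself is invented for the example.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

// joinFunc joins its variadic string arguments with a separator.
var joinFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{Name: "sep", Type: cty.String}, // positional parameter
	},
	VarParam: &function.Parameter{Name: "parts", Type: cty.String}, // variadic parameter
	Type:     function.StaticReturnType(cty.String),                // result type definition
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		// result value definition
		sep := args[0].AsString()
		out := ""
		for i, part := range args[1:] {
			if i > 0 {
				out += sep
			}
			out += part.AsString()
		}
		return cty.StringVal(out), nil
	},
})

func main() {
	v, err := joinFunc.Call([]cty.Value{
		cty.StringVal("-"), cty.StringVal("a"), cty.StringVal("b"),
	})
	fmt.Println(v.AsString(), err) // a-b <nil>
}
```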
A _function call_, regardless of source syntax, consists of a sequence of
argument values. The argument values are each mapped to a corresponding
parameter as follows:

* For each of the function's positional parameters in sequence, take the next
  argument. If there are no more arguments, the call is erroneous.

* If the function has a variadic parameter, take all remaining arguments that
  were not yet assigned to a positional parameter and collect them into
  a sequence of variadic arguments that each correspond to the variadic
  parameter.

* If the function has _no_ variadic parameter, it is an error if any arguments
  remain after taking one argument for each positional parameter.

After mapping each argument to a parameter, semantic checking proceeds
for each argument:

* If the argument value corresponding to a parameter does not match the
  parameter's type specification, the call is erroneous.

* If the argument value corresponding to a parameter is null and the parameter
  is not specified as accepting nulls, the call is erroneous.

* If the argument value corresponding to a parameter is the dynamic value
  and the parameter is not specified as accepting values of the dynamic
  pseudo-type, the call is valid but its _result type_ is forced to be the
  dynamic pseudo-type.

* If neither of the above conditions holds for any argument, the call is
  valid and the function's value type definition is used to determine the
  call's _result type_. A function _may_ vary its result type depending on
  the argument _values_ as well as the argument _types_; for example, a
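The argument-to-parameter mapping above reduces to a short loop; the helper below is an illustrative sketch of that rule, not code from any HCL package.

```go
package main

import "fmt"

// mapArgs assigns call arguments to parameters: positional parameters consume
// arguments in order, a variadic parameter (if present) absorbs the remainder,
// and any leftover arguments without a variadic parameter are an error.
func mapArgs(numPositional int, hasVariadic bool, args []string) (positional, variadic []string, err error) {
	if len(args) < numPositional {
		return nil, nil, fmt.Errorf("not enough arguments: need %d, got %d", numPositional, len(args))
	}
	positional = args[:numPositional]
	rest := args[numPositional:]
	if len(rest) > 0 && !hasVariadic {
		return nil, nil, fmt.Errorf("too many arguments: %d unexpected", len(rest))
	}
	return positional, rest, nil
}

func main() {
	pos, varargs, err := mapArgs(1, true, []string{"-", "a", "b"})
	fmt.Println(pos, varargs, err) // [-] [a b] <nil>
}
```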
@@ -450,11 +450,11 @@ for each argument:

If semantic checking succeeds without error, the call is _executed_:

* For each argument, if its value is unknown and its corresponding parameter
  is not specified as accepting unknowns, the _result value_ is forced to be an
  unknown value of the result type.

* If the previous condition does not apply, the function's result value
  definition is used to determine the call's _result value_.

The result of a function call expression is either an error, if one of the
@@ -631,20 +631,20 @@ diagnostics if they are applied to inappropriate expressions.

The following are the required static analysis functions:

* **Static List**: Require list/tuple construction syntax to be used and
  return a list of expressions for each of the elements given.

* **Static Map**: Require map/object construction syntax to be used and
  return a list of key/value pairs -- both expressions -- for each of
  the elements given. The usual constraint that a map key must be a string
  must not apply to this analysis, thus allowing applications to interpret
  arbitrary keys as they see fit.

* **Static Call**: Require function call syntax to be used and return an
  object describing the called function name and a list of expressions
  representing each of the call arguments.

* **Static Traversal**: Require a reference to a symbol in the variable
  scope and return a description of the path from the root scope to the
  accessed attribute or index.

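In the Go hcl package these analyses surface as helpers such as `ExprList`, `ExprMap`, and `AbsTraversalForExpr`; the sketch below shows the general calling pattern (availability of each helper depends on the hcl version, and error handling is abbreviated).

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
)

func analyze(expr hcl.Expression) {
	// Static List: the expression must use list/tuple constructor syntax.
	if exprs, diags := hcl.ExprList(expr); !diags.HasErrors() {
		fmt.Printf("list with %d element expressions\n", len(exprs))
	}

	// Static Map: the expression must use map/object constructor syntax;
	// keys come back as expressions and are not forced to be strings.
	if pairs, diags := hcl.ExprMap(expr); !diags.HasErrors() {
		fmt.Printf("map with %d key/value pairs\n", len(pairs))
	}

	// Static Traversal: the expression must be a plain variable reference.
	if trav, diags := hcl.AbsTraversalForExpr(expr); !diags.HasErrors() {
		fmt.Printf("reference rooted at %s\n", trav.RootName())
	}
}

func main() {
	// analyze would typically be called with an expression obtained from a
	// decoded body attribute.
}
```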
@@ -670,18 +670,18 @@ with the goals of this specification.
The language-agnosticism of this specification assumes that certain behaviors
are implemented separately for each syntax:

* Matching of a body schema with the physical elements of a body in the
  source language, to determine correspondence between physical constructs
  and schema elements.

* Implementing the _dynamic attributes_ body processing mode by either
  interpreting all physical constructs as attributes or producing an error
  if non-attribute constructs are present.

* Providing an evaluation function for all possible expressions that produces
  a value given an evaluation context.

* Providing the static analysis functionality described above in a manner that
  makes sense within the convention of the syntax.

The suggested implementation strategy is to use an implementation language's
|
61 vendor/github.com/hashicorp/hcl2/hcl/traversal.go (generated, vendored)
|
@ -255,66 +255,7 @@ type TraverseAttr struct {
|
|||
}
|
||||
|
||||
func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
|
||||
if val.IsNull() {
|
||||
return cty.DynamicVal, Diagnostics{
|
||||
{
|
||||
Severity: DiagError,
|
||||
Summary: "Attempt to get attribute from null value",
|
||||
Detail: "This value is null, so it does not have any attributes.",
|
||||
Subject: &tn.SrcRange,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
ty := val.Type()
|
||||
switch {
|
||||
case ty.IsObjectType():
|
||||
if !ty.HasAttribute(tn.Name) {
|
||||
return cty.DynamicVal, Diagnostics{
|
||||
{
|
||||
Severity: DiagError,
|
||||
Summary: "Unsupported attribute",
|
||||
Detail: fmt.Sprintf("This object does not have an attribute named %q.", tn.Name),
|
||||
Subject: &tn.SrcRange,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if !val.IsKnown() {
|
||||
return cty.UnknownVal(ty.AttributeType(tn.Name)), nil
|
||||
}
|
||||
|
||||
return val.GetAttr(tn.Name), nil
|
||||
case ty.IsMapType():
|
||||
if !val.IsKnown() {
|
||||
return cty.UnknownVal(ty.ElementType()), nil
|
||||
}
|
||||
|
||||
idx := cty.StringVal(tn.Name)
|
||||
if val.HasIndex(idx).False() {
|
||||
return cty.DynamicVal, Diagnostics{
|
||||
{
|
||||
Severity: DiagError,
|
||||
Summary: "Missing map element",
|
||||
Detail: fmt.Sprintf("This map does not have an element with the key %q.", tn.Name),
|
||||
Subject: &tn.SrcRange,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return val.Index(idx), nil
|
||||
case ty == cty.DynamicPseudoType:
|
||||
return cty.DynamicVal, nil
|
||||
default:
|
||||
return cty.DynamicVal, Diagnostics{
|
||||
{
|
||||
Severity: DiagError,
|
||||
Summary: "Unsupported attribute",
|
||||
Detail: "This value does not have any attributes.",
|
||||
Subject: &tn.SrcRange,
|
||||
},
|
||||
}
|
||||
}
|
||||
return GetAttr(val, tn.Name, &tn.SrcRange)
|
||||
}
|
||||
|
||||
func (tn TraverseAttr) SourceRange() Range {
|
||||
|
|
5 vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go (generated, vendored)
|
@ -52,11 +52,14 @@ func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
|
|||
func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
|
||||
traversal, diags := AbsTraversalForExpr(expr)
|
||||
if len(traversal) > 0 {
|
||||
ret := make(Traversal, len(traversal))
|
||||
copy(ret, traversal)
|
||||
root := traversal[0].(TraverseRoot)
|
||||
traversal[0] = TraverseAttr{
|
||||
ret[0] = TraverseAttr{
|
||||
Name: root.Name,
|
||||
SrcRange: root.SrcRange,
|
||||
}
|
||||
return ret, diags
|
||||
}
|
||||
return traversal, diags
|
||||
}
|
||||
|
|
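The hunk above makes RelTraversalForExpr copy the traversal before rewriting its root step, so the caller's slice is no longer mutated. A standalone sketch of that defensive-copy pattern (the helper name is ours, not the library's):

```go
package example

import "github.com/hashicorp/hcl2/hcl"

// relFromAbs converts an absolute traversal to a relative one without
// modifying the traversal passed in by the caller.
func relFromAbs(abs hcl.Traversal) hcl.Traversal {
	if len(abs) == 0 {
		return abs
	}
	rel := make(hcl.Traversal, len(abs))
	copy(rel, abs)
	root := abs[0].(hcl.TraverseRoot)
	rel[0] = hcl.TraverseAttr{Name: root.Name, SrcRange: root.SrcRange}
	return rel
}
```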
346 vendor/github.com/hashicorp/hcl2/hcldec/spec.go (generated, vendored)
|
@ -478,6 +478,44 @@ func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabe
|
|||
if len(elems) == 0 {
|
||||
ret = cty.ListValEmpty(s.Nested.impliedType())
|
||||
} else {
|
||||
// Since our target is a list, all of the decoded elements must have the
|
||||
// same type or cty.ListVal will panic below. Different types can arise
|
||||
// if there is an attribute spec of type cty.DynamicPseudoType in the
|
||||
// nested spec; all given values must be convertible to a single type
|
||||
// in order for the result to be considered valid.
|
||||
etys := make([]cty.Type, len(elems))
|
||||
for i, v := range elems {
|
||||
etys[i] = v.Type()
|
||||
}
|
||||
ety, convs := convert.UnifyUnsafe(etys)
|
||||
if ety == cty.NilType {
|
||||
// FIXME: This is a pretty terrible error message.
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName),
|
||||
Detail: "Corresponding attributes in all blocks of this type must be the same.",
|
||||
Subject: &sourceRanges[0],
|
||||
})
|
||||
return cty.DynamicVal, diags
|
||||
}
|
||||
for i, v := range elems {
|
||||
if convs[i] != nil {
|
||||
newV, err := convs[i](v)
|
||||
if err != nil {
|
||||
// FIXME: This is a pretty terrible error message.
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName),
|
||||
Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err),
|
||||
Subject: &sourceRanges[i],
|
||||
})
|
||||
// Bail early here so we won't panic below in cty.ListVal
|
||||
return cty.DynamicVal, diags
|
||||
}
|
||||
elems[i] = newV
|
||||
}
|
||||
}
|
||||
|
||||
ret = cty.ListVal(elems)
|
||||
}
|
||||
|
||||
|
@ -509,6 +547,127 @@ func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []bloc
|
|||
return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
|
||||
}
|
||||
|
||||
// A BlockTupleSpec is a Spec that produces a cty tuple of the results of
|
||||
// decoding all of the nested blocks of a given type, using a nested spec.
|
||||
//
|
||||
// This is similar to BlockListSpec, but it permits the nested blocks to have
|
||||
// different result types in situations where cty.DynamicPseudoType attributes
|
||||
// are present.
|
||||
type BlockTupleSpec struct {
|
||||
TypeName string
|
||||
Nested Spec
|
||||
MinItems int
|
||||
MaxItems int
|
||||
}
|
||||
|
||||
func (s *BlockTupleSpec) visitSameBodyChildren(cb visitFunc) {
|
||||
// leaf node ("Nested" does not use the same body)
|
||||
}
|
||||
|
||||
// blockSpec implementation
|
||||
func (s *BlockTupleSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
|
||||
return []hcl.BlockHeaderSchema{
|
||||
{
|
||||
Type: s.TypeName,
|
||||
LabelNames: findLabelSpecs(s.Nested),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// blockSpec implementation
|
||||
func (s *BlockTupleSpec) nestedSpec() Spec {
|
||||
return s.Nested
|
||||
}
|
||||
|
||||
// specNeedingVariables implementation
|
||||
func (s *BlockTupleSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
|
||||
var ret []hcl.Traversal
|
||||
|
||||
for _, childBlock := range content.Blocks {
|
||||
if childBlock.Type != s.TypeName {
|
||||
continue
|
||||
}
|
||||
|
||||
ret = append(ret, Variables(childBlock.Body, s.Nested)...)
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (s *BlockTupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
var diags hcl.Diagnostics
|
||||
|
||||
if s.Nested == nil {
|
||||
panic("BlockListSpec with no Nested Spec")
|
||||
}
|
||||
|
||||
var elems []cty.Value
|
||||
var sourceRanges []hcl.Range
|
||||
for _, childBlock := range content.Blocks {
|
||||
if childBlock.Type != s.TypeName {
|
||||
continue
|
||||
}
|
||||
|
||||
val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false)
|
||||
diags = append(diags, childDiags...)
|
||||
elems = append(elems, val)
|
||||
sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested))
|
||||
}
|
||||
|
||||
if len(elems) < s.MinItems {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName),
|
||||
Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName),
|
||||
Subject: &content.MissingItemRange,
|
||||
})
|
||||
} else if s.MaxItems > 0 && len(elems) > s.MaxItems {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Too many %s blocks", s.TypeName),
|
||||
Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName),
|
||||
Subject: &sourceRanges[s.MaxItems],
|
||||
})
|
||||
}
|
||||
|
||||
var ret cty.Value
|
||||
|
||||
if len(elems) == 0 {
|
||||
ret = cty.EmptyTupleVal
|
||||
} else {
|
||||
ret = cty.TupleVal(elems)
|
||||
}
|
||||
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
func (s *BlockTupleSpec) impliedType() cty.Type {
|
||||
// We can't predict our type, because we don't know how many blocks
|
||||
// there will be until we decode.
|
||||
return cty.DynamicPseudoType
|
||||
}
|
||||
|
||||
func (s *BlockTupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
|
||||
// We return the source range of the _first_ block of the given type,
|
||||
// since they are not guaranteed to form a contiguous range.
|
||||
|
||||
var childBlock *hcl.Block
|
||||
for _, candidate := range content.Blocks {
|
||||
if candidate.Type != s.TypeName {
|
||||
continue
|
||||
}
|
||||
|
||||
childBlock = candidate
|
||||
break
|
||||
}
|
||||
|
||||
if childBlock == nil {
|
||||
return content.MissingItemRange
|
||||
}
|
||||
|
||||
return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
|
||||
}
|
||||
|
||||
// A BlockSetSpec is a Spec that produces a cty set of the results of
|
||||
// decoding all of the nested blocks of a given type, using a nested spec.
|
||||
type BlockSetSpec struct {
|
||||
|
@ -593,6 +752,44 @@ func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel
|
|||
if len(elems) == 0 {
|
||||
ret = cty.SetValEmpty(s.Nested.impliedType())
|
||||
} else {
|
||||
// Since our target is a set, all of the decoded elements must have the
|
||||
// same type or cty.SetVal will panic below. Different types can arise
|
||||
// if there is an attribute spec of type cty.DynamicPseudoType in the
|
||||
// nested spec; all given values must be convertible to a single type
|
||||
// in order for the result to be considered valid.
|
||||
etys := make([]cty.Type, len(elems))
|
||||
for i, v := range elems {
|
||||
etys[i] = v.Type()
|
||||
}
|
||||
ety, convs := convert.UnifyUnsafe(etys)
|
||||
if ety == cty.NilType {
|
||||
// FIXME: This is a pretty terrible error message.
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName),
|
||||
Detail: "Corresponding attributes in all blocks of this type must be the same.",
|
||||
Subject: &sourceRanges[0],
|
||||
})
|
||||
return cty.DynamicVal, diags
|
||||
}
|
||||
for i, v := range elems {
|
||||
if convs[i] != nil {
|
||||
newV, err := convs[i](v)
|
||||
if err != nil {
|
||||
// FIXME: This is a pretty terrible error message.
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName),
|
||||
Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err),
|
||||
Subject: &sourceRanges[i],
|
||||
})
|
||||
// Bail early here so we won't panic below in cty.SetVal
|
||||
return cty.DynamicVal, diags
|
||||
}
|
||||
elems[i] = newV
|
||||
}
|
||||
}
|
||||
|
||||
ret = cty.SetVal(elems)
|
||||
}
|
||||
|
||||
|
@ -673,7 +870,10 @@ func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel
|
|||
var diags hcl.Diagnostics
|
||||
|
||||
if s.Nested == nil {
|
||||
panic("BlockSetSpec with no Nested Spec")
|
||||
panic("BlockMapSpec with no Nested Spec")
|
||||
}
|
||||
if ImpliedType(s).HasDynamicTypes() {
|
||||
panic("cty.DynamicPseudoType attributes may not be used inside a BlockMapSpec")
|
||||
}
|
||||
|
||||
elems := map[string]interface{}{}
|
||||
|
@ -766,6 +966,150 @@ func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []block
|
|||
return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
|
||||
}
|
||||
|
||||
// A BlockObjectSpec is a Spec that produces a cty object of the results of
|
||||
// decoding all of the nested blocks of a given type, using a nested spec.
|
||||
//
|
||||
// One level of object structure is created for each of the given label names.
|
||||
// There must be at least one given label name.
|
||||
//
|
||||
// This is similar to BlockMapSpec, but it permits the nested blocks to have
|
||||
// different result types in situations where cty.DynamicPseudoType attributes
|
||||
// are present.
|
||||
type BlockObjectSpec struct {
|
||||
TypeName string
|
||||
LabelNames []string
|
||||
Nested Spec
|
||||
}
|
||||
|
||||
func (s *BlockObjectSpec) visitSameBodyChildren(cb visitFunc) {
|
||||
// leaf node ("Nested" does not use the same body)
|
||||
}
|
||||
|
||||
// blockSpec implementation
|
||||
func (s *BlockObjectSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
|
||||
return []hcl.BlockHeaderSchema{
|
||||
{
|
||||
Type: s.TypeName,
|
||||
LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// blockSpec implementation
|
||||
func (s *BlockObjectSpec) nestedSpec() Spec {
|
||||
return s.Nested
|
||||
}
|
||||
|
||||
// specNeedingVariables implementation
|
||||
func (s *BlockObjectSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
|
||||
var ret []hcl.Traversal
|
||||
|
||||
for _, childBlock := range content.Blocks {
|
||||
if childBlock.Type != s.TypeName {
|
||||
continue
|
||||
}
|
||||
|
||||
ret = append(ret, Variables(childBlock.Body, s.Nested)...)
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (s *BlockObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
var diags hcl.Diagnostics
|
||||
|
||||
if s.Nested == nil {
|
||||
panic("BlockObjectSpec with no Nested Spec")
|
||||
}
|
||||
|
||||
elems := map[string]interface{}{}
|
||||
for _, childBlock := range content.Blocks {
|
||||
if childBlock.Type != s.TypeName {
|
||||
continue
|
||||
}
|
||||
|
||||
childLabels := labelsForBlock(childBlock)
|
||||
val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false)
|
||||
targetMap := elems
|
||||
for _, key := range childBlock.Labels[:len(s.LabelNames)-1] {
|
||||
if _, exists := targetMap[key]; !exists {
|
||||
targetMap[key] = make(map[string]interface{})
|
||||
}
|
||||
targetMap = targetMap[key].(map[string]interface{})
|
||||
}
|
||||
|
||||
diags = append(diags, childDiags...)
|
||||
|
||||
key := childBlock.Labels[len(s.LabelNames)-1]
|
||||
if _, exists := targetMap[key]; exists {
|
||||
labelsBuf := bytes.Buffer{}
|
||||
for _, label := range childBlock.Labels {
|
||||
fmt.Fprintf(&labelsBuf, " %q", label)
|
||||
}
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Duplicate %s block", s.TypeName),
|
||||
Detail: fmt.Sprintf(
|
||||
"A block for %s%s was already defined. The %s labels must be unique.",
|
||||
s.TypeName, labelsBuf.String(), s.TypeName,
|
||||
),
|
||||
Subject: &childBlock.DefRange,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
targetMap[key] = val
|
||||
}
|
||||
|
||||
if len(elems) == 0 {
|
||||
return cty.EmptyObjectVal, diags
|
||||
}
|
||||
|
||||
var ctyObj func(map[string]interface{}, int) cty.Value
|
||||
ctyObj = func(raw map[string]interface{}, depth int) cty.Value {
|
||||
vals := make(map[string]cty.Value, len(raw))
|
||||
if depth == 1 {
|
||||
for k, v := range raw {
|
||||
vals[k] = v.(cty.Value)
|
||||
}
|
||||
} else {
|
||||
for k, v := range raw {
|
||||
vals[k] = ctyObj(v.(map[string]interface{}), depth-1)
|
||||
}
|
||||
}
|
||||
return cty.ObjectVal(vals)
|
||||
}
|
||||
|
||||
return ctyObj(elems, len(s.LabelNames)), diags
|
||||
}
|
||||
|
||||
func (s *BlockObjectSpec) impliedType() cty.Type {
|
||||
// We can't predict our type, since we don't know how many blocks are
|
||||
// present and what labels they have until we decode.
|
||||
return cty.DynamicPseudoType
|
||||
}
|
||||
|
||||
func (s *BlockObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
|
||||
// We return the source range of the _first_ block of the given type,
|
||||
// since they are not guaranteed to form a contiguous range.
|
||||
|
||||
var childBlock *hcl.Block
|
||||
for _, candidate := range content.Blocks {
|
||||
if candidate.Type != s.TypeName {
|
||||
continue
|
||||
}
|
||||
|
||||
childBlock = candidate
|
||||
break
|
||||
}
|
||||
|
||||
if childBlock == nil {
|
||||
return content.MissingItemRange
|
||||
}
|
||||
|
||||
return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
|
||||
}
|
||||
|
||||
// A BlockAttrsSpec is a Spec that interprets a single block as if it were
|
||||
// a map of some element type. That is, each attribute within the block
|
||||
// becomes a key in the resulting map and the attribute's value becomes the
|
||||
|
|
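For context on how these block specs are used, here is a minimal, assumed example with hcldec; the block and attribute names are invented for the illustration.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcldec"
	"github.com/hashicorp/hcl2/hclparse"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte(`
service {
  name = "web"
}
service {
  name = "db"
}
`)
	f, diags := hclparse.NewParser().ParseHCL(src, "example.hcl")
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// Decode every "service" block into one element of a list.
	spec := &hcldec.BlockListSpec{
		TypeName: "service",
		Nested: hcldec.ObjectSpec{
			"name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: true},
		},
	}
	val, diags := hcldec.Decode(f.Body, spec, nil)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(val.LengthInt()) // 2
}
```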
123 vendor/github.com/hashicorp/hcl2/hclparse/parser.go (generated, vendored)
|
@ -1,123 +0,0 @@
|
|||
package hclparse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/hashicorp/hcl2/hcl/json"
|
||||
)
|
||||
|
||||
// NOTE: This is the public interface for parsing. The actual parsers are
|
||||
// in other packages alongside this one, with this package just wrapping them
|
||||
// to provide a unified interface for the caller across all supported formats.
|
||||
|
||||
// Parser is the main interface for parsing configuration files. As well as
|
||||
// parsing files, a parser also retains a registry of all of the files it
|
||||
// has parsed so that multiple attempts to parse the same file will return
|
||||
// the same object and so the collected files can be used when printing
|
||||
// diagnostics.
|
||||
//
|
||||
// Any diagnostics for parsing a file are only returned once on the first
|
||||
// call to parse that file. Callers are expected to collect up diagnostics
|
||||
// and present them together, so returning diagnostics for the same file
|
||||
// multiple times would create a confusing result.
|
||||
type Parser struct {
|
||||
files map[string]*hcl.File
|
||||
}
|
||||
|
||||
// NewParser creates a new parser, ready to parse configuration files.
|
||||
func NewParser() *Parser {
|
||||
return &Parser{
|
||||
files: map[string]*hcl.File{},
|
||||
}
|
||||
}
|
||||
|
||||
// ParseHCL parses the given buffer (which is assumed to have been loaded from
|
||||
// the given filename) as a native-syntax configuration file and returns the
|
||||
// hcl.File object representing it.
|
||||
func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
|
||||
if existing := p.files[filename]; existing != nil {
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1})
|
||||
p.files[filename] = file
|
||||
return file, diags
|
||||
}
|
||||
|
||||
// ParseHCLFile reads the given filename and parses it as a native-syntax HCL
|
||||
// configuration file. An error diagnostic is returned if the given file
|
||||
// cannot be read.
|
||||
func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) {
|
||||
if existing := p.files[filename]; existing != nil {
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
src, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, hcl.Diagnostics{
|
||||
{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Failed to read file",
|
||||
Detail: fmt.Sprintf("The configuration file %q could not be read.", filename),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return p.ParseHCL(src, filename)
|
||||
}
|
||||
|
||||
// ParseJSON parses the given JSON buffer (which is assumed to have been loaded
|
||||
// from the given filename) and returns the hcl.File object representing it.
|
||||
func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
|
||||
if existing := p.files[filename]; existing != nil {
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
file, diags := json.Parse(src, filename)
|
||||
p.files[filename] = file
|
||||
return file, diags
|
||||
}
|
||||
|
||||
// ParseJSONFile reads the given filename and parses it as JSON, similarly to
|
||||
// ParseJSON. An error diagnostic is returned if the given file cannot be read.
|
||||
func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) {
|
||||
if existing := p.files[filename]; existing != nil {
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
file, diags := json.ParseFile(filename)
|
||||
p.files[filename] = file
|
||||
return file, diags
|
||||
}
|
||||
|
||||
// AddFile allows a caller to record in a parser a file that was parsed some
|
||||
// other way, thus allowing it to be included in the registry of sources.
|
||||
func (p *Parser) AddFile(filename string, file *hcl.File) {
|
||||
p.files[filename] = file
|
||||
}
|
||||
|
||||
// Sources returns a map from filenames to the raw source code that was
|
||||
// read from them. This is intended to be used, for example, to print
|
||||
// diagnostics with contextual information.
|
||||
//
|
||||
// The arrays underlying the returned slices should not be modified.
|
||||
func (p *Parser) Sources() map[string][]byte {
|
||||
ret := make(map[string][]byte)
|
||||
for fn, f := range p.files {
|
||||
ret[fn] = f.Bytes
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Files returns a map from filenames to the File objects produced from them.
|
||||
// This is intended to be used, for example, to print diagnostics with
|
||||
// contextual information.
|
||||
//
|
||||
// The returned map and all of the objects it refers to directly or indirectly
|
||||
// must not be modified.
|
||||
func (p *Parser) Files() map[string]*hcl.File {
|
||||
return p.files
|
||||
}
|
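A small, assumed usage sketch for the parser above: parse a file, then use the parser's file registry to print diagnostics with source context (the filename is hypothetical).

```go
package main

import (
	"os"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclparse"
)

func main() {
	p := hclparse.NewParser()

	// ParseHCLFile caches the file, so later calls return the same object.
	f, diags := p.ParseHCLFile("config.hcl") // hypothetical filename
	if diags.HasErrors() {
		// The parser's registry supplies source snippets for the diagnostics.
		wr := hcl.NewDiagnosticTextWriter(os.Stderr, p.Files(), 78, true)
		wr.WriteDiagnostics(diags)
		os.Exit(1)
	}

	_ = f // hand f.Body to hcldec or gohcl for decoding
}
```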
121 vendor/github.com/hashicorp/hcl2/hclwrite/ast.go (generated, vendored, new file)
|
@ -0,0 +1,121 @@
|
|||
package hclwrite
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
type File struct {
|
||||
inTree
|
||||
|
||||
srcBytes []byte
|
||||
body *node
|
||||
}
|
||||
|
||||
// NewEmptyFile constructs a new file with no content, ready to be mutated
|
||||
// by other calls that append to its body.
|
||||
func NewEmptyFile() *File {
|
||||
f := &File{
|
||||
inTree: newInTree(),
|
||||
}
|
||||
body := newBody()
|
||||
f.body = f.children.Append(body)
|
||||
return f
|
||||
}
|
||||
|
||||
// Body returns the root body of the file, which contains the top-level
|
||||
// attributes and blocks.
|
||||
func (f *File) Body() *Body {
|
||||
return f.body.content.(*Body)
|
||||
}
|
||||
|
||||
// WriteTo writes the tokens underlying the receiving file to the given writer.
|
||||
//
|
||||
// The tokens first have a simple formatting pass applied that adjusts only
|
||||
// the spaces between them.
|
||||
func (f *File) WriteTo(wr io.Writer) (int64, error) {
|
||||
tokens := f.inTree.children.BuildTokens(nil)
|
||||
format(tokens)
|
||||
return tokens.WriteTo(wr)
|
||||
}
|
||||
|
||||
// Bytes returns a buffer containing the source code resulting from the
|
||||
// tokens underlying the receiving file. If any updates have been made via
|
||||
// the AST API, these will be reflected in the result.
|
||||
func (f *File) Bytes() []byte {
|
||||
buf := &bytes.Buffer{}
|
||||
f.WriteTo(buf)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
type comments struct {
|
||||
leafNode
|
||||
|
||||
parent *node
|
||||
tokens Tokens
|
||||
}
|
||||
|
||||
func newComments(tokens Tokens) *comments {
|
||||
return &comments{
|
||||
tokens: tokens,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *comments) BuildTokens(to Tokens) Tokens {
|
||||
return c.tokens.BuildTokens(to)
|
||||
}
|
||||
|
||||
type identifier struct {
|
||||
leafNode
|
||||
|
||||
parent *node
|
||||
token *Token
|
||||
}
|
||||
|
||||
func newIdentifier(token *Token) *identifier {
|
||||
return &identifier{
|
||||
token: token,
|
||||
}
|
||||
}
|
||||
|
||||
func (i *identifier) BuildTokens(to Tokens) Tokens {
|
||||
return append(to, i.token)
|
||||
}
|
||||
|
||||
func (i *identifier) hasName(name string) bool {
|
||||
return name == string(i.token.Bytes)
|
||||
}
|
||||
|
||||
type number struct {
|
||||
leafNode
|
||||
|
||||
parent *node
|
||||
token *Token
|
||||
}
|
||||
|
||||
func newNumber(token *Token) *number {
|
||||
return &number{
|
||||
token: token,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *number) BuildTokens(to Tokens) Tokens {
|
||||
return append(to, n.token)
|
||||
}
|
||||
|
||||
type quoted struct {
|
||||
leafNode
|
||||
|
||||
parent *node
|
||||
tokens Tokens
|
||||
}
|
||||
|
||||
func newQuoted(tokens Tokens) *quoted {
|
||||
return "ed{
|
||||
tokens: tokens,
|
||||
}
|
||||
}
|
||||
|
||||
func (q *quoted) BuildTokens(to Tokens) Tokens {
|
||||
return q.tokens.BuildTokens(to)
|
||||
}
|
48 vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go (generated, vendored, new file)
|
@ -0,0 +1,48 @@
|
|||
package hclwrite
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
)
|
||||
|
||||
type Attribute struct {
|
||||
inTree
|
||||
|
||||
leadComments *node
|
||||
name *node
|
||||
expr *node
|
||||
lineComments *node
|
||||
}
|
||||
|
||||
func newAttribute() *Attribute {
|
||||
return &Attribute{
|
||||
inTree: newInTree(),
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Attribute) init(name string, expr *Expression) {
|
||||
expr.assertUnattached()
|
||||
|
||||
nameTok := newIdentToken(name)
|
||||
nameObj := newIdentifier(nameTok)
|
||||
a.leadComments = a.children.Append(newComments(nil))
|
||||
a.name = a.children.Append(nameObj)
|
||||
a.children.AppendUnstructuredTokens(Tokens{
|
||||
{
|
||||
Type: hclsyntax.TokenEqual,
|
||||
Bytes: []byte{'='},
|
||||
},
|
||||
})
|
||||
a.expr = a.children.Append(expr)
|
||||
a.expr.list = a.children
|
||||
a.lineComments = a.children.Append(newComments(nil))
|
||||
a.children.AppendUnstructuredTokens(Tokens{
|
||||
{
|
||||
Type: hclsyntax.TokenNewline,
|
||||
Bytes: []byte{'\n'},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (a *Attribute) Expr() *Expression {
|
||||
return a.expr.content.(*Expression)
|
||||
}
|
74 vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go (generated, vendored, new file)
|
@ -0,0 +1,74 @@
|
|||
package hclwrite
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
type Block struct {
|
||||
inTree
|
||||
|
||||
leadComments *node
|
||||
typeName *node
|
||||
labels nodeSet
|
||||
open *node
|
||||
body *node
|
||||
close *node
|
||||
}
|
||||
|
||||
func newBlock() *Block {
|
||||
return &Block{
|
||||
inTree: newInTree(),
|
||||
labels: newNodeSet(),
|
||||
}
|
||||
}
|
||||
|
||||
// NewBlock constructs a new, empty block with the given type name and labels.
|
||||
func NewBlock(typeName string, labels []string) *Block {
|
||||
block := newBlock()
|
||||
block.init(typeName, labels)
|
||||
return block
|
||||
}
|
||||
|
||||
func (b *Block) init(typeName string, labels []string) {
|
||||
nameTok := newIdentToken(typeName)
|
||||
nameObj := newIdentifier(nameTok)
|
||||
b.leadComments = b.children.Append(newComments(nil))
|
||||
b.typeName = b.children.Append(nameObj)
|
||||
for _, label := range labels {
|
||||
labelToks := TokensForValue(cty.StringVal(label))
|
||||
labelObj := newQuoted(labelToks)
|
||||
labelNode := b.children.Append(labelObj)
|
||||
b.labels.Add(labelNode)
|
||||
}
|
||||
b.open = b.children.AppendUnstructuredTokens(Tokens{
|
||||
{
|
||||
Type: hclsyntax.TokenOBrace,
|
||||
Bytes: []byte{'{'},
|
||||
},
|
||||
{
|
||||
Type: hclsyntax.TokenNewline,
|
||||
Bytes: []byte{'\n'},
|
||||
},
|
||||
})
|
||||
body := newBody() // initially totally empty; caller can append to it subsequently
|
||||
b.body = b.children.Append(body)
|
||||
b.close = b.children.AppendUnstructuredTokens(Tokens{
|
||||
{
|
||||
Type: hclsyntax.TokenCBrace,
|
||||
Bytes: []byte{'}'},
|
||||
},
|
||||
{
|
||||
Type: hclsyntax.TokenNewline,
|
||||
Bytes: []byte{'\n'},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Body returns the body that represents the content of the receiving block.
|
||||
//
|
||||
// Appending to or otherwise modifying this body will make changes to the
|
||||
// tokens that are generated between the block's open and close braces.
|
||||
func (b *Block) Body() *Body {
|
||||
return b.body.content.(*Body)
|
||||
}
|
153 vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go (generated, vendored, new file)
|
@ -0,0 +1,153 @@
|
|||
package hclwrite
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
type Body struct {
|
||||
inTree
|
||||
|
||||
items nodeSet
|
||||
}
|
||||
|
||||
func newBody() *Body {
|
||||
return &Body{
|
||||
inTree: newInTree(),
|
||||
items: newNodeSet(),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Body) appendItem(c nodeContent) *node {
|
||||
nn := b.children.Append(c)
|
||||
b.items.Add(nn)
|
||||
return nn
|
||||
}
|
||||
|
||||
func (b *Body) appendItemNode(nn *node) *node {
|
||||
nn.assertUnattached()
|
||||
b.children.AppendNode(nn)
|
||||
b.items.Add(nn)
|
||||
return nn
|
||||
}
|
||||
|
||||
// Clear removes all of the items from the body, making it empty.
|
||||
func (b *Body) Clear() {
|
||||
b.children.Clear()
|
||||
}
|
||||
|
||||
func (b *Body) AppendUnstructuredTokens(ts Tokens) {
|
||||
b.inTree.children.Append(ts)
|
||||
}
|
||||
|
||||
// Attributes returns a new map of all of the attributes in the body, with
|
||||
// the attribute names as the keys.
|
||||
func (b *Body) Attributes() map[string]*Attribute {
|
||||
ret := make(map[string]*Attribute)
|
||||
for n := range b.items {
|
||||
if attr, isAttr := n.content.(*Attribute); isAttr {
|
||||
nameObj := attr.name.content.(*identifier)
|
||||
name := string(nameObj.token.Bytes)
|
||||
ret[name] = attr
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Blocks returns a new slice of all the blocks in the body.
|
||||
func (b *Body) Blocks() []*Block {
|
||||
ret := make([]*Block, 0, len(b.items))
|
||||
for n := range b.items {
|
||||
if block, isBlock := n.content.(*Block); isBlock {
|
||||
ret = append(ret, block)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// GetAttribute returns the attribute from the body that has the given name,
|
||||
// or returns nil if there is currently no matching attribute.
|
||||
func (b *Body) GetAttribute(name string) *Attribute {
|
||||
for n := range b.items {
|
||||
if attr, isAttr := n.content.(*Attribute); isAttr {
|
||||
nameObj := attr.name.content.(*identifier)
|
||||
if nameObj.hasName(name) {
|
||||
// We've found it!
|
||||
return attr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAttributeValue either replaces the expression of an existing attribute
|
||||
// of the given name or adds a new attribute definition to the end of the block.
|
||||
//
|
||||
// The value is given as a cty.Value, and must therefore be a literal. To set
|
||||
// a variable reference or other traversal, use SetAttributeTraversal.
|
||||
//
|
||||
// The return value is the attribute that was either modified in-place or
|
||||
// created.
|
||||
func (b *Body) SetAttributeValue(name string, val cty.Value) *Attribute {
|
||||
attr := b.GetAttribute(name)
|
||||
expr := NewExpressionLiteral(val)
|
||||
if attr != nil {
|
||||
attr.expr = attr.expr.ReplaceWith(expr)
|
||||
} else {
|
||||
attr = newAttribute()
|
||||
attr.init(name, expr)
|
||||
b.appendItem(attr)
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
// SetAttributeTraversal either replaces the expression of an existing attribute
|
||||
// of the given name or adds a new attribute definition to the end of the body.
|
||||
//
|
||||
// The new expression is given as a hcl.Traversal, which must be an absolute
|
||||
// traversal. To set a literal value, use SetAttributeValue.
|
||||
//
|
||||
// The return value is the attribute that was either modified in-place or
|
||||
// created.
|
||||
func (b *Body) SetAttributeTraversal(name string, traversal hcl.Traversal) *Attribute {
|
||||
attr := b.GetAttribute(name)
|
||||
expr := NewExpressionAbsTraversal(traversal)
|
||||
if attr != nil {
|
||||
attr.expr = attr.expr.ReplaceWith(expr)
|
||||
} else {
|
||||
attr = newAttribute()
|
||||
attr.init(name, expr)
|
||||
b.appendItem(attr)
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
// AppendBlock appends an existing block (which must not be already attached
|
||||
// to a body) to the end of the receiving body.
|
||||
func (b *Body) AppendBlock(block *Block) *Block {
|
||||
b.appendItem(block)
|
||||
return block
|
||||
}
|
||||
|
||||
// AppendNewBlock appends a new nested block to the end of the receiving body
|
||||
// with the given type name and labels.
|
||||
func (b *Body) AppendNewBlock(typeName string, labels []string) *Block {
|
||||
block := newBlock()
|
||||
block.init(typeName, labels)
|
||||
b.appendItem(block)
|
||||
return block
|
||||
}
|
||||
|
||||
// AppendNewline appends a newline token to the end of the receiving body,
|
||||
// which generally serves as a separator between different sets of body
|
||||
// contents.
|
||||
func (b *Body) AppendNewline() {
|
||||
b.AppendUnstructuredTokens(Tokens{
|
||||
{
|
||||
Type: hclsyntax.TokenNewline,
|
||||
Bytes: []byte{'\n'},
|
||||
},
|
||||
})
|
||||
}
|
201 vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go (generated, vendored, new file)
|
@ -0,0 +1,201 @@
|
|||
package hclwrite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
type Expression struct {
|
||||
inTree
|
||||
|
||||
absTraversals nodeSet
|
||||
}
|
||||
|
||||
func newExpression() *Expression {
|
||||
return &Expression{
|
||||
inTree: newInTree(),
|
||||
absTraversals: newNodeSet(),
|
||||
}
|
||||
}
|
||||
|
||||
// NewExpressionLiteral constructs an expression that represents the given
|
||||
// literal value.
|
||||
//
|
||||
// Since an unknown value cannot be represented in source code, this function
|
||||
// will panic if the given value is unknown or contains a nested unknown value.
|
||||
// Use val.IsWhollyKnown before calling to be sure.
|
||||
//
|
||||
// HCL native syntax does not directly represent lists, maps, and sets, and
|
||||
// instead relies on the automatic conversions to those collection types from
|
||||
// either list or tuple constructor syntax. Therefore converting collection
|
||||
// values to source code and re-reading them will lose type information, and
|
||||
// the reader must provide a suitable type at decode time to recover the
|
||||
// original value.
|
||||
func NewExpressionLiteral(val cty.Value) *Expression {
|
||||
toks := TokensForValue(val)
|
||||
expr := newExpression()
|
||||
expr.children.AppendUnstructuredTokens(toks)
|
||||
return expr
|
||||
}
|
||||
|
||||
// NewExpressionAbsTraversal constructs an expression that represents the
|
||||
// given traversal, which must be absolute or this function will panic.
|
||||
func NewExpressionAbsTraversal(traversal hcl.Traversal) *Expression {
|
||||
if traversal.IsRelative() {
|
||||
panic("can't construct expression from relative traversal")
|
||||
}
|
||||
|
||||
physT := newTraversal()
|
||||
rootName := traversal.RootName()
|
||||
steps := traversal[1:]
|
||||
|
||||
{
|
||||
tn := newTraverseName()
|
||||
tn.name = tn.children.Append(newIdentifier(&Token{
|
||||
Type: hclsyntax.TokenIdent,
|
||||
Bytes: []byte(rootName),
|
||||
}))
|
||||
physT.steps.Add(physT.children.Append(tn))
|
||||
}
|
||||
|
||||
for _, step := range steps {
|
||||
switch ts := step.(type) {
|
||||
case hcl.TraverseAttr:
|
||||
tn := newTraverseName()
|
||||
tn.children.AppendUnstructuredTokens(Tokens{
|
||||
{
|
||||
Type: hclsyntax.TokenDot,
|
||||
Bytes: []byte{'.'},
|
||||
},
|
||||
})
|
||||
tn.name = tn.children.Append(newIdentifier(&Token{
|
||||
Type: hclsyntax.TokenIdent,
|
||||
Bytes: []byte(ts.Name),
|
||||
}))
|
||||
physT.steps.Add(physT.children.Append(tn))
|
||||
case hcl.TraverseIndex:
|
||||
ti := newTraverseIndex()
|
||||
ti.children.AppendUnstructuredTokens(Tokens{
|
||||
{
|
||||
Type: hclsyntax.TokenOBrack,
|
||||
Bytes: []byte{'['},
|
||||
},
|
||||
})
|
||||
indexExpr := NewExpressionLiteral(ts.Key)
|
||||
ti.key = ti.children.Append(indexExpr)
|
||||
ti.children.AppendUnstructuredTokens(Tokens{
|
||||
{
|
||||
Type: hclsyntax.TokenCBrack,
|
||||
Bytes: []byte{']'},
|
||||
},
|
||||
})
|
||||
physT.steps.Add(physT.children.Append(ti))
|
||||
}
|
||||
}
|
||||
|
||||
expr := newExpression()
|
||||
expr.absTraversals.Add(expr.children.Append(physT))
|
||||
return expr
|
||||
}
|
||||
|
||||
// Variables returns the absolute traversals that exist within the receiving
|
||||
// expression.
|
||||
func (e *Expression) Variables() []*Traversal {
|
||||
nodes := e.absTraversals.List()
|
||||
ret := make([]*Traversal, len(nodes))
|
||||
for i, node := range nodes {
|
||||
ret[i] = node.content.(*Traversal)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// RenameVariablePrefix examines each of the absolute traversals in the
|
||||
// receiving expression to see if they have the given sequence of names as
|
||||
// a prefix. If so, they are updated in place to have the given
|
||||
// replacement names instead of that prefix.
|
||||
//
|
||||
// This can be used to implement symbol renaming. The calling application can
|
||||
// visit all relevant expressions in its input and apply the same renaming
|
||||
// to implement a global symbol rename.
|
||||
//
|
||||
// The search and replacement traversals must be the same length, or this
|
||||
// method will panic. Only attribute access operations can be matched and
|
||||
// replaced. Index steps never match the prefix.
|
||||
func (e *Expression) RenameVariablePrefix(search, replacement []string) {
|
||||
if len(search) != len(replacement) {
|
||||
panic(fmt.Sprintf("search and replacement length mismatch (%d and %d)", len(search), len(replacement)))
|
||||
}
|
||||
Traversals:
|
||||
for node := range e.absTraversals {
|
||||
traversal := node.content.(*Traversal)
|
||||
if len(traversal.steps) < len(search) {
|
||||
// If it's shorter then it can't have our prefix
|
||||
continue
|
||||
}
|
||||
|
||||
stepNodes := traversal.steps.List()
|
||||
for i, name := range search {
|
||||
step, isName := stepNodes[i].content.(*TraverseName)
|
||||
if !isName {
|
||||
continue Traversals // only name nodes can match
|
||||
}
|
||||
foundNameBytes := step.name.content.(*identifier).token.Bytes
|
||||
if len(foundNameBytes) != len(name) {
|
||||
continue Traversals
|
||||
}
|
||||
if string(foundNameBytes) != name {
|
||||
continue Traversals
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here then the prefix matched, so now we'll swap in
|
||||
// the replacement strings.
|
||||
for i, name := range replacement {
|
||||
step := stepNodes[i].content.(*TraverseName)
|
||||
token := step.name.content.(*identifier).token
|
||||
token.Bytes = []byte(name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Traversal represents a sequence of variable, attribute, and/or index
|
||||
// operations.
|
||||
type Traversal struct {
|
||||
inTree
|
||||
|
||||
steps nodeSet
|
||||
}
|
||||
|
||||
func newTraversal() *Traversal {
|
||||
return &Traversal{
|
||||
inTree: newInTree(),
|
||||
steps: newNodeSet(),
|
||||
}
|
||||
}
|
||||
|
||||
type TraverseName struct {
|
||||
inTree
|
||||
|
||||
name *node
|
||||
}
|
||||
|
||||
func newTraverseName() *TraverseName {
|
||||
return &TraverseName{
|
||||
inTree: newInTree(),
|
||||
}
|
||||
}
|
||||
|
||||
type TraverseIndex struct {
|
||||
inTree
|
||||
|
||||
key *node
|
||||
}
|
||||
|
||||
func newTraverseIndex() *TraverseIndex {
|
||||
return &TraverseIndex{
|
||||
inTree: newInTree(),
|
||||
}
|
||||
}
|
11 vendor/github.com/hashicorp/hcl2/hclwrite/doc.go (generated, vendored, new file)
|
@ -0,0 +1,11 @@
|
|||
// Package hclwrite deals with the problem of generating HCL configuration
|
||||
// and of making specific surgical changes to existing HCL configurations.
|
||||
//
|
||||
// It operates at a different level of abstraction than the main HCL parser
|
||||
// and AST, since details such as the placement of comments and newlines
|
||||
// are preserved when unchanged.
|
||||
//
|
||||
// The hclwrite API follows a similar principle to XML/HTML DOM, allowing nodes
|
||||
// to be read out, created and inserted, etc. Nodes represent syntax constructs
|
||||
// rather than semantic concepts.
|
||||
package hclwrite
|
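A short, assumed example of the hclwrite API introduced by these new files: build a file in memory, set attributes and a nested block, and write the formatted result (block and attribute names are invented).

```go
package main

import (
	"os"

	"github.com/hashicorp/hcl2/hclwrite"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f := hclwrite.NewEmptyFile()
	root := f.Body()

	// Top-level attribute with a literal value.
	root.SetAttributeValue("region", cty.StringVal("us-east-1"))
	root.AppendNewline()

	// Nested block; its contents are edited through Body().
	block := root.AppendNewBlock("service", []string{"web"})
	block.Body().SetAttributeValue("replicas", cty.NumberIntVal(2))

	// WriteTo runs the formatting pass before emitting the tokens.
	f.WriteTo(os.Stdout)
}
```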
492 vendor/github.com/hashicorp/hcl2/hclwrite/format.go (generated, vendored, new file)
|
@ -0,0 +1,492 @@
|
|||
package hclwrite
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
)
|
||||
|
||||
var inKeyword = hclsyntax.Keyword([]byte{'i', 'n'})
|
||||
|
||||
// placeholder token used when we don't have a token but we don't want
|
||||
// to pass a real "nil" and complicate things with nil pointer checks
|
||||
var nilToken = &Token{
|
||||
Type: hclsyntax.TokenNil,
|
||||
Bytes: []byte{},
|
||||
SpacesBefore: 0,
|
||||
}
|
||||
|
||||
// format rewrites tokens within the given sequence, in-place, to adjust the
|
||||
// whitespace around their content to achieve canonical formatting.
|
||||
func format(tokens Tokens) {
|
||||
// Formatting is a multi-pass process. More details on the passes below,
|
||||
// but this is the overview:
|
||||
// - adjust the leading space on each line to create appropriate
|
||||
// indentation
|
||||
// - adjust spaces between tokens in a single cell using a set of rules
|
||||
// - adjust the leading space in the "assign" and "comment" cells on each
|
||||
// line to vertically align with neighboring lines.
|
||||
// All of these steps operate in-place on the given tokens, so a caller
|
||||
// may collect a flat sequence of all of the tokens underlying an AST
|
||||
// and pass it here and we will then indirectly modify the AST itself.
|
||||
// Formatting must change only whitespace. Specifically, that means
|
||||
// changing the SpacesBefore attribute on a token while leaving the
|
||||
// other token attributes unchanged.
|
||||
|
||||
lines := linesForFormat(tokens)
|
||||
formatIndent(lines)
|
||||
formatSpaces(lines)
|
||||
formatCells(lines)
|
||||
}
|
||||
|
||||
func formatIndent(lines []formatLine) {
|
||||
// Our methodology for indents is to take the input one line at a time
|
||||
// and count the bracketing delimiters on each line. If a line has a net
|
||||
// increase in open brackets, we increase the indent level by one and
|
||||
// remember how many new openers we had. If the line has a net _decrease_,
|
||||
// we'll compare it to the most recent number of openers and decrease the
|
||||
// dedent level by one each time we pass an indent level remembered
|
||||
// earlier.
|
||||
// The "indent stack" used here allows for us to recognize degenerate
|
||||
// input where brackets are not symmetrical within lines and avoid
|
||||
// pushing things too far left or right, creating confusion.
|
||||
|
||||
// We'll start our indent stack at a reasonable capacity to minimize the
|
||||
// chance of us needing to grow it; 10 here means 10 levels of indent,
|
||||
// which should be more than enough for reasonable HCL uses.
|
||||
indents := make([]int, 0, 10)
|
||||
|
||||
inHeredoc := false
|
||||
for i := range lines {
|
||||
line := &lines[i]
|
||||
if len(line.lead) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if inHeredoc {
|
||||
for _, token := range line.lead {
|
||||
if token.Type == hclsyntax.TokenCHeredoc {
|
||||
inHeredoc = false
|
||||
}
|
||||
}
|
||||
continue // don't touch indentation inside heredocs
|
||||
}
|
||||
|
||||
if line.lead[0].Type == hclsyntax.TokenNewline {
|
||||
// Never place spaces before a newline
|
||||
line.lead[0].SpacesBefore = 0
|
||||
continue
|
||||
}
|
||||
|
||||
netBrackets := 0
|
||||
for _, token := range line.lead {
|
||||
netBrackets += tokenBracketChange(token)
|
||||
}
|
||||
for _, token := range line.assign {
|
||||
netBrackets += tokenBracketChange(token)
|
||||
if token.Type == hclsyntax.TokenOHeredoc {
|
||||
inHeredoc = true
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case netBrackets > 0:
|
||||
line.lead[0].SpacesBefore = 2 * len(indents)
|
||||
indents = append(indents, netBrackets)
|
||||
case netBrackets < 0:
|
||||
closed := -netBrackets
|
||||
for closed > 0 && len(indents) > 0 {
|
||||
switch {
|
||||
|
||||
case closed > indents[len(indents)-1]:
|
||||
closed -= indents[len(indents)-1]
|
||||
indents = indents[:len(indents)-1]
|
||||
|
||||
case closed < indents[len(indents)-1]:
|
||||
indents[len(indents)-1] -= closed
|
||||
closed = 0
|
||||
|
||||
default:
|
||||
indents = indents[:len(indents)-1]
|
||||
closed = 0
|
||||
}
|
||||
}
|
||||
line.lead[0].SpacesBefore = 2 * len(indents)
|
||||
default:
|
||||
line.lead[0].SpacesBefore = 2 * len(indents)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func formatSpaces(lines []formatLine) {
|
||||
for _, line := range lines {
|
||||
for i, token := range line.lead {
|
||||
var before, after *Token
|
||||
if i > 0 {
|
||||
before = line.lead[i-1]
|
||||
} else {
|
||||
before = nilToken
|
||||
}
|
||||
if i < (len(line.lead) - 1) {
|
||||
after = line.lead[i+1]
|
||||
} else {
|
||||
after = nilToken
|
||||
}
|
||||
if spaceAfterToken(token, before, after) {
|
||||
after.SpacesBefore = 1
|
||||
} else {
|
||||
after.SpacesBefore = 0
|
||||
}
|
||||
}
|
||||
for i, token := range line.assign {
|
||||
if i == 0 {
|
||||
// first token in "assign" always has one space before to
|
||||
// separate the equals sign from what it's assigning.
|
||||
token.SpacesBefore = 1
|
||||
}
|
||||
|
||||
var before, after *Token
|
||||
if i > 0 {
|
||||
before = line.assign[i-1]
|
||||
} else {
|
||||
before = nilToken
|
||||
}
|
||||
if i < (len(line.assign) - 1) {
|
||||
after = line.assign[i+1]
|
||||
} else {
|
||||
after = nilToken
|
||||
}
|
||||
if spaceAfterToken(token, before, after) {
|
||||
after.SpacesBefore = 1
|
||||
} else {
|
||||
after.SpacesBefore = 0
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func formatCells(lines []formatLine) {
|
||||
|
||||
chainStart := -1
|
||||
maxColumns := 0
|
||||
|
||||
// We'll deal with the "assign" cell first, since moving that will
|
||||
// also impact the "comment" cell.
|
||||
closeAssignChain := func(i int) {
|
||||
for _, chainLine := range lines[chainStart:i] {
|
||||
columns := chainLine.lead.Columns()
|
||||
spaces := (maxColumns - columns) + 1
|
||||
chainLine.assign[0].SpacesBefore = spaces
|
||||
}
|
||||
chainStart = -1
|
||||
maxColumns = 0
|
||||
}
|
||||
for i, line := range lines {
|
||||
if line.assign == nil {
|
||||
if chainStart != -1 {
|
||||
closeAssignChain(i)
|
||||
}
|
||||
} else {
|
||||
if chainStart == -1 {
|
||||
chainStart = i
|
||||
}
|
||||
columns := line.lead.Columns()
|
||||
if columns > maxColumns {
|
||||
maxColumns = columns
|
||||
}
|
||||
}
|
||||
}
|
||||
if chainStart != -1 {
|
||||
closeAssignChain(len(lines))
|
||||
}
|
||||
|
||||
// Now we'll deal with the comments
|
||||
closeCommentChain := func(i int) {
|
||||
for _, chainLine := range lines[chainStart:i] {
|
||||
columns := chainLine.lead.Columns() + chainLine.assign.Columns()
|
||||
spaces := (maxColumns - columns) + 1
|
||||
chainLine.comment[0].SpacesBefore = spaces
|
||||
}
|
||||
chainStart = -1
|
||||
maxColumns = 0
|
||||
}
|
||||
for i, line := range lines {
|
||||
if line.comment == nil {
|
||||
if chainStart != -1 {
|
||||
closeCommentChain(i)
|
||||
}
|
||||
} else {
|
||||
if chainStart == -1 {
|
||||
chainStart = i
|
||||
}
|
||||
columns := line.lead.Columns() + line.assign.Columns()
|
||||
if columns > maxColumns {
|
||||
maxColumns = columns
|
||||
}
|
||||
}
|
||||
}
|
||||
if chainStart != -1 {
|
||||
closeCommentChain(len(lines))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// spaceAfterToken decides whether a particular subject token should have a
|
||||
// space after it when surrounded by the given before and after tokens.
|
||||
// "before" can be TokenNil, if the subject token is at the start of a sequence.
|
||||
func spaceAfterToken(subject, before, after *Token) bool {
|
||||
switch {
|
||||
|
||||
case after.Type == hclsyntax.TokenNewline || after.Type == hclsyntax.TokenNil:
|
||||
// Never add spaces before a newline
|
||||
return false
|
||||
|
||||
case subject.Type == hclsyntax.TokenIdent && after.Type == hclsyntax.TokenOParen:
|
||||
// Don't split a function name from open paren in a call
|
||||
return false
|
||||
|
||||
case subject.Type == hclsyntax.TokenDot || after.Type == hclsyntax.TokenDot:
|
||||
// Don't use spaces around attribute access dots
|
||||
return false
|
||||
|
||||
case after.Type == hclsyntax.TokenComma:
|
||||
// No space right before a comma in an argument list
|
||||
return false
|
||||
|
||||
case subject.Type == hclsyntax.TokenComma:
|
||||
// Always a space after a comma
|
||||
return true
|
||||
|
||||
case subject.Type == hclsyntax.TokenQuotedLit || subject.Type == hclsyntax.TokenStringLit || subject.Type == hclsyntax.TokenOQuote || subject.Type == hclsyntax.TokenOHeredoc || after.Type == hclsyntax.TokenQuotedLit || after.Type == hclsyntax.TokenStringLit || after.Type == hclsyntax.TokenCQuote || after.Type == hclsyntax.TokenCHeredoc:
|
||||
// No extra spaces within templates
|
||||
return false
|
||||
|
||||
case inKeyword.TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent:
|
||||
// This is a special case for inside for expressions where a user
|
||||
// might want to use a literal tuple constructor:
|
||||
// [for x in [foo]: x]
|
||||
// ... in that case, we would normally produce in[foo] thinking that
|
||||
// in is a reference, but we'll recognize it as a keyword here instead
|
||||
// to make the result less confusing.
|
||||
return true
|
||||
|
||||
case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0):
|
||||
return false
|
||||
|
||||
case subject.Type == hclsyntax.TokenMinus:
|
||||
// Since a minus can either be subtraction or negation, and the latter
|
||||
// should _not_ have a space after it, we need to use some heuristics
|
||||
// to decide which case this is.
|
||||
// We guess that we have a negation if the token before doesn't look
|
||||
// like it could be the end of an expression.
|
||||
|
||||
switch before.Type {
|
||||
|
||||
case hclsyntax.TokenNil:
|
||||
// Minus at the start of input must be a negation
|
||||
return false
|
||||
|
||||
case hclsyntax.TokenOParen, hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenEqual, hclsyntax.TokenColon, hclsyntax.TokenComma, hclsyntax.TokenQuestion:
|
||||
// Minus immediately after an opening bracket or separator must be a negation.
|
||||
return false
|
||||
|
||||
case hclsyntax.TokenPlus, hclsyntax.TokenStar, hclsyntax.TokenSlash, hclsyntax.TokenPercent, hclsyntax.TokenMinus:
|
||||
// Minus immediately after another arithmetic operator must be negation.
|
||||
return false
|
||||
|
||||
case hclsyntax.TokenEqualOp, hclsyntax.TokenNotEqual, hclsyntax.TokenGreaterThan, hclsyntax.TokenGreaterThanEq, hclsyntax.TokenLessThan, hclsyntax.TokenLessThanEq:
|
||||
// Minus immediately after another comparison operator must be negation.
|
||||
return false
|
||||
|
||||
case hclsyntax.TokenAnd, hclsyntax.TokenOr, hclsyntax.TokenBang:
|
||||
// Minus immediately after logical operator doesn't make sense but probably intended as negation.
|
||||
return false
|
||||
|
||||
default:
|
||||
return true
|
||||
}
|
||||
|
||||
case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace:
|
||||
// Unlike other bracket types, braces have spaces on both sides of them,
|
||||
// both in single-line nested blocks foo { bar = baz } and in object
|
||||
// constructor expressions foo = { bar = baz }.
|
||||
if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace {
|
||||
// An open brace followed by a close brace is an exception, however.
|
||||
// e.g. foo {} rather than foo { }
|
||||
return false
|
||||
}
|
||||
return true
|
||||
|
||||
// In the unlikely event that an interpolation expression is just
|
||||
// a single object constructor, we'll put a space between the ${ and
|
||||
// the following { to make this more obvious, and then the same
|
||||
// thing for the two braces at the end.
|
||||
case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace:
|
||||
return true
|
||||
case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd:
|
||||
return true
|
||||
|
||||
// Don't add spaces between interpolated items
|
||||
case subject.Type == hclsyntax.TokenTemplateSeqEnd && after.Type == hclsyntax.TokenTemplateInterp:
|
||||
return false
|
||||
|
||||
case tokenBracketChange(subject) > 0:
|
||||
// No spaces after open brackets
|
||||
return false
|
||||
|
||||
case tokenBracketChange(after) < 0:
|
||||
// No spaces before close brackets
|
||||
return false
|
||||
|
||||
default:
|
||||
// Most tokens are space-separated
|
||||
return true
|
||||
|
||||
}
|
||||
}
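// Editorial illustration, not part of the upstream file: the minus heuristic
// above distinguishes negation from subtraction by inspecting the preceding
// token, so formatting the expression below should keep the unary "-n" tight
// while spacing the binary minus. The input is an arbitrary assumption.
func exampleMinusSpacing() []byte {
	return Format([]byte("x = a - -n\n"))
}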
|
||||
|
||||
func linesForFormat(tokens Tokens) []formatLine {
|
||||
if len(tokens) == 0 {
|
||||
return make([]formatLine, 0)
|
||||
}
|
||||
|
||||
// first we'll count our lines, so we can allocate the array for them in
|
||||
// a single block. (We want to minimize memory pressure in this codepath,
|
||||
// so it can be run somewhat-frequently by editor integrations.)
|
||||
lineCount := 1 // if there are zero newlines then there is one line
|
||||
for _, tok := range tokens {
|
||||
if tokenIsNewline(tok) {
|
||||
lineCount++
|
||||
}
|
||||
}
|
||||
|
||||
// To start, we'll just put everything in the "lead" cell on each line,
|
||||
// and then do another pass over the lines afterwards to adjust.
|
||||
lines := make([]formatLine, lineCount)
|
||||
li := 0
|
||||
lineStart := 0
|
||||
for i, tok := range tokens {
|
||||
if tok.Type == hclsyntax.TokenEOF {
|
||||
// The EOF token doesn't belong to any line, and terminates the
|
||||
// token sequence.
|
||||
lines[li].lead = tokens[lineStart:i]
|
||||
break
|
||||
}
|
||||
|
||||
if tokenIsNewline(tok) {
|
||||
lines[li].lead = tokens[lineStart : i+1]
|
||||
lineStart = i + 1
|
||||
li++
|
||||
}
|
||||
}
|
||||
|
||||
// If a set of tokens doesn't end in TokenEOF (e.g. because it's a
|
||||
// fragment of tokens from the middle of a file) then we might fall
|
||||
// out here with a line still pending.
|
||||
if lineStart < len(tokens) {
|
||||
lines[li].lead = tokens[lineStart:]
|
||||
if lines[li].lead[len(lines[li].lead)-1].Type == hclsyntax.TokenEOF {
|
||||
lines[li].lead = lines[li].lead[:len(lines[li].lead)-1]
|
||||
}
|
||||
}
|
||||
|
||||
// Now we'll pick off any trailing comments and attribute assignments
|
||||
// to shuffle off into the "comment" and "assign" cells.
|
||||
inHeredoc := false
|
||||
for i := range lines {
|
||||
line := &lines[i]
|
||||
if len(line.lead) == 0 {
|
||||
// if the line is empty then there's nothing for us to do
|
||||
// (this should happen only for the final line, because all other
|
||||
// lines would have a newline token of some kind)
|
||||
continue
|
||||
}
|
||||
|
||||
if inHeredoc {
|
||||
for _, tok := range line.lead {
|
||||
if tok.Type == hclsyntax.TokenCHeredoc {
|
||||
inHeredoc = false
|
||||
break
|
||||
}
|
||||
}
|
||||
// Inside a heredoc everything is "lead", even if there's a
|
||||
// template interpolation embedded in there that might otherwise
|
||||
// confuse our logic below.
|
||||
continue
|
||||
}
|
||||
|
||||
for _, tok := range line.lead {
|
||||
if tok.Type == hclsyntax.TokenOHeredoc {
|
||||
inHeredoc = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment {
|
||||
line.comment = line.lead[len(line.lead)-1:]
|
||||
line.lead = line.lead[:len(line.lead)-1]
|
||||
}
|
||||
|
||||
for i, tok := range line.lead {
|
||||
if i > 0 && tok.Type == hclsyntax.TokenEqual {
|
||||
// We only move the tokens into "assign" if the RHS seems to
|
||||
// be a whole expression, which we determine by counting
|
||||
// brackets. If there's a net positive number of brackets
|
||||
// then that suggests we're introducing a multi-line expression.
|
||||
netBrackets := 0
|
||||
for _, token := range line.lead[i:] {
|
||||
netBrackets += tokenBracketChange(token)
|
||||
}
|
||||
|
||||
if netBrackets == 0 {
|
||||
line.assign = line.lead[i:]
|
||||
line.lead = line.lead[:i]
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return lines
|
||||
}
|
||||
|
||||
func tokenIsNewline(tok *Token) bool {
|
||||
if tok.Type == hclsyntax.TokenNewline {
|
||||
return true
|
||||
} else if tok.Type == hclsyntax.TokenComment {
|
||||
// Single line tokens (# and //) consume their terminating newline,
|
||||
// so we need to treat them as newline tokens as well.
|
||||
if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func tokenBracketChange(tok *Token) int {
|
||||
switch tok.Type {
|
||||
case hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenOParen, hclsyntax.TokenTemplateControl, hclsyntax.TokenTemplateInterp:
|
||||
return 1
|
||||
case hclsyntax.TokenCBrace, hclsyntax.TokenCBrack, hclsyntax.TokenCParen, hclsyntax.TokenTemplateSeqEnd:
|
||||
return -1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// formatLine represents a single line of source code for formatting purposes,
|
||||
// splitting its tokens into up to three "cells":
|
||||
//
|
||||
// lead: always present, representing everything up to one of the others
|
||||
// assign: if line contains an attribute assignment, represents the tokens
|
||||
// starting at (and including) the equals symbol
|
||||
// comment: if line contains any non-comment tokens and ends with a
|
||||
// single-line comment token, represents the comment.
|
||||
//
|
||||
// When formatting, the leading spaces of the first tokens in each of these
|
||||
// cells are adjusted to align their occurrences vertically on consecutive
|
||||
// rows.
|
||||
type formatLine struct {
|
||||
lead Tokens
|
||||
assign Tokens
|
||||
comment Tokens
|
||||
}
|
vendor/github.com/hashicorp/hcl2/hclwrite/generate.go (generated, vendored, new file, 250 lines added)
@@ -0,0 +1,250 @@
package hclwrite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// TokensForValue returns a sequence of tokens that represents the given
|
||||
// constant value.
|
||||
//
|
||||
// This function only supports types that are used by HCL. In particular, it
|
||||
// does not support capsule types and will panic if given one.
|
||||
//
|
||||
// It is not possible to express an unknown value in source code, so this
|
||||
// function will panic if the given value is unknown or contains any unknown
|
||||
// values. A caller can call the value's IsWhollyKnown method to verify that
|
||||
// no unknown values are present before calling TokensForValue.
|
||||
func TokensForValue(val cty.Value) Tokens {
|
||||
toks := appendTokensForValue(val, nil)
|
||||
format(toks) // fiddle with the SpacesBefore field to get canonical spacing
|
||||
return toks
|
||||
}
|
||||
|
||||
// TokensForTraversal returns a sequence of tokens that represents the given
|
||||
// traversal.
|
||||
//
|
||||
// If the traversal is absolute then the result is a self-contained, valid
|
||||
// reference expression. If the traversal is relative then the returned tokens
|
||||
// could be appended to some other expression tokens to traverse into the
|
||||
// represented expression.
|
||||
func TokensForTraversal(traversal hcl.Traversal) Tokens {
|
||||
toks := appendTokensForTraversal(traversal, nil)
|
||||
format(toks) // fiddle with the SpacesBefore field to get canonical spacing
|
||||
return toks
|
||||
}
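// The sketch below is an editorial illustration, not part of the upstream
// generate.go: it shows one way a caller might use TokensForValue to render
// a constant value as HCL source bytes. The input value is an arbitrary
// assumption; Tokens.Bytes is defined in tokens.go in this same package.
func exampleTokensForValue() []byte {
	val := cty.ObjectVal(map[string]cty.Value{
		"name":    cty.StringVal("example"),
		"enabled": cty.True,
	})
	// TokensForValue already applies format(), so the returned tokens carry
	// canonical spacing; Bytes just serializes them.
	return TokensForValue(val).Bytes()
}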
|
||||
|
||||
func appendTokensForValue(val cty.Value, toks Tokens) Tokens {
|
||||
switch {
|
||||
|
||||
case !val.IsKnown():
|
||||
panic("cannot produce tokens for unknown value")
|
||||
|
||||
case val.IsNull():
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenIdent,
|
||||
Bytes: []byte(`null`),
|
||||
})
|
||||
|
||||
case val.Type() == cty.Bool:
|
||||
var src []byte
|
||||
if val.True() {
|
||||
src = []byte(`true`)
|
||||
} else {
|
||||
src = []byte(`false`)
|
||||
}
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenIdent,
|
||||
Bytes: src,
|
||||
})
|
||||
|
||||
case val.Type() == cty.Number:
|
||||
bf := val.AsBigFloat()
|
||||
srcStr := bf.Text('f', -1)
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenNumberLit,
|
||||
Bytes: []byte(srcStr),
|
||||
})
|
||||
|
||||
case val.Type() == cty.String:
|
||||
// TODO: If it's a multi-line string ending in a newline, format
|
||||
// it as a HEREDOC instead.
|
||||
src := escapeQuotedStringLit(val.AsString())
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenOQuote,
|
||||
Bytes: []byte{'"'},
|
||||
})
|
||||
if len(src) > 0 {
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenQuotedLit,
|
||||
Bytes: src,
|
||||
})
|
||||
}
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenCQuote,
|
||||
Bytes: []byte{'"'},
|
||||
})
|
||||
|
||||
case val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType():
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenOBrack,
|
||||
Bytes: []byte{'['},
|
||||
})
|
||||
|
||||
i := 0
|
||||
for it := val.ElementIterator(); it.Next(); {
|
||||
if i > 0 {
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenComma,
|
||||
Bytes: []byte{','},
|
||||
})
|
||||
}
|
||||
_, eVal := it.Element()
|
||||
toks = appendTokensForValue(eVal, toks)
|
||||
i++
|
||||
}
|
||||
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenCBrack,
|
||||
Bytes: []byte{']'},
|
||||
})
|
||||
|
||||
case val.Type().IsMapType() || val.Type().IsObjectType():
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenOBrace,
|
||||
Bytes: []byte{'{'},
|
||||
})
|
||||
|
||||
i := 0
|
||||
for it := val.ElementIterator(); it.Next(); {
|
||||
if i > 0 {
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenComma,
|
||||
Bytes: []byte{','},
|
||||
})
|
||||
}
|
||||
eKey, eVal := it.Element()
|
||||
if hclsyntax.ValidIdentifier(eKey.AsString()) {
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenIdent,
|
||||
Bytes: []byte(eKey.AsString()),
|
||||
})
|
||||
} else {
|
||||
toks = appendTokensForValue(eKey, toks)
|
||||
}
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenEqual,
|
||||
Bytes: []byte{'='},
|
||||
})
|
||||
toks = appendTokensForValue(eVal, toks)
|
||||
i++
|
||||
}
|
||||
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenCBrace,
|
||||
Bytes: []byte{'}'},
|
||||
})
|
||||
|
||||
default:
|
||||
panic(fmt.Sprintf("cannot produce tokens for %#v", val))
|
||||
}
|
||||
|
||||
return toks
|
||||
}
|
||||
|
||||
func appendTokensForTraversal(traversal hcl.Traversal, toks Tokens) Tokens {
|
||||
for _, step := range traversal {
|
||||
toks = appendTokensForTraversalStep(step, toks) // keep the returned slice so the step's tokens aren't lost
|
||||
}
|
||||
return toks
|
||||
}
|
||||
|
||||
func appendTokensForTraversalStep(step hcl.Traverser, toks Tokens) Tokens {
|
||||
switch ts := step.(type) {
|
||||
case hcl.TraverseRoot:
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenIdent,
|
||||
Bytes: []byte(ts.Name),
|
||||
})
|
||||
case hcl.TraverseAttr:
|
||||
toks = append(
|
||||
toks,
|
||||
&Token{
|
||||
Type: hclsyntax.TokenDot,
|
||||
Bytes: []byte{'.'},
|
||||
},
|
||||
&Token{
|
||||
Type: hclsyntax.TokenIdent,
|
||||
Bytes: []byte(ts.Name),
|
||||
},
|
||||
)
|
||||
case hcl.TraverseIndex:
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenOBrack,
|
||||
Bytes: []byte{'['},
|
||||
})
|
||||
toks = appendTokensForValue(ts.Key, toks)
|
||||
toks = append(toks, &Token{
|
||||
Type: hclsyntax.TokenCBrack,
|
||||
Bytes: []byte{']'},
|
||||
})
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported traversal step type %T", step))
|
||||
}
|
||||
return toks
}
|
||||
|
||||
func escapeQuotedStringLit(s string) []byte {
|
||||
if len(s) == 0 {
|
||||
return nil
|
||||
}
|
||||
buf := make([]byte, 0, len(s))
|
||||
for i, r := range s {
|
||||
switch r {
|
||||
case '\n':
|
||||
buf = append(buf, '\\', 'n')
|
||||
case '\r':
|
||||
buf = append(buf, '\\', 'r')
|
||||
case '\t':
|
||||
buf = append(buf, '\\', 't')
|
||||
case '"':
|
||||
buf = append(buf, '\\', '"')
|
||||
case '\\':
|
||||
buf = append(buf, '\\', '\\')
|
||||
case '$', '%':
|
||||
buf = appendRune(buf, r)
|
||||
remain := s[i+1:]
|
||||
if len(remain) > 0 && remain[0] == '{' {
|
||||
// Double up our template introducer symbol to escape it.
|
||||
buf = appendRune(buf, r)
|
||||
}
|
||||
default:
|
||||
if !unicode.IsPrint(r) {
|
||||
var fmted string
|
||||
if r < 65536 {
|
||||
fmted = fmt.Sprintf("\\u%04x", r)
|
||||
} else {
|
||||
fmted = fmt.Sprintf("\\U%08x", r)
|
||||
}
|
||||
buf = append(buf, fmted...)
|
||||
} else {
|
||||
buf = appendRune(buf, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
return buf
|
||||
}
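// Editorial illustration, not part of the upstream file: shows the kind of
// escaping escapeQuotedStringLit performs. The input string is an arbitrary
// assumption; note how the template introducer "${" is doubled to "$${" so
// that it is treated literally rather than as an interpolation.
func exampleEscapeQuotedStringLit() []byte {
	// For the input below the result is expected to read roughly:
	//   say \"hi\" to $${name}\n
	return escapeQuotedStringLit("say \"hi\" to ${name}\n")
}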
|
||||
|
||||
func appendRune(b []byte, r rune) []byte {
|
||||
l := utf8.RuneLen(r)
|
||||
for i := 0; i < l; i++ {
|
||||
b = append(b, 0) // make room at the end of our buffer
|
||||
}
|
||||
ch := b[len(b)-l:]
|
||||
utf8.EncodeRune(ch, r)
|
||||
return b
|
||||
}
|
vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go (generated, vendored, new file, 23 lines added)
@@ -0,0 +1,23 @@
package hclwrite

import (
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

type nativeNodeSorter struct {
	Nodes []hclsyntax.Node
}

func (s nativeNodeSorter) Len() int {
	return len(s.Nodes)
}

func (s nativeNodeSorter) Less(i, j int) bool {
	rangeI := s.Nodes[i].Range()
	rangeJ := s.Nodes[j].Range()
	return rangeI.Start.Byte < rangeJ.Start.Byte
}

func (s nativeNodeSorter) Swap(i, j int) {
	s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i]
}
vendor/github.com/hashicorp/hcl2/hclwrite/node.go (generated, vendored, new file, 236 lines added)
@@ -0,0 +1,236 @@
package hclwrite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
// node represents a node in the AST.
|
||||
type node struct {
|
||||
content nodeContent
|
||||
|
||||
list *nodes
|
||||
before, after *node
|
||||
}
|
||||
|
||||
func newNode(c nodeContent) *node {
|
||||
return &node{
|
||||
content: c,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) Equal(other *node) bool {
|
||||
return cmp.Equal(n.content, other.content)
|
||||
}
|
||||
|
||||
func (n *node) BuildTokens(to Tokens) Tokens {
|
||||
return n.content.BuildTokens(to)
|
||||
}
|
||||
|
||||
// Detach removes the receiver from the list it currently belongs to. If the
|
||||
// node is not currently in a list, this is a no-op.
|
||||
func (n *node) Detach() {
|
||||
if n.list == nil {
|
||||
return
|
||||
}
|
||||
if n.before != nil {
|
||||
n.before.after = n.after
|
||||
}
|
||||
if n.after != nil {
|
||||
n.after.before = n.before
|
||||
}
|
||||
if n.list.first == n {
|
||||
n.list.first = n.after
|
||||
}
|
||||
if n.list.last == n {
|
||||
n.list.last = n.before
|
||||
}
|
||||
n.list = nil
|
||||
n.before = nil
|
||||
n.after = nil
|
||||
}
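// Editorial illustration, not part of the upstream file: exercises the
// doubly-linked-list maintenance that Detach performs. The token contents
// are arbitrary assumptions; newIdentToken and AppendUnstructuredTokens are
// defined elsewhere in this package.
func exampleDetach() Tokens {
	list := &nodes{}
	list.AppendUnstructuredTokens(Tokens{newIdentToken("a")})
	b := list.AppendUnstructuredTokens(Tokens{newIdentToken("b")})
	list.AppendUnstructuredTokens(Tokens{newIdentToken("c")})

	// Removing the middle node relinks its neighbours, so building tokens
	// from the list afterwards yields only "a" and "c".
	b.Detach()
	return list.BuildTokens(nil)
}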
|
||||
|
||||
// ReplaceWith removes the receiver from the list it currently belongs to and
|
||||
// inserts a new node with the given content in its place. If the node is not
|
||||
// currently in a list, this function will panic.
|
||||
//
|
||||
// The return value is the newly-constructed node, containing the given content.
|
||||
// After this function returns, the receiver is no longer attached to a list.
|
||||
func (n *node) ReplaceWith(c nodeContent) *node {
|
||||
if n.list == nil {
|
||||
panic("can't replace node that is not in a list")
|
||||
}
|
||||
|
||||
before := n.before
|
||||
after := n.after
|
||||
list := n.list
|
||||
n.before, n.after, n.list = nil, nil, nil
|
||||
|
||||
nn := newNode(c)
|
||||
nn.before = before
|
||||
nn.after = after
|
||||
nn.list = list
|
||||
if before != nil {
|
||||
before.after = nn
|
||||
}
|
||||
if after != nil {
|
||||
after.before = nn
|
||||
}
|
||||
return nn
|
||||
}
|
||||
|
||||
func (n *node) assertUnattached() {
|
||||
if n.list != nil {
|
||||
panic(fmt.Sprintf("attempt to attach already-attached node %#v", n))
|
||||
}
|
||||
}
|
||||
|
||||
// nodeContent is the interface type implemented by all AST content types.
|
||||
type nodeContent interface {
|
||||
walkChildNodes(w internalWalkFunc)
|
||||
BuildTokens(to Tokens) Tokens
|
||||
}
|
||||
|
||||
// nodes is a list of nodes.
|
||||
type nodes struct {
|
||||
first, last *node
|
||||
}
|
||||
|
||||
func (ns *nodes) BuildTokens(to Tokens) Tokens {
|
||||
for n := ns.first; n != nil; n = n.after {
|
||||
to = n.BuildTokens(to)
|
||||
}
|
||||
return to
|
||||
}
|
||||
|
||||
func (ns *nodes) Clear() {
|
||||
ns.first = nil
|
||||
ns.last = nil
|
||||
}
|
||||
|
||||
func (ns *nodes) Append(c nodeContent) *node {
|
||||
n := &node{
|
||||
content: c,
|
||||
}
|
||||
ns.AppendNode(n)
|
||||
n.list = ns
|
||||
return n
|
||||
}
|
||||
|
||||
func (ns *nodes) AppendNode(n *node) {
|
||||
if ns.last != nil {
|
||||
n.before = ns.last
|
||||
ns.last.after = n
|
||||
}
|
||||
n.list = ns
|
||||
ns.last = n
|
||||
if ns.first == nil {
|
||||
ns.first = n
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node {
|
||||
if len(tokens) == 0 {
|
||||
return nil
|
||||
}
|
||||
n := newNode(tokens)
|
||||
ns.AppendNode(n)
|
||||
n.list = ns
|
||||
return n
|
||||
}
|
||||
|
||||
// nodeSet is an unordered set of nodes. It is used to describe a set of nodes
|
||||
// that all belong to the same list that have some role or characteristic
|
||||
// in common.
|
||||
type nodeSet map[*node]struct{}
|
||||
|
||||
func newNodeSet() nodeSet {
|
||||
return make(nodeSet)
|
||||
}
|
||||
|
||||
func (ns nodeSet) Has(n *node) bool {
|
||||
if ns == nil {
|
||||
return false
|
||||
}
|
||||
_, exists := ns[n]
|
||||
return exists
|
||||
}
|
||||
|
||||
func (ns nodeSet) Add(n *node) {
|
||||
ns[n] = struct{}{}
|
||||
}
|
||||
|
||||
func (ns nodeSet) Remove(n *node) {
|
||||
delete(ns, n)
|
||||
}
|
||||
|
||||
func (ns nodeSet) List() []*node {
|
||||
if len(ns) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
ret := make([]*node, 0, len(ns))
|
||||
|
||||
// Determine which list we are working with. We assume here that all of
|
||||
// the nodes belong to the same list, since that is part of the contract
|
||||
// for nodeSet.
|
||||
var list *nodes
|
||||
for n := range ns {
|
||||
list = n.list
|
||||
break
|
||||
}
|
||||
|
||||
// We recover the order by iterating over the whole list. This is not
|
||||
// the most efficient way to do it, but our node lists should always be
|
||||
// small so not worth making things more complex.
|
||||
for n := list.first; n != nil; n = n.after {
|
||||
if ns.Has(n) {
|
||||
ret = append(ret, n)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
type internalWalkFunc func(*node)
|
||||
|
||||
// inTree can be embedded into a content struct that has child nodes to get
|
||||
// a standard implementation of the NodeContent interface and a record of
|
||||
// a potential parent node.
|
||||
type inTree struct {
|
||||
parent *node
|
||||
children *nodes
|
||||
}
|
||||
|
||||
func newInTree() inTree {
|
||||
return inTree{
|
||||
children: &nodes{},
|
||||
}
|
||||
}
|
||||
|
||||
func (it *inTree) assertUnattached() {
|
||||
if it.parent != nil {
|
||||
panic(fmt.Sprintf("node is already attached to %T", it.parent.content))
|
||||
}
|
||||
}
|
||||
|
||||
func (it *inTree) walkChildNodes(w internalWalkFunc) {
|
||||
for n := it.children.first; n != nil; n = n.after {
|
||||
w(n)
|
||||
}
|
||||
}
|
||||
|
||||
func (it *inTree) BuildTokens(to Tokens) Tokens {
|
||||
for n := it.children.first; n != nil; n = n.after {
|
||||
to = n.BuildTokens(to)
|
||||
}
|
||||
return to
|
||||
}
|
||||
|
||||
// leafNode can be embedded into a content struct to give it a do-nothing
|
||||
// implementation of walkChildNodes
|
||||
type leafNode struct {
|
||||
}
|
||||
|
||||
func (n *leafNode) walkChildNodes(w internalWalkFunc) {
|
||||
}
|
vendor/github.com/hashicorp/hcl2/hclwrite/parser.go (generated, vendored, new file, 594 lines added)
@@ -0,0 +1,594 @@
package hclwrite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// Our "parser" here is actually not doing any parsing of its own. Instead,
|
||||
// it leans on the native parser in hclsyntax, and then uses the source ranges
|
||||
// from the AST to partition the raw token sequence to match the raw tokens
|
||||
// up to AST nodes.
|
||||
//
|
||||
// This strategy feels somewhat counter-intuitive, since most of the work the
|
||||
// parser does is thrown away here, but this strategy is chosen because the
|
||||
// normal parsing work done by hclsyntax is considered to be the "main case",
|
||||
// while modifying and re-printing source is more of an edge case, used only
|
||||
// in ancillary tools, and so it's good to keep all the main parsing logic
|
||||
// with the main case but keep all of the extra complexity of token wrangling
|
||||
// out of the main parser, which is already rather complex just serving the
|
||||
// use-cases it already serves.
|
||||
//
|
||||
// If the parsing step produces any errors, the returned File is nil because
|
||||
// we can't reliably extract tokens from the partial AST produced by an
|
||||
// erroneous parse.
|
||||
func parse(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) {
|
||||
file, diags := hclsyntax.ParseConfig(src, filename, start)
|
||||
if diags.HasErrors() {
|
||||
return nil, diags
|
||||
}
|
||||
|
||||
// To do our work here, we use the "native" tokens (those from hclsyntax)
|
||||
// to match against source ranges in the AST, but ultimately produce
|
||||
// slices from our sequence of "writer" tokens, which contain only
|
||||
// *relative* position information that is more appropriate for
|
||||
// transformation/writing use-cases.
|
||||
nativeTokens, diags := hclsyntax.LexConfig(src, filename, start)
|
||||
if diags.HasErrors() {
|
||||
// should never happen, since we would've caught these diags in
|
||||
// the first call above.
|
||||
return nil, diags
|
||||
}
|
||||
writerTokens := writerTokens(nativeTokens)
|
||||
|
||||
from := inputTokens{
|
||||
nativeTokens: nativeTokens,
|
||||
writerTokens: writerTokens,
|
||||
}
|
||||
|
||||
before, root, after := parseBody(file.Body.(*hclsyntax.Body), from)
|
||||
ret := &File{
|
||||
inTree: newInTree(),
|
||||
|
||||
srcBytes: src,
|
||||
body: root,
|
||||
}
|
||||
|
||||
nodes := ret.inTree.children
|
||||
nodes.Append(before.Tokens())
|
||||
nodes.AppendNode(root)
|
||||
nodes.Append(after.Tokens())
|
||||
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
type inputTokens struct {
|
||||
nativeTokens hclsyntax.Tokens
|
||||
writerTokens Tokens
|
||||
}
|
||||
|
||||
func (it inputTokens) Partition(rng hcl.Range) (before, within, after inputTokens) {
|
||||
start, end := partitionTokens(it.nativeTokens, rng)
|
||||
before = it.Slice(0, start)
|
||||
within = it.Slice(start, end)
|
||||
after = it.Slice(end, len(it.nativeTokens))
|
||||
return
|
||||
}
|
||||
|
||||
func (it inputTokens) PartitionType(ty hclsyntax.TokenType) (before, within, after inputTokens) {
|
||||
for i, t := range it.writerTokens {
|
||||
if t.Type == ty {
|
||||
return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens))
|
||||
}
|
||||
}
|
||||
panic(fmt.Sprintf("didn't find any token of type %s", ty))
|
||||
}
|
||||
|
||||
func (it inputTokens) PartitionTypeSingle(ty hclsyntax.TokenType) (before inputTokens, found *Token, after inputTokens) {
|
||||
before, within, after := it.PartitionType(ty)
|
||||
if within.Len() != 1 {
|
||||
panic("PartitionType found more than one token")
|
||||
}
|
||||
return before, within.Tokens()[0], after
|
||||
}
|
||||
|
||||
// PartitionIncludingComments is like Partition except the returned "within"
|
||||
// range includes any lead and line comments associated with the range.
|
||||
func (it inputTokens) PartitionIncludingComments(rng hcl.Range) (before, within, after inputTokens) {
|
||||
start, end := partitionTokens(it.nativeTokens, rng)
|
||||
start = partitionLeadCommentTokens(it.nativeTokens[:start])
|
||||
_, afterNewline := partitionLineEndTokens(it.nativeTokens[end:])
|
||||
end += afterNewline
|
||||
|
||||
before = it.Slice(0, start)
|
||||
within = it.Slice(start, end)
|
||||
after = it.Slice(end, len(it.nativeTokens))
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
// PartitionBlockItem is similar to PartitionIncludeComments but it returns
|
||||
// the comments as separate token sequences so that they can be captured into
|
||||
// AST attributes. It makes assumptions that apply only to block items, so
|
||||
// should not be used for other constructs.
|
||||
func (it inputTokens) PartitionBlockItem(rng hcl.Range) (before, leadComments, within, lineComments, newline, after inputTokens) {
|
||||
before, within, after = it.Partition(rng)
|
||||
before, leadComments = before.PartitionLeadComments()
|
||||
lineComments, newline, after = after.PartitionLineEndTokens()
|
||||
return
|
||||
}
|
||||
|
||||
func (it inputTokens) PartitionLeadComments() (before, within inputTokens) {
|
||||
start := partitionLeadCommentTokens(it.nativeTokens)
|
||||
before = it.Slice(0, start)
|
||||
within = it.Slice(start, len(it.nativeTokens))
|
||||
return
|
||||
}
|
||||
|
||||
func (it inputTokens) PartitionLineEndTokens() (comments, newline, after inputTokens) {
|
||||
afterComments, afterNewline := partitionLineEndTokens(it.nativeTokens)
|
||||
comments = it.Slice(0, afterComments)
|
||||
newline = it.Slice(afterComments, afterNewline)
|
||||
after = it.Slice(afterNewline, len(it.nativeTokens))
|
||||
return
|
||||
}
|
||||
|
||||
func (it inputTokens) Slice(start, end int) inputTokens {
|
||||
// When we slice, we create a new slice with no additional capacity because
|
||||
// we expect that these slices will be mutated in order to insert
|
||||
// new code into the AST, and we want to ensure that a new underlying
|
||||
// array gets allocated in that case, rather than writing into some
|
||||
// following slice and corrupting it.
|
||||
return inputTokens{
|
||||
nativeTokens: it.nativeTokens[start:end:end],
|
||||
writerTokens: it.writerTokens[start:end:end],
|
||||
}
|
||||
}
|
||||
|
||||
func (it inputTokens) Len() int {
|
||||
return len(it.nativeTokens)
|
||||
}
|
||||
|
||||
func (it inputTokens) Tokens() Tokens {
|
||||
return it.writerTokens
|
||||
}
|
||||
|
||||
func (it inputTokens) Types() []hclsyntax.TokenType {
|
||||
ret := make([]hclsyntax.TokenType, len(it.nativeTokens))
|
||||
for i, tok := range it.nativeTokens {
|
||||
ret[i] = tok.Type
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// parseBody locates the given body within the given input tokens and returns
|
||||
// the resulting *Body object as well as the tokens that appeared before and
|
||||
// after it.
|
||||
func parseBody(nativeBody *hclsyntax.Body, from inputTokens) (inputTokens, *node, inputTokens) {
|
||||
before, within, after := from.PartitionIncludingComments(nativeBody.SrcRange)
|
||||
|
||||
// The main AST doesn't retain the original source ordering of the
|
||||
// body items, so we need to reconstruct that ordering by inspecting
|
||||
// their source ranges.
|
||||
nativeItems := make([]hclsyntax.Node, 0, len(nativeBody.Attributes)+len(nativeBody.Blocks))
|
||||
for _, nativeAttr := range nativeBody.Attributes {
|
||||
nativeItems = append(nativeItems, nativeAttr)
|
||||
}
|
||||
for _, nativeBlock := range nativeBody.Blocks {
|
||||
nativeItems = append(nativeItems, nativeBlock)
|
||||
}
|
||||
sort.Sort(nativeNodeSorter{nativeItems})
|
||||
|
||||
body := &Body{
|
||||
inTree: newInTree(),
|
||||
items: newNodeSet(),
|
||||
}
|
||||
|
||||
remain := within
|
||||
for _, nativeItem := range nativeItems {
|
||||
beforeItem, item, afterItem := parseBodyItem(nativeItem, remain)
|
||||
|
||||
if beforeItem.Len() > 0 {
|
||||
body.AppendUnstructuredTokens(beforeItem.Tokens())
|
||||
}
|
||||
body.appendItemNode(item)
|
||||
|
||||
remain = afterItem
|
||||
}
|
||||
|
||||
if remain.Len() > 0 {
|
||||
body.AppendUnstructuredTokens(remain.Tokens())
|
||||
}
|
||||
|
||||
return before, newNode(body), after
|
||||
}
|
||||
|
||||
func parseBodyItem(nativeItem hclsyntax.Node, from inputTokens) (inputTokens, *node, inputTokens) {
|
||||
before, leadComments, within, lineComments, newline, after := from.PartitionBlockItem(nativeItem.Range())
|
||||
|
||||
var item *node
|
||||
|
||||
switch tItem := nativeItem.(type) {
|
||||
case *hclsyntax.Attribute:
|
||||
item = parseAttribute(tItem, within, leadComments, lineComments, newline)
|
||||
case *hclsyntax.Block:
|
||||
item = parseBlock(tItem, within, leadComments, lineComments, newline)
|
||||
default:
|
||||
// should never happen if caller is behaving
|
||||
panic("unsupported native item type")
|
||||
}
|
||||
|
||||
return before, item, after
|
||||
}
|
||||
|
||||
func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineComments, newline inputTokens) *node {
|
||||
attr := &Attribute{
|
||||
inTree: newInTree(),
|
||||
}
|
||||
children := attr.inTree.children
|
||||
|
||||
{
|
||||
cn := newNode(newComments(leadComments.Tokens()))
|
||||
attr.leadComments = cn
|
||||
children.AppendNode(cn)
|
||||
}
|
||||
|
||||
before, nameTokens, from := from.Partition(nativeAttr.NameRange)
|
||||
{
|
||||
children.AppendUnstructuredTokens(before.Tokens())
|
||||
if nameTokens.Len() != 1 {
|
||||
// Should never happen with valid input
|
||||
panic("attribute name is not exactly one token")
|
||||
}
|
||||
token := nameTokens.Tokens()[0]
|
||||
in := newNode(newIdentifier(token))
|
||||
attr.name = in
|
||||
children.AppendNode(in)
|
||||
}
|
||||
|
||||
before, equalsTokens, from := from.Partition(nativeAttr.EqualsRange)
|
||||
children.AppendUnstructuredTokens(before.Tokens())
|
||||
children.AppendUnstructuredTokens(equalsTokens.Tokens())
|
||||
|
||||
before, exprTokens, from := from.Partition(nativeAttr.Expr.Range())
|
||||
{
|
||||
children.AppendUnstructuredTokens(before.Tokens())
|
||||
exprNode := parseExpression(nativeAttr.Expr, exprTokens)
|
||||
attr.expr = exprNode
|
||||
children.AppendNode(exprNode)
|
||||
}
|
||||
|
||||
{
|
||||
cn := newNode(newComments(lineComments.Tokens()))
|
||||
attr.lineComments = cn
|
||||
children.AppendNode(cn)
|
||||
}
|
||||
|
||||
children.AppendUnstructuredTokens(newline.Tokens())
|
||||
|
||||
// Collect any stragglers, though there shouldn't be any
|
||||
children.AppendUnstructuredTokens(from.Tokens())
|
||||
|
||||
return newNode(attr)
|
||||
}
|
||||
|
||||
func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node {
|
||||
block := &Block{
|
||||
inTree: newInTree(),
|
||||
labels: newNodeSet(),
|
||||
}
|
||||
children := block.inTree.children
|
||||
|
||||
{
|
||||
cn := newNode(newComments(leadComments.Tokens()))
|
||||
block.leadComments = cn
|
||||
children.AppendNode(cn)
|
||||
}
|
||||
|
||||
before, typeTokens, from := from.Partition(nativeBlock.TypeRange)
|
||||
{
|
||||
children.AppendUnstructuredTokens(before.Tokens())
|
||||
if typeTokens.Len() != 1 {
|
||||
// Should never happen with valid input
|
||||
panic("block type name is not exactly one token")
|
||||
}
|
||||
token := typeTokens.Tokens()[0]
|
||||
in := newNode(newIdentifier(token))
|
||||
block.typeName = in
|
||||
children.AppendNode(in)
|
||||
}
|
||||
|
||||
for _, rng := range nativeBlock.LabelRanges {
|
||||
var labelTokens inputTokens
|
||||
before, labelTokens, from = from.Partition(rng)
|
||||
children.AppendUnstructuredTokens(before.Tokens())
|
||||
tokens := labelTokens.Tokens()
|
||||
ln := newNode(newQuoted(tokens))
|
||||
block.labels.Add(ln)
|
||||
children.AppendNode(ln)
|
||||
}
|
||||
|
||||
before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange)
|
||||
children.AppendUnstructuredTokens(before.Tokens())
|
||||
children.AppendUnstructuredTokens(oBrace.Tokens())
|
||||
|
||||
// We go a bit out of order here: we go hunting for the closing brace
|
||||
// so that we have a delimited body, but then we'll deal with the body
|
||||
// before we actually append the closing brace and any straggling tokens
|
||||
// that appear after it.
|
||||
bodyTokens, cBrace, from := from.Partition(nativeBlock.CloseBraceRange)
|
||||
before, body, after := parseBody(nativeBlock.Body, bodyTokens)
|
||||
children.AppendUnstructuredTokens(before.Tokens())
|
||||
block.body = body
|
||||
children.AppendNode(body)
|
||||
children.AppendUnstructuredTokens(after.Tokens())
|
||||
|
||||
children.AppendUnstructuredTokens(cBrace.Tokens())
|
||||
|
||||
// stragglers
|
||||
children.AppendUnstructuredTokens(from.Tokens())
|
||||
if lineComments.Len() > 0 {
|
||||
// blocks don't actually have line comments, so we'll just treat
|
||||
// them as extra stragglers
|
||||
children.AppendUnstructuredTokens(lineComments.Tokens())
|
||||
}
|
||||
children.AppendUnstructuredTokens(newline.Tokens())
|
||||
|
||||
return newNode(block)
|
||||
}
|
||||
|
||||
func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node {
|
||||
expr := newExpression()
|
||||
children := expr.inTree.children
|
||||
|
||||
nativeVars := nativeExpr.Variables()
|
||||
|
||||
for _, nativeTraversal := range nativeVars {
|
||||
before, traversal, after := parseTraversal(nativeTraversal, from)
|
||||
children.AppendUnstructuredTokens(before.Tokens())
|
||||
children.AppendNode(traversal)
|
||||
expr.absTraversals.Add(traversal)
|
||||
from = after
|
||||
}
|
||||
// Attach any stragglers that don't belong to a traversal to the expression
|
||||
// itself. In an expression with no traversals at all, this is just the
|
||||
// entirety of "from".
|
||||
children.AppendUnstructuredTokens(from.Tokens())
|
||||
|
||||
return newNode(expr)
|
||||
}
|
||||
|
||||
func parseTraversal(nativeTraversal hcl.Traversal, from inputTokens) (before inputTokens, n *node, after inputTokens) {
|
||||
traversal := newTraversal()
|
||||
children := traversal.inTree.children
|
||||
before, from, after = from.Partition(nativeTraversal.SourceRange())
|
||||
|
||||
stepAfter := from
|
||||
for _, nativeStep := range nativeTraversal {
|
||||
before, step, after := parseTraversalStep(nativeStep, stepAfter)
|
||||
children.AppendUnstructuredTokens(before.Tokens())
|
||||
children.AppendNode(step)
|
||||
traversal.steps.Add(step)
|
||||
stepAfter = after
|
||||
}
|
||||
|
||||
return before, newNode(traversal), after
|
||||
}
|
||||
|
||||
func parseTraversalStep(nativeStep hcl.Traverser, from inputTokens) (before inputTokens, n *node, after inputTokens) {
|
||||
var children *nodes
|
||||
switch tNativeStep := nativeStep.(type) {
|
||||
|
||||
case hcl.TraverseRoot, hcl.TraverseAttr:
|
||||
step := newTraverseName()
|
||||
children = step.inTree.children
|
||||
before, from, after = from.Partition(nativeStep.SourceRange())
|
||||
inBefore, token, inAfter := from.PartitionTypeSingle(hclsyntax.TokenIdent)
|
||||
name := newIdentifier(token)
|
||||
children.AppendUnstructuredTokens(inBefore.Tokens())
|
||||
step.name = children.Append(name)
|
||||
children.AppendUnstructuredTokens(inAfter.Tokens())
|
||||
return before, newNode(step), after
|
||||
|
||||
case hcl.TraverseIndex:
|
||||
step := newTraverseIndex()
|
||||
children = step.inTree.children
|
||||
before, from, after = from.Partition(nativeStep.SourceRange())
|
||||
|
||||
var inBefore, oBrack, keyTokens, cBrack inputTokens
|
||||
inBefore, oBrack, from = from.PartitionType(hclsyntax.TokenOBrack)
|
||||
children.AppendUnstructuredTokens(inBefore.Tokens())
|
||||
children.AppendUnstructuredTokens(oBrack.Tokens())
|
||||
keyTokens, cBrack, from = from.PartitionType(hclsyntax.TokenCBrack)
|
||||
|
||||
keyVal := tNativeStep.Key
|
||||
switch keyVal.Type() {
|
||||
case cty.String:
|
||||
key := newQuoted(keyTokens.Tokens())
|
||||
step.key = children.Append(key)
|
||||
case cty.Number:
|
||||
valBefore, valToken, valAfter := keyTokens.PartitionTypeSingle(hclsyntax.TokenNumberLit)
|
||||
children.AppendUnstructuredTokens(valBefore.Tokens())
|
||||
key := newNumber(valToken)
|
||||
step.key = children.Append(key)
|
||||
children.AppendUnstructuredTokens(valAfter.Tokens())
|
||||
}
|
||||
|
||||
children.AppendUnstructuredTokens(cBrack.Tokens())
|
||||
children.AppendUnstructuredTokens(from.Tokens())
|
||||
|
||||
return before, newNode(step), after
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported traversal step type %T", nativeStep))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// writerTokens takes a sequence of tokens as produced by the main hclsyntax
|
||||
// package and transforms it into an equivalent sequence of tokens using
|
||||
// this package's own token model.
|
||||
//
|
||||
// The resulting list contains the same number of tokens and uses the same
|
||||
// indices as the input, allowing the two sets of tokens to be correlated
|
||||
// by index.
|
||||
func writerTokens(nativeTokens hclsyntax.Tokens) Tokens {
|
||||
// Ultimately we want a slice of token _pointers_, but since we can
|
||||
// predict how much memory we're going to devote to tokens we'll allocate
|
||||
// it all as a single flat buffer and thus give the GC less work to do.
|
||||
tokBuf := make([]Token, len(nativeTokens))
|
||||
var lastByteOffset int
|
||||
for i, mainToken := range nativeTokens {
|
||||
// Create a copy of the bytes so that we can mutate without
|
||||
// corrupting the original token stream.
|
||||
bytes := make([]byte, len(mainToken.Bytes))
|
||||
copy(bytes, mainToken.Bytes)
|
||||
|
||||
tokBuf[i] = Token{
|
||||
Type: mainToken.Type,
|
||||
Bytes: bytes,
|
||||
|
||||
// We assume here that spaces are always ASCII spaces, since
|
||||
// that's what the scanner also assumes, and thus the number
|
||||
// of bytes skipped is also the number of space characters.
|
||||
SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset,
|
||||
}
|
||||
|
||||
lastByteOffset = mainToken.Range.End.Byte
|
||||
}
|
||||
|
||||
// Now make a slice of pointers into the previous slice.
|
||||
ret := make(Tokens, len(tokBuf))
|
||||
for i := range ret {
|
||||
ret[i] = &tokBuf[i]
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// partitionTokens takes a sequence of tokens and a hcl.Range and returns
|
||||
// two indices within the token sequence that correspond with the range
|
||||
// boundaries, such that the slice operator could be used to produce
|
||||
// three token sequences for before, within, and after respectively:
|
||||
//
|
||||
// start, end := partitionTokens(toks, rng)
|
||||
// before := toks[:start]
|
||||
// within := toks[start:end]
|
||||
// after := toks[end:]
|
||||
//
|
||||
// This works best when the range is aligned with token boundaries (e.g.
|
||||
// because it was produced in terms of the scanner's result) but if that isn't
|
||||
// true then it will make a best effort that may produce strange results at
|
||||
// the boundaries.
|
||||
//
|
||||
// Native hclsyntax tokens are used here, because they contain the necessary
|
||||
// absolute position information. However, since writerTokens produces a
|
||||
// correlatable sequence of writer tokens, the resulting indices can be
|
||||
// used also to index into its result, allowing the partitioning of writer
|
||||
// tokens to be driven by the partitioning of native tokens.
|
||||
//
|
||||
// The tokens are assumed to be in source order and non-overlapping, which
|
||||
// will be true if the token sequence from the scanner is used directly.
|
||||
func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) {
|
||||
// We use a linear search here because we assume that in most cases our
// target range is close to the beginning of the sequence, and the sequences
|
||||
// are generally small for most reasonable files anyway.
|
||||
for i := 0; ; i++ {
|
||||
if i >= len(toks) {
|
||||
// No tokens for the given range at all!
|
||||
return len(toks), len(toks)
|
||||
}
|
||||
|
||||
if toks[i].Range.Start.Byte >= rng.Start.Byte {
|
||||
start = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for i := start; ; i++ {
|
||||
if i >= len(toks) {
|
||||
// The range "hangs off" the end of the token sequence
|
||||
return start, len(toks)
|
||||
}
|
||||
|
||||
if toks[i].Range.Start.Byte >= rng.End.Byte {
|
||||
end = i // end marker is exclusive
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return start, end
|
||||
}
|
||||
|
||||
// partitionLeadCommentTokens takes a sequence of tokens that is assumed
|
||||
// to immediately precede a construct that can have lead comment tokens,
|
||||
// and returns the index into that sequence where the lead comments begin.
|
||||
//
|
||||
// Lead comments are defined as whole lines containing only comment tokens
|
||||
// with no blank lines between. If no such lines are found, the returned
|
||||
// index will be len(toks).
|
||||
func partitionLeadCommentTokens(toks hclsyntax.Tokens) int {
|
||||
// single-line comments (which is what we're interested in here)
|
||||
// consume their trailing newline, so we can just walk backwards
|
||||
// until we stop seeing comment tokens.
|
||||
for i := len(toks) - 1; i >= 0; i-- {
|
||||
if toks[i].Type != hclsyntax.TokenComment {
|
||||
return i + 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// partitionLineEndTokens takes a sequence of tokens that is assumed
|
||||
// to immediately follow a construct that can have a line comment, and
|
||||
// returns first the index where any line comments end and then second
|
||||
// the index immediately after the trailing newline.
|
||||
//
|
||||
// Line comments are defined as comments that appear immediately after
|
||||
// a construct on the same line where its significant tokens ended.
|
||||
//
|
||||
// Since single-line comment tokens (# and //) include the newline that
|
||||
// terminates them, in the presence of these the two returned indices
|
||||
// will be the same since the comment itself serves as the line end.
|
||||
func partitionLineEndTokens(toks hclsyntax.Tokens) (afterComment, afterNewline int) {
|
||||
for i := 0; i < len(toks); i++ {
|
||||
tok := toks[i]
|
||||
if tok.Type != hclsyntax.TokenComment {
|
||||
switch tok.Type {
|
||||
case hclsyntax.TokenNewline:
|
||||
return i, i + 1
|
||||
case hclsyntax.TokenEOF:
|
||||
// Although this is valid, we mustn't include the EOF
|
||||
// itself as our "newline" or else strange things will
|
||||
// happen when we try to append new items.
|
||||
return i, i
|
||||
default:
|
||||
// If we have well-formed input here then nothing else should be
|
||||
// possible. This path should never happen, because we only try
|
||||
// to extract tokens from the sequence if the parser succeeded,
|
||||
// and it should catch this problem itself.
|
||||
panic("malformed line trailers: expected only comments and newlines")
|
||||
}
|
||||
}
|
||||
|
||||
if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
|
||||
// Newline at the end of a single-line comment serves both as
|
||||
// the end of comments *and* the end of the line.
|
||||
return i + 1, i + 1
|
||||
}
|
||||
}
|
||||
return len(toks), len(toks)
|
||||
}
|
||||
|
||||
// lexConfig uses the hclsyntax scanner to get a token stream and then
|
||||
// rewrites it into this package's token model.
|
||||
//
|
||||
// Any errors produced during scanning are ignored, so the results of this
|
||||
// function should be used with care.
|
||||
func lexConfig(src []byte) Tokens {
|
||||
mainTokens, _ := hclsyntax.LexConfig(src, "", hcl.Pos{Byte: 0, Line: 1, Column: 1})
|
||||
return writerTokens(mainTokens)
|
||||
}
|
vendor/github.com/hashicorp/hcl2/hclwrite/public.go (generated, vendored, new file, 44 lines added)
@@ -0,0 +1,44 @@
package hclwrite
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
)
|
||||
|
||||
// NewFile creates a new file object that is empty and ready to have constructs
|
||||
// added to it.
|
||||
func NewFile() *File {
|
||||
body := &Body{
|
||||
inTree: newInTree(),
|
||||
items: newNodeSet(),
|
||||
}
|
||||
file := &File{
|
||||
inTree: newInTree(),
|
||||
}
|
||||
file.body = file.inTree.children.Append(body)
|
||||
return file
|
||||
}
|
||||
|
||||
// ParseConfig interprets the given source bytes into a *hclwrite.File. The
|
||||
// resulting AST can be used to perform surgical edits on the source code
|
||||
// before turning it back into bytes again.
|
||||
func ParseConfig(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) {
|
||||
return parse(src, filename, start)
|
||||
}
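// Editorial illustration, not part of the upstream file: a minimal call to
// ParseConfig. The source text and filename are arbitrary assumptions; the
// hcl.Pos start position is the usual "beginning of file" value.
func exampleParseConfig() (*File, hcl.Diagnostics) {
	src := []byte("foo = \"bar\"\n")
	return ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1, Byte: 0})
}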
|
||||
|
||||
// Format takes source code and performs simple whitespace changes to transform
|
||||
// it to a canonical layout style.
|
||||
//
|
||||
// Format skips constructing an AST and works directly with tokens, so it
|
||||
// is less expensive than formatting via the AST for situations where no other
|
||||
// changes will be made. It also ignores syntax errors and can thus be applied
|
||||
// to partial source code, although the result in that case may not be
|
||||
// desirable.
|
||||
func Format(src []byte) []byte {
|
||||
tokens := lexConfig(src)
|
||||
format(tokens)
|
||||
buf := &bytes.Buffer{}
|
||||
tokens.WriteTo(buf)
|
||||
return buf.Bytes()
|
||||
}
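// Editorial illustration, not part of the upstream file: Format applied to a
// deliberately badly-spaced snippet. The input is an arbitrary assumption;
// since Format works purely on tokens, it normalizes spacing without needing
// the input to be semantically meaningful.
func exampleFormat() []byte {
	return Format([]byte("resource \"thing\" \"x\"{\nfoo=1\nlonger_name= 2\n}\n"))
}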
|
vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go (generated, vendored, new file, 122 lines added)
@@ -0,0 +1,122 @@
package hclwrite
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/apparentlymart/go-textseg/textseg"
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
)
|
||||
|
||||
// Token is a single sequence of bytes annotated with a type. It is similar
|
||||
// in purpose to hclsyntax.Token, but discards the source position information
|
||||
// since that is not useful in code generation.
|
||||
type Token struct {
|
||||
Type hclsyntax.TokenType
|
||||
Bytes []byte
|
||||
|
||||
// We record the number of spaces before each token so that we can
|
||||
// reproduce the exact layout of the original file when we're making
|
||||
// surgical changes in-place. When _new_ code is created it will always
|
||||
// be in the canonical style, but we preserve layout of existing code.
|
||||
SpacesBefore int
|
||||
}
|
||||
|
||||
// asHCLSyntax returns the receiver expressed as an incomplete hclsyntax.Token.
|
||||
// A complete token is not possible since we don't have source location
|
||||
// information here, and so this method is unexported so we can be sure it will
|
||||
// only be used for internal purposes where we know the range isn't important.
|
||||
//
|
||||
// This is primarily intended to allow us to re-use certain functionality from
|
||||
// hclsyntax rather than re-implementing it against our own token type here.
|
||||
func (t *Token) asHCLSyntax() hclsyntax.Token {
|
||||
return hclsyntax.Token{
|
||||
Type: t.Type,
|
||||
Bytes: t.Bytes,
|
||||
Range: hcl.Range{
|
||||
Filename: "<invalid>",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Tokens is a flat list of tokens.
|
||||
type Tokens []*Token
|
||||
|
||||
func (ts Tokens) Bytes() []byte {
|
||||
buf := &bytes.Buffer{}
|
||||
ts.WriteTo(buf)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (ts Tokens) testValue() string {
|
||||
return string(ts.Bytes())
|
||||
}
|
||||
|
||||
// Columns returns the number of columns (grapheme clusters) the token sequence
|
||||
// occupies. The result is not meaningful if there are newline or single-line
|
||||
// comment tokens in the sequence.
|
||||
func (ts Tokens) Columns() int {
|
||||
ret := 0
|
||||
for _, token := range ts {
|
||||
ret += token.SpacesBefore // spaces are always worth one column each
|
||||
ct, _ := textseg.TokenCount(token.Bytes, textseg.ScanGraphemeClusters)
|
||||
ret += ct
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// WriteTo takes an io.Writer and writes the bytes for each token to it,
|
||||
// along with the spacing that separates each token. In other words, this
|
||||
// allows serializing the tokens to a file or other such byte stream.
|
||||
func (ts Tokens) WriteTo(wr io.Writer) (int64, error) {
|
||||
// We know we're going to be writing a lot of small chunks of repeated
|
||||
// space characters, so we'll prepare a buffer of these that we can
|
||||
// easily pass to wr.Write without any further allocation.
|
||||
spaces := make([]byte, 40)
|
||||
for i := range spaces {
|
||||
spaces[i] = ' '
|
||||
}
|
||||
|
||||
var n int64
|
||||
var err error
|
||||
for _, token := range ts {
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
for spacesBefore := token.SpacesBefore; spacesBefore > 0; spacesBefore -= len(spaces) {
|
||||
thisChunk := spacesBefore
|
||||
if thisChunk > len(spaces) {
|
||||
thisChunk = len(spaces)
|
||||
}
|
||||
var thisN int
|
||||
thisN, err = wr.Write(spaces[:thisChunk])
|
||||
n += int64(thisN)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
var thisN int
|
||||
thisN, err = wr.Write(token.Bytes)
|
||||
n += int64(thisN)
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
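// Editorial illustration, not part of the upstream file: serializes a small
// hand-built token sequence with WriteTo. The identifier names are arbitrary
// assumptions; SpacesBefore drives the separating spaces that WriteTo emits.
func exampleWriteTo() string {
	toks := Tokens{
		newIdentToken("foo"),
		{Type: hclsyntax.TokenEqual, Bytes: []byte{'='}, SpacesBefore: 1},
		{Type: hclsyntax.TokenIdent, Bytes: []byte("bar"), SpacesBefore: 1},
	}
	buf := &bytes.Buffer{}
	toks.WriteTo(buf) // expected to write: foo = bar
	return buf.String()
}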
|
||||
|
||||
func (ts Tokens) walkChildNodes(w internalWalkFunc) {
|
||||
// Unstructured tokens have no child nodes
|
||||
}
|
||||
|
||||
func (ts Tokens) BuildTokens(to Tokens) Tokens {
|
||||
return append(to, ts...)
|
||||
}
|
||||
|
||||
func newIdentToken(name string) *Token {
|
||||
return &Token{
|
||||
Type: hclsyntax.TokenIdent,
|
||||
Bytes: []byte(name),
|
||||
}
|
||||
}
|
vendor/github.com/zclconf/go-cty/cty/capsule_test.go (generated, vendored, new file, 21 lines added)
@@ -0,0 +1,21 @@
package cty

import "reflect"

type capsuleTestType1Native struct {
	name string
}

type capsuleTestType2Native struct {
	name string
}

var capsuleTestType1 = Capsule(
	"capsule test type 1",
	reflect.TypeOf(capsuleTestType1Native{}),
)

var capsuleTestType2 = Capsule(
	"capsule test type 2",
	reflect.TypeOf(capsuleTestType2Native{}),
)
vendor/github.com/zclconf/go-cty/cty/gob_test.go (generated, vendored, new file, 54 lines added)
@@ -0,0 +1,54 @@
package cty
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"encoding/gob"
|
||||
)
|
||||
|
||||
func TestGobabilty(t *testing.T) {
|
||||
tests := []Value{
|
||||
StringVal("hi"),
|
||||
True,
|
||||
NumberIntVal(1),
|
||||
NumberFloatVal(96.5),
|
||||
ListVal([]Value{True}),
|
||||
MapVal(map[string]Value{"true": True}),
|
||||
SetVal([]Value{True}),
|
||||
TupleVal([]Value{True}),
|
||||
ObjectVal(map[string]Value{"true": True}),
|
||||
}
|
||||
|
||||
for _, testValue := range tests {
|
||||
t.Run(testValue.GoString(), func(t *testing.T) {
|
||||
tv := testGob{
|
||||
testValue,
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
enc := gob.NewEncoder(buf)
|
||||
|
||||
err := enc.Encode(tv)
|
||||
if err != nil {
|
||||
t.Fatalf("gob encode error: %s", err)
|
||||
}
|
||||
|
||||
var ov testGob
|
||||
|
||||
dec := gob.NewDecoder(buf)
|
||||
err = dec.Decode(&ov)
|
||||
if err != nil {
|
||||
t.Fatalf("gob decode error: %s", err)
|
||||
}
|
||||
|
||||
if !ov.Value.RawEquals(tv.Value) {
|
||||
t.Errorf("value did not survive gobbing\ninput: %#v\noutput: %#v", tv, ov)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type testGob struct {
|
||||
Value Value
|
||||
}
|
vendor/github.com/zclconf/go-cty/cty/json_test.go (generated, vendored, new file, 86 lines added)
@@ -0,0 +1,86 @@
package cty
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestTypeJSONable(t *testing.T) {
|
||||
tests := []struct {
|
||||
Type Type
|
||||
Want string
|
||||
}{
|
||||
{
|
||||
String,
|
||||
`"string"`,
|
||||
},
|
||||
{
|
||||
Number,
|
||||
`"number"`,
|
||||
},
|
||||
{
|
||||
Bool,
|
||||
`"bool"`,
|
||||
},
|
||||
{
|
||||
List(Bool),
|
||||
`["list","bool"]`,
|
||||
},
|
||||
{
|
||||
Map(Bool),
|
||||
`["map","bool"]`,
|
||||
},
|
||||
{
|
||||
Set(Bool),
|
||||
`["set","bool"]`,
|
||||
},
|
||||
{
|
||||
List(Map(Bool)),
|
||||
`["list",["map","bool"]]`,
|
||||
},
|
||||
{
|
||||
Tuple([]Type{Bool, String}),
|
||||
`["tuple",["bool","string"]]`,
|
||||
},
|
||||
{
|
||||
Object(map[string]Type{"bool": Bool, "string": String}),
|
||||
`["object",{"bool":"bool","string":"string"}]`,
|
||||
},
|
||||
{
|
||||
DynamicPseudoType,
|
||||
`"dynamic"`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.Type.GoString(), func(t *testing.T) {
|
||||
result, err := json.Marshal(test.Type)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error from Marshal: %s", err)
|
||||
}
|
||||
|
||||
resultStr := string(result)
|
||||
|
||||
if resultStr != test.Want {
|
||||
t.Errorf(
|
||||
"wrong result\ntype: %#v\ngot: %s\nwant: %s",
|
||||
test.Type, resultStr, test.Want,
|
||||
)
|
||||
}
|
||||
|
||||
var ty Type
|
||||
err = json.Unmarshal(result, &ty)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error from Unmarshal: %s", err)
|
||||
}
|
||||
|
||||
if !ty.Equals(test.Type) {
|
||||
t.Errorf(
|
||||
"type did not unmarshal correctly\njson: %s\ngot: %#v\nwant: %#v",
|
||||
resultStr, ty, test.Type,
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
110
vendor/github.com/zclconf/go-cty/cty/object_type_test.go
generated
vendored
Normal file
@ -0,0 +1,110 @@
package cty

import (
	"fmt"
	"testing"
)

func TestObjectTypeEquals(t *testing.T) {
	tests := []struct {
		LHS      Type // Must be typeObject
		RHS      Type
		Expected bool
	}{
		{
			Object(map[string]Type{}),
			Object(map[string]Type{}),
			true,
		},
		{
			Object(map[string]Type{
				"name": String,
			}),
			Object(map[string]Type{
				"name": String,
			}),
			true,
		},
		{
			// Attribute names should be normalized
			Object(map[string]Type{
				"h\u00e9llo": String, // precombined é
			}),
			Object(map[string]Type{
				"he\u0301llo": String, // e with combining acute accent
			}),
			true,
		},
		{
			Object(map[string]Type{
				"person": Object(map[string]Type{
					"name": String,
				}),
			}),
			Object(map[string]Type{
				"person": Object(map[string]Type{
					"name": String,
				}),
			}),
			true,
		},
		{
			Object(map[string]Type{
				"name": String,
			}),
			Object(map[string]Type{}),
			false,
		},
		{
			Object(map[string]Type{
				"name": String,
			}),
			Object(map[string]Type{
				"name": Number,
			}),
			false,
		},
		{
			Object(map[string]Type{
				"name": String,
			}),
			Object(map[string]Type{
				"nombre": String,
			}),
			false,
		},
		{
			Object(map[string]Type{
				"name": String,
			}),
			Object(map[string]Type{
				"name": String,
				"age":  Number,
			}),
			false,
		},
		{
			Object(map[string]Type{
				"person": Object(map[string]Type{
					"name": String,
				}),
			}),
			Object(map[string]Type{
				"person": Object(map[string]Type{
					"name": String,
					"age":  Number,
				}),
			}),
			false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("%#v.Equals(%#v)", test.LHS, test.RHS), func(t *testing.T) {
			got := test.LHS.Equals(test.RHS)
			if got != test.Expected {
				t.Errorf("Equals returned %#v; want %#v", got, test.Expected)
			}
		})
	}
}
14
vendor/github.com/zclconf/go-cty/cty/path.go
generated
vendored
@ -15,6 +15,10 @@ import (
// but callers can also feel free to just produce a slice of PathStep manually
// and convert to this type, which may be more appropriate in environments
// where memory pressure is a concern.
//
// Although a Path is technically mutable, by convention callers should not
// mutate a path once it has been built and passed to some other subsystem.
// Instead, use Copy and then mutate the copy before using it.
type Path []PathStep

// PathStep represents a single step down into a data structure, as part
@ -132,9 +136,13 @@ type IndexStep struct {
// Apply returns the value resulting from indexing the given value with
// our key value.
func (s IndexStep) Apply(val Value) (Value, error) {
	if val == NilVal || val.IsNull() {
		return NilVal, errors.New("cannot index a null value")
	}

	switch s.Key.Type() {
	case Number:
		if !val.Type().IsListType() {
		if !(val.Type().IsListType() || val.Type().IsTupleType()) {
			return NilVal, errors.New("not a list type")
		}
	case String:
@ -170,6 +178,10 @@ type GetAttrStep struct {
// Apply returns the value of our named attribute from the given value, which
// must be of an object type that has a value of that name.
func (s GetAttrStep) Apply(val Value) (Value, error) {
	if val == NilVal || val.IsNull() {
		return NilVal, errors.New("cannot access attributes on a null value")
	}

	if !val.Type().IsObjectType() {
		return NilVal, errors.New("not an object type")
	}
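To make the new null and tuple handling in `IndexStep.Apply`/`GetAttrStep.Apply` concrete, here is a minimal illustrative sketch of calling `Path.Apply` from application code. It is not part of the vendored diff; the `items` attribute and its values are made up, and the error text is the one asserted by the path tests later in this commit.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"items": cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.NumberIntVal(2)}),
	})

	// items[1]: indexing a tuple with a number key is what the IndexStep
	// change above enables.
	path := (cty.Path)(nil).GetAttr("items").Index(cty.NumberIntVal(1))
	got, err := path.Apply(val)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.GoString()) // the second tuple element

	// Applying the same path to a null value now returns an error instead
	// of failing deeper inside the traversal.
	_, err = path.Apply(cty.NullVal(val.Type()))
	fmt.Println(err) // "at step 0: cannot access attributes on a null value" per the tests
}
```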
198
vendor/github.com/zclconf/go-cty/cty/path_set.go
generated
vendored
Normal file
@ -0,0 +1,198 @@
package cty

import (
	"fmt"
	"hash/crc64"

	"github.com/zclconf/go-cty/cty/set"
)

// PathSet represents a set of Path objects. This can be used, for example,
// to talk about a subset of paths within a value that meet some criteria,
// without directly modifying the values at those paths.
type PathSet struct {
	set set.Set
}

// NewPathSet creates and returns a PathSet, with initial contents optionally
// set by the given arguments.
func NewPathSet(paths ...Path) PathSet {
	ret := PathSet{
		set: set.NewSet(pathSetRules{}),
	}

	for _, path := range paths {
		ret.Add(path)
	}

	return ret
}

// Add inserts a single given path into the set.
//
// Paths are immutable after construction by convention. It is particularly
// important not to mutate a path after it has been placed into a PathSet.
// If a Path is mutated while in a set, behavior is undefined.
func (s PathSet) Add(path Path) {
	s.set.Add(path)
}

// AddAllSteps is like Add but it also adds all of the steps leading to
// the given path.
//
// For example, if given a path representing "foo.bar", it will add both
// "foo" and "bar".
func (s PathSet) AddAllSteps(path Path) {
	for i := 1; i <= len(path); i++ {
		s.Add(path[:i])
	}
}

// Has returns true if the given path is in the receiving set.
func (s PathSet) Has(path Path) bool {
	return s.set.Has(path)
}

// List makes and returns a slice of all of the paths in the receiving set,
// in an undefined but consistent order.
func (s PathSet) List() []Path {
	if s.Empty() {
		return nil
	}
	ret := make([]Path, 0, s.set.Length())
	for it := s.set.Iterator(); it.Next(); {
		ret = append(ret, it.Value().(Path))
	}
	return ret
}

// Remove modifies the receving set to no longer include the given path.
// If the given path was already absent, this is a no-op.
func (s PathSet) Remove(path Path) {
	s.set.Remove(path)
}

// Empty returns true if the length of the receiving set is zero.
func (s PathSet) Empty() bool {
	return s.set.Length() == 0
}

// Union returns a new set whose contents are the union of the receiver and
// the given other set.
func (s PathSet) Union(other PathSet) PathSet {
	return PathSet{
		set: s.set.Union(other.set),
	}
}

// Intersection returns a new set whose contents are the intersection of the
// receiver and the given other set.
func (s PathSet) Intersection(other PathSet) PathSet {
	return PathSet{
		set: s.set.Intersection(other.set),
	}
}

// Subtract returns a new set whose contents are those from the receiver with
// any elements of the other given set subtracted.
func (s PathSet) Subtract(other PathSet) PathSet {
	return PathSet{
		set: s.set.Subtract(other.set),
	}
}

// SymmetricDifference returns a new set whose contents are the symmetric
// difference of the receiver and the given other set.
func (s PathSet) SymmetricDifference(other PathSet) PathSet {
	return PathSet{
		set: s.set.SymmetricDifference(other.set),
	}
}

// Equal returns true if and only if both the receiver and the given other
// set contain exactly the same paths.
func (s PathSet) Equal(other PathSet) bool {
	if s.set.Length() != other.set.Length() {
		return false
	}
	// Now we know the lengths are the same we only need to test in one
	// direction whether everything in one is in the other.
	for it := s.set.Iterator(); it.Next(); {
		if !other.set.Has(it.Value()) {
			return false
		}
	}
	return true
}

var crc64Table = crc64.MakeTable(crc64.ISO)

var indexStepPlaceholder = []byte("#")

// pathSetRules is an implementation of set.Rules from the set package,
// used internally within PathSet.
type pathSetRules struct {
}

func (r pathSetRules) Hash(v interface{}) int {
	path := v.(Path)
	hash := crc64.New(crc64Table)

	for _, rawStep := range path {
		switch step := rawStep.(type) {
		case GetAttrStep:
			// (this creates some garbage converting the string name to a
			// []byte, but that's okay since cty is not designed to be
			// used in tight loops under memory pressure.)
			hash.Write([]byte(step.Name))
		default:
			// For any other step type we just append a predefined value,
			// which means that e.g. all indexes into a given collection will
			// hash to the same value but we assume that collections are
			// small and thus this won't hurt too much.
			hash.Write(indexStepPlaceholder)
		}
	}

	// We discard half of the hash on 32-bit platforms; collisions just make
	// our lookups take marginally longer, so not a big deal.
	return int(hash.Sum64())
}

func (r pathSetRules) Equivalent(a, b interface{}) bool {
	aPath := a.(Path)
	bPath := b.(Path)

	if len(aPath) != len(bPath) {
		return false
	}

	for i := range aPath {
		switch aStep := aPath[i].(type) {
		case GetAttrStep:
			bStep, ok := bPath[i].(GetAttrStep)
			if !ok {
				return false
			}

			if aStep.Name != bStep.Name {
				return false
			}
		case IndexStep:
			bStep, ok := bPath[i].(IndexStep)
			if !ok {
				return false
			}

			eq := aStep.Key.Equals(bStep.Key)
			if !eq.IsKnown() || eq.False() {
				return false
			}
		default:
			// Should never happen, since we document PathStep as a closed type.
			panic(fmt.Errorf("unsupported step type %T", aStep))
		}
	}

	return true
}
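As a quick illustration of the PathSet API introduced above (not part of the vendored diff; the attribute names are hypothetical), a caller might record a path plus all of its prefixes like this:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	attrPath := cty.Path{
		cty.GetAttrStep{Name: "network"},
		cty.GetAttrStep{Name: "subnets"},
		cty.IndexStep{Key: cty.NumberIntVal(0)},
	}

	// AddAllSteps records the path itself plus every prefix leading to it,
	// so queries about intermediate containers also succeed.
	ps := cty.NewPathSet()
	ps.AddAllSteps(attrPath)

	fmt.Println(ps.Has(attrPath))     // true
	fmt.Println(ps.Has(attrPath[:1])) // true: the "network" prefix was added too

	other := cty.NewPathSet(attrPath)
	fmt.Println(ps.Equal(other)) // false: other lacks the prefix paths
}
```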
71
vendor/github.com/zclconf/go-cty/cty/path_set_test.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
package cty

import (
	"reflect"
	"testing"
)

func TestPathSet(t *testing.T) {
	helloWorld := Path{
		GetAttrStep{Name: "hello"},
		GetAttrStep{Name: "world"},
	}
	s := NewPathSet(helloWorld)

	if got, want := s.Has(helloWorld), true; got != want {
		t.Errorf("set does not have hello.world; should have it")
	}
	if got, want := s.Has(helloWorld[:1]), false; got != want {
		t.Errorf("set has hello; should not have it")
	}

	if got, want := s.List(), []Path{helloWorld}; !reflect.DeepEqual(got, want) {
		t.Errorf("wrong list result\ngot: %#v\nwant: %#v", got, want)
	}

	fooBarBaz := Path{
		GetAttrStep{Name: "foo"},
		IndexStep{Key: StringVal("bar")},
		GetAttrStep{Name: "baz"},
	}
	s.AddAllSteps(fooBarBaz)
	if got, want := s.Has(helloWorld), true; got != want {
		t.Errorf("set does not have hello.world; should have it")
	}
	if got, want := s.Has(fooBarBaz), true; got != want {
		t.Errorf("set does not have foo['bar'].baz; should have it")
	}
	if got, want := s.Has(fooBarBaz[:2]), true; got != want {
		t.Errorf("set does not have foo['bar']; should have it")
	}
	if got, want := s.Has(fooBarBaz[:1]), true; got != want {
		t.Errorf("set does not have foo; should have it")
	}

	s.Remove(fooBarBaz[:2])
	if got, want := s.Has(fooBarBaz[:2]), false; got != want {
		t.Errorf("set has foo['bar']; should not have it")
	}
	if got, want := s.Has(fooBarBaz), true; got != want {
		t.Errorf("set does not have foo['bar'].baz; should have it")
	}
	if got, want := s.Has(fooBarBaz[:1]), true; got != want {
		t.Errorf("set does not have foo; should have it")
	}

	new := NewPathSet(s.List()...)
	if got, want := s.Equal(new), true; got != want {
		t.Errorf("new set does not equal original; want equal sets")
	}
	new.Remove(helloWorld)
	if got, want := s.Equal(new), false; got != want {
		t.Errorf("new set equals original; want non-equal sets")
	}
	new.Add(Path{
		GetAttrStep{Name: "goodbye"},
		GetAttrStep{Name: "world"},
	})
	if got, want := s.Equal(new), false; got != want {
		t.Errorf("new set equals original; want non-equal sets")
	}
}
125
vendor/github.com/zclconf/go-cty/cty/path_test.go
generated
vendored
Normal file
@ -0,0 +1,125 @@
package cty_test

import (
	"fmt"
	"testing"

	"github.com/zclconf/go-cty/cty"
)

func TestPathApply(t *testing.T) {
	tests := []struct {
		Start   cty.Value
		Path    cty.Path
		Want    cty.Value
		WantErr string
	}{
		{
			cty.StringVal("hello"),
			nil,
			cty.StringVal("hello"),
			``,
		},
		{
			cty.StringVal("hello"),
			(cty.Path)(nil).Index(cty.StringVal("boop")),
			cty.NilVal,
			`at step 0: not a map type`,
		},
		{
			cty.StringVal("hello"),
			(cty.Path)(nil).Index(cty.NumberIntVal(0)),
			cty.NilVal,
			`at step 0: not a list type`,
		},
		{
			cty.ListVal([]cty.Value{
				cty.StringVal("hello"),
			}),
			(cty.Path)(nil).Index(cty.NumberIntVal(0)),
			cty.StringVal("hello"),
			``,
		},
		{
			cty.TupleVal([]cty.Value{
				cty.StringVal("hello"),
			}),
			(cty.Path)(nil).Index(cty.NumberIntVal(0)),
			cty.StringVal("hello"),
			``,
		},
		{
			cty.ListValEmpty(cty.String),
			(cty.Path)(nil).Index(cty.NumberIntVal(0)),
			cty.NilVal,
			`at step 0: value does not have given index key`,
		},
		{
			cty.ListVal([]cty.Value{
				cty.StringVal("hello"),
			}),
			(cty.Path)(nil).Index(cty.NumberIntVal(1)),
			cty.NilVal,
			`at step 0: value does not have given index key`,
		},
		{
			cty.ListVal([]cty.Value{
				cty.StringVal("hello"),
			}),
			(cty.Path)(nil).Index(cty.NumberIntVal(0)).GetAttr("foo"),
			cty.NilVal,
			`at step 1: not an object type`,
		},
		{
			cty.ListVal([]cty.Value{
				cty.EmptyObjectVal,
			}),
			(cty.Path)(nil).Index(cty.NumberIntVal(0)).GetAttr("foo"),
			cty.NilVal,
			`at step 1: object has no attribute "foo"`,
		},
		{
			cty.NullVal(cty.List(cty.String)),
			(cty.Path)(nil).Index(cty.NumberIntVal(0)),
			cty.NilVal,
			`at step 0: cannot index a null value`,
		},
		{
			cty.NullVal(cty.Map(cty.String)),
			(cty.Path)(nil).Index(cty.NumberIntVal(0)),
			cty.NilVal,
			`at step 0: cannot index a null value`,
		},
		{
			cty.NullVal(cty.EmptyObject),
			(cty.Path)(nil).GetAttr("foo"),
			cty.NilVal,
			`at step 0: cannot access attributes on a null value`,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("%#v %#v", test.Start, test.Path), func(t *testing.T) {
			got, gotErr := test.Path.Apply(test.Start)
			t.Logf("testing path apply\nstart: %#v\npath: %#v", test.Start, test.Path)

			if test.WantErr != "" {
				if gotErr == nil {
					t.Fatalf("succeeded, but want error\nwant error: %s", test.WantErr)
				}

				if gotErrStr := gotErr.Error(); gotErrStr != test.WantErr {
					t.Fatalf("wrong error\ngot error: %s\nwant error: %s", gotErrStr, test.WantErr)
				}
				return
			}

			if gotErr != nil {
				t.Fatalf("failed, but want success\ngot error: %s", gotErr.Error())
			}
			if !test.Want.RawEquals(got) {
				t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, test.Want)
			}
		})
	}
}
39
vendor/github.com/zclconf/go-cty/cty/primitive_type_test.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
package cty

import (
	"fmt"
	"testing"
)

func TestTypeIsPrimitiveType(t *testing.T) {
	tests := []struct {
		Type Type
		Want bool
	}{
		{String, true},
		{Number, true},
		{Bool, true},
		{DynamicPseudoType, false},
		{List(String), false},

		// Make sure our primitive constants are correctly constructed
		{True.Type(), true},
		{False.Type(), true},
		{Zero.Type(), true},
		{PositiveInfinity.Type(), true},
		{NegativeInfinity.Type(), true},
	}

	for i, test := range tests {
		t.Run(fmt.Sprintf("%d %#v", i, test.Type), func(t *testing.T) {
			got := test.Type.IsPrimitiveType()
			if got != test.Want {
				t.Errorf(
					"wrong result\ntype: %#v\ngot: %#v\nwant: %#v",
					test.Type,
					test.Want, got,
				)
			}
		})
	}
}
129
vendor/github.com/zclconf/go-cty/cty/set_internals_test.go
generated
vendored
Normal file
@ -0,0 +1,129 @@
package cty

import (
	"math/big"
	"testing"
)

func TestSetHashBytes(t *testing.T) {
	tests := []struct {
		value Value
		want  string
	}{
		{
			UnknownVal(Number),
			"?",
		},
		{
			UnknownVal(String),
			"?",
		},
		{
			NullVal(Number),
			"~",
		},
		{
			NullVal(String),
			"~",
		},
		{
			DynamicVal,
			"?",
		},
		{
			NumberVal(big.NewFloat(12)),
			"12",
		},
		{
			StringVal(""),
			`""`,
		},
		{
			StringVal("pizza"),
			`"pizza"`,
		},
		{
			True,
			"T",
		},
		{
			False,
			"F",
		},
		{
			ListValEmpty(Bool),
			"[]",
		},
		{
			ListValEmpty(DynamicPseudoType),
			"[]",
		},
		{
			ListVal([]Value{True, False}),
			"[T;F;]",
		},
		{
			ListVal([]Value{UnknownVal(Bool)}),
			"[?;]",
		},
		{
			ListVal([]Value{ListValEmpty(Bool)}),
			"[[];]",
		},
		{
			MapValEmpty(Bool),
			"{}",
		},
		{
			MapVal(map[string]Value{"true": True, "false": False}),
			`{"false":F;"true":T;}`,
		},
		{
			MapVal(map[string]Value{"true": True, "unknown": UnknownVal(Bool), "dynamic": DynamicVal}),
			`{"dynamic":?;"true":T;"unknown":?;}`,
		},
		{
			SetValEmpty(Bool),
			"[]",
		},
		{
			SetVal([]Value{True, True, False}),
			"[F;T;]",
		},
		{
			SetVal([]Value{UnknownVal(Bool), UnknownVal(Bool)}),
			"[?;?;]", // unknowns are never equal, so we can have multiple of them
		},
		{
			EmptyObjectVal,
			"<>",
		},
		{
			ObjectVal(map[string]Value{
				"name": StringVal("ermintrude"),
				"age":  NumberVal(big.NewFloat(54)),
			}),
			`<54;"ermintrude";>`,
		},
		{
			EmptyTupleVal,
			"<>",
		},
		{
			TupleVal([]Value{
				StringVal("ermintrude"),
				NumberVal(big.NewFloat(54)),
			}),
			`<"ermintrude";54;>`,
		},
	}

	for _, test := range tests {
		t.Run(test.value.GoString(), func(t *testing.T) {
			got := string(makeSetHashBytes(test.value))
			if got != test.want {
				t.Errorf("wrong result\ngot: %s\nwant: %s", got, test.want)
			}
		})
	}
}
40
vendor/github.com/zclconf/go-cty/cty/set_type_test.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
package cty

import (
	"testing"
)

func TestSetOperations(t *testing.T) {
	// This test is for the mechanisms that allow a calling application to
	// implement set operations using the underlying set.Set type. This is
	// not expected to be a common case but is useful, for example, for
	// implementing the set-related functions in function/stdlib .

	s1 := SetVal([]Value{
		StringVal("a"),
		StringVal("b"),
		StringVal("c"),
	})
	s2 := SetVal([]Value{
		StringVal("c"),
		StringVal("d"),
		StringVal("e"),
	})

	s1r := s1.AsValueSet()
	s2r := s2.AsValueSet()
	s3r := s1r.Union(s2r)

	s3 := SetValFromValueSet(s3r)

	if got, want := s3.LengthInt(), 5; got != want {
		t.Errorf("wrong length %d; want %d", got, want)
	}

	for _, wantStr := range []string{"a", "b", "c", "d", "e"} {
		if got, want := s3.HasElement(StringVal(wantStr)), True; got != want {
			t.Errorf("missing element %q", wantStr)
		}
	}

}
64
vendor/github.com/zclconf/go-cty/cty/tuple_type_test.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
package cty

import (
	"fmt"
	"testing"
)

func TestTupleTypeEquals(t *testing.T) {
	tests := []struct {
		LHS      Type // Must be typeTuple
		RHS      Type
		Expected bool
	}{
		{
			Tuple([]Type{}),
			Tuple([]Type{}),
			true,
		},
		{
			EmptyTuple,
			Tuple([]Type{}),
			true,
		},
		{
			Tuple([]Type{String}),
			Tuple([]Type{String}),
			true,
		},
		{
			Tuple([]Type{Tuple([]Type{String})}),
			Tuple([]Type{Tuple([]Type{String})}),
			true,
		},
		{
			Tuple([]Type{String}),
			EmptyTuple,
			false,
		},
		{
			Tuple([]Type{String}),
			Tuple([]Type{Number}),
			false,
		},
		{
			Tuple([]Type{String}),
			Tuple([]Type{String, Number}),
			false,
		},
		{
			Tuple([]Type{String}),
			Tuple([]Type{Tuple([]Type{String})}),
			false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("%#v.Equals(%#v)", test.LHS, test.RHS), func(t *testing.T) {
			got := test.LHS.Equals(test.RHS)
			if got != test.Expected {
				t.Errorf("Equals returned %#v; want %#v", got, test.Expected)
			}
		})
	}
}
29
vendor/github.com/zclconf/go-cty/cty/type_conform.go
generated
vendored
@ -50,23 +50,20 @@ func testConformance(given Type, want Type, path Path, errs *[]error) {
	givenAttrs := given.AttributeTypes()
	wantAttrs := want.AttributeTypes()

	if len(givenAttrs) != len(wantAttrs) {
		// Something is missing from one of them.
		for k := range givenAttrs {
			if _, exists := wantAttrs[k]; !exists {
				*errs = append(
					*errs,
					errorf(path, "unsupported attribute %q", k),
				)
			}
	for k := range givenAttrs {
		if _, exists := wantAttrs[k]; !exists {
			*errs = append(
				*errs,
				errorf(path, "unsupported attribute %q", k),
			)
		}
		for k := range wantAttrs {
			if _, exists := givenAttrs[k]; !exists {
				*errs = append(
					*errs,
					errorf(path, "missing required attribute %q", k),
				)
			}
		}
	}
	for k := range wantAttrs {
		if _, exists := givenAttrs[k]; !exists {
			*errs = append(
				*errs,
				errorf(path, "missing required attribute %q", k),
			)
		}
	}

209
vendor/github.com/zclconf/go-cty/cty/type_conform_test.go
generated
vendored
Normal file
@ -0,0 +1,209 @@
package cty

import (
	"fmt"
	"strings"
	"testing"
)

func TestTypeTestConformance(t *testing.T) {
	tests := []struct {
		Receiver Type
		Given    Type
		Conforms bool
	}{
		{
			Receiver: Number,
			Given:    Number,
			Conforms: true,
		},
		{
			Receiver: Number,
			Given:    String,
			Conforms: false,
		},
		{
			Receiver: Number,
			Given:    DynamicPseudoType,
			Conforms: true,
		},
		{
			Receiver: DynamicPseudoType,
			Given:    DynamicPseudoType,
			Conforms: true,
		},
		{
			Receiver: DynamicPseudoType,
			Given:    Number,
			Conforms: false,
		},
		{
			Receiver: List(Number),
			Given:    List(Number),
			Conforms: true,
		},
		{
			Receiver: List(Number),
			Given:    Map(Number),
			Conforms: false,
		},
		{
			Receiver: List(Number),
			Given:    List(DynamicPseudoType),
			Conforms: true,
		},
		{
			Receiver: List(Number),
			Given:    List(String),
			Conforms: false,
		},
		{
			Receiver: Map(Number),
			Given:    Map(Number),
			Conforms: true,
		},
		{
			Receiver: Map(Number),
			Given:    Set(Number),
			Conforms: false,
		},
		{
			Receiver: List(Number),
			Given:    Map(DynamicPseudoType),
			Conforms: false,
		},
		{
			Receiver: Map(Number),
			Given:    Map(DynamicPseudoType),
			Conforms: true,
		},
		{
			Receiver: Map(Number),
			Given:    Map(String),
			Conforms: false,
		},
		{
			Receiver: Set(Number),
			Given:    Set(Number),
			Conforms: true,
		},
		{
			Receiver: Set(Number),
			Given:    List(Number),
			Conforms: false,
		},
		{
			Receiver: Set(Number),
			Given:    List(DynamicPseudoType),
			Conforms: false,
		},
		{
			Receiver: Set(Number),
			Given:    Set(DynamicPseudoType),
			Conforms: true,
		},
		{
			Receiver: Set(Number),
			Given:    Set(String),
			Conforms: false,
		},
		{
			Receiver: EmptyObject,
			Given:    EmptyObject,
			Conforms: true,
		},
		{
			Receiver: EmptyObject,
			Given:    Object(map[string]Type{"name": String}),
			Conforms: false,
		},
		{
			Receiver: Object(map[string]Type{"name": String}),
			Given:    EmptyObject,
			Conforms: false,
		},
		{
			Receiver: Object(map[string]Type{"name": String}),
			Given:    Object(map[string]Type{"name": String}),
			Conforms: true,
		},
		{
			Receiver: Object(map[string]Type{"name": String}),
			Given:    Object(map[string]Type{"gnome": String}),
			Conforms: false,
		},
		{
			Receiver: Object(map[string]Type{"name": Number}),
			Given:    Object(map[string]Type{"name": String}),
			Conforms: false,
		},
		{
			Receiver: Object(map[string]Type{"name": Number}),
			Given:    Object(map[string]Type{"name": String, "number": Number}),
			Conforms: false,
		},
		{
			Receiver: EmptyTuple,
			Given:    EmptyTuple,
			Conforms: true,
		},
		{
			Receiver: EmptyTuple,
			Given:    Tuple([]Type{String}),
			Conforms: false,
		},
		{
			Given:    Tuple([]Type{String}),
			Receiver: EmptyTuple,
			Conforms: false,
		},
		{
			Receiver: Tuple([]Type{String}),
			Given:    Tuple([]Type{String}),
			Conforms: true,
		},
		{
			Receiver: Tuple([]Type{String}),
			Given:    Tuple([]Type{Number}),
			Conforms: false,
		},
		{
			Receiver: Tuple([]Type{String, Number}),
			Given:    Tuple([]Type{String, Number}),
			Conforms: true,
		},
		{
			Receiver: Tuple([]Type{String}),
			Given:    Tuple([]Type{String, Number}),
			Conforms: false,
		},
		{
			Receiver: Tuple([]Type{String, Number}),
			Given:    Tuple([]Type{String}),
			Conforms: false,
		},
	}

	for _, test := range tests {
		t.Run(fmt.Sprintf("(%#v).TestConformance(%#v)", test.Receiver, test.Given), func(t *testing.T) {
			errs := test.Receiver.TestConformance(test.Given)
			if test.Conforms {
				if errs != nil {
					errStrs := make([]string, 0, len(errs))
					for _, err := range errs {
						if pathErr, ok := err.(PathError); ok {
							errStrs = append(errStrs, fmt.Sprintf("at %#v: %s", pathErr.Path, pathErr))
						} else {
							errStrs = append(errStrs, err.Error())
						}
					}
					t.Errorf("(%#v).TestConformance(%#v): unexpected errors\n%s", test.Receiver, test.Given, strings.Join(errStrs, "\n"))
				}
			} else {
				if errs == nil {
					t.Errorf("(%#v).TestConformance(%#v): expected errors, but got none", test.Receiver, test.Given)
				}
			}
		})
	}
}
8
vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go
generated
vendored
@ -8,12 +8,12 @@ package cty
// represent unknowns, such as JSON, as long as the caller does not need to
// retain the unknown value information.
func UnknownAsNull(val Value) Value {
	if !val.IsKnown() {
		return NullVal(val.Type())
	}

	ty := val.Type()
	switch {
	case val.IsNull():
		return val
	case !val.IsKnown():
		return NullVal(ty)
	case ty.IsListType() || ty.IsTupleType() || ty.IsSetType():
		length := val.LengthInt()
		if length == 0 {
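For context, a minimal sketch of how a caller might use UnknownAsNull (illustrative only, not part of the vendored diff; the attribute names are made up): unknowns are flattened to nulls so the value can be handed to a serialization such as JSON that has no unknown concept.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("example"),
		"id":   cty.UnknownVal(cty.String), // not yet known, e.g. computed later
	})

	// Replace every unknown leaf with a null of the same type.
	flattened := cty.UnknownAsNull(val)

	fmt.Println(flattened.GetAttr("id").IsNull())   // true: the unknown became a null
	fmt.Println(flattened.GetAttr("name").AsString()) // "example" is untouched
}
```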
195
vendor/github.com/zclconf/go-cty/cty/unknown_as_null_test.go
generated
vendored
Normal file
@ -0,0 +1,195 @@
package cty

import (
	"testing"
)

func TestUnknownAsNull(t *testing.T) {
	tests := []struct {
		Input Value
		Want  Value
	}{
		{
			StringVal("hello"),
			StringVal("hello"),
		},
		{
			NullVal(String),
			NullVal(String),
		},
		{
			UnknownVal(String),
			NullVal(String),
		},

		{
			NullVal(DynamicPseudoType),
			NullVal(DynamicPseudoType),
		},
		{
			NullVal(Object(map[string]Type{"test": String})),
			NullVal(Object(map[string]Type{"test": String})),
		},
		{
			DynamicVal,
			NullVal(DynamicPseudoType),
		},

		{
			ListValEmpty(String),
			ListValEmpty(String),
		},
		{
			ListVal([]Value{
				StringVal("hello"),
			}),
			ListVal([]Value{
				StringVal("hello"),
			}),
		},
		{
			ListVal([]Value{
				NullVal(String),
			}),
			ListVal([]Value{
				NullVal(String),
			}),
		},
		{
			ListVal([]Value{
				UnknownVal(String),
			}),
			ListVal([]Value{
				NullVal(String),
			}),
		},

		{
			SetValEmpty(String),
			SetValEmpty(String),
		},
		{
			SetVal([]Value{
				StringVal("hello"),
			}),
			SetVal([]Value{
				StringVal("hello"),
			}),
		},
		{
			SetVal([]Value{
				NullVal(String),
			}),
			SetVal([]Value{
				NullVal(String),
			}),
		},
		{
			SetVal([]Value{
				UnknownVal(String),
			}),
			SetVal([]Value{
				NullVal(String),
			}),
		},

		{
			EmptyTupleVal,
			EmptyTupleVal,
		},
		{
			TupleVal([]Value{
				StringVal("hello"),
			}),
			TupleVal([]Value{
				StringVal("hello"),
			}),
		},
		{
			TupleVal([]Value{
				NullVal(String),
			}),
			TupleVal([]Value{
				NullVal(String),
			}),
		},
		{
			TupleVal([]Value{
				UnknownVal(String),
			}),
			TupleVal([]Value{
				NullVal(String),
			}),
		},

		{
			MapValEmpty(String),
			MapValEmpty(String),
		},
		{
			MapVal(map[string]Value{
				"greeting": StringVal("hello"),
			}),
			MapVal(map[string]Value{
				"greeting": StringVal("hello"),
			}),
		},
		{
			MapVal(map[string]Value{
				"greeting": NullVal(String),
			}),
			MapVal(map[string]Value{
				"greeting": NullVal(String),
			}),
		},
		{
			MapVal(map[string]Value{
				"greeting": UnknownVal(String),
			}),
			MapVal(map[string]Value{
				"greeting": NullVal(String),
			}),
		},

		{
			EmptyObjectVal,
			EmptyObjectVal,
		},
		{
			ObjectVal(map[string]Value{
				"greeting": StringVal("hello"),
			}),
			ObjectVal(map[string]Value{
				"greeting": StringVal("hello"),
			}),
		},
		{
			ObjectVal(map[string]Value{
				"greeting": NullVal(String),
			}),
			ObjectVal(map[string]Value{
				"greeting": NullVal(String),
			}),
		},
		{
			ObjectVal(map[string]Value{
				"greeting": UnknownVal(String),
			}),
			ObjectVal(map[string]Value{
				"greeting": NullVal(String),
			}),
		},
	}

	for _, test := range tests {
		t.Run(test.Input.GoString(), func(t *testing.T) {
			got := UnknownAsNull(test.Input)
			if !got.RawEquals(test.Want) {
				t.Errorf(
					"wrong result\ninput: %#v\ngot: %#v\nwant: %#v",
					test.Input, got, test.Want,
				)
			}
		})
	}
}
26
vendor/github.com/zclconf/go-cty/cty/value_init.go
generated
vendored
@ -30,6 +30,32 @@ func NumberVal(v *big.Float) Value {
	}
}

// ParseNumberVal returns a Value of type number produced by parsing the given
// string as a decimal real number. To ensure that two identical strings will
// always produce an equal number, always use this function to derive a number
// from a string; it will ensure that the precision and rounding mode for the
// internal big decimal is configured in a consistent way.
//
// If the given string cannot be parsed as a number, the returned error has
// the message "a number is required", making it suitable to return to an
// end-user to signal a type conversion error.
//
// If the given string contains a number that becomes a recurring fraction
// when expressed in binary then it will be truncated to have a 512-bit
// mantissa. Note that this is a higher precision than that of a float64,
// so coverting the same decimal number first to float64 and then calling
// NumberFloatVal will not produce an equal result; the conversion first
// to float64 will round the mantissa to fewer than 512 bits.
func ParseNumberVal(s string) (Value, error) {
	// Base 10, precision 512, and rounding to nearest even is the standard
	// way to handle numbers arriving as strings.
	f, _, err := big.ParseFloat(s, 10, 512, big.ToNearestEven)
	if err != nil {
		return NilVal, fmt.Errorf("a number is required")
	}
	return NumberVal(f), nil
}

// NumberIntVal returns a Value of type Number whose internal value is equal
// to the given integer.
func NumberIntVal(v int64) Value {
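A minimal sketch of the consistency guarantee described in the ParseNumberVal doc comment above (illustrative only, not part of the vendored diff):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	a, err := cty.ParseNumberVal("0.1")
	if err != nil {
		panic(err)
	}
	b, err := cty.ParseNumberVal("0.1")
	if err != nil {
		panic(err)
	}

	// Both values were parsed with the same precision and rounding mode,
	// so identical strings always yield equal numbers.
	fmt.Println(a.RawEquals(b)) // true

	// Going via float64 first rounds to a shorter mantissa, so the result
	// does not compare equal to the string-parsed value, as the comment warns.
	c := cty.NumberFloatVal(0.1)
	fmt.Println(a.RawEquals(c)) // false
}
```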
84
vendor/github.com/zclconf/go-cty/cty/value_ops.go
generated
vendored
@ -14,16 +14,15 @@ func (val Value) GoString() string {
		return "cty.NilVal"
	}

	if val.ty == DynamicPseudoType {
		return "cty.DynamicVal"
	}

	if !val.IsKnown() {
		return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty)
	}
	if val.IsNull() {
		return fmt.Sprintf("cty.NullVal(%#v)", val.ty)
	}
	if val == DynamicVal { // is unknown, so must be before the IsKnown check below
		return "cty.DynamicVal"
	}
	if !val.IsKnown() {
		return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty)
	}

	// By the time we reach here we've dealt with all of the exceptions around
	// unknowns and nulls, so we're guaranteed that the values are the
@ -71,10 +70,62 @@ func (val Value) GoString() string {
// Equals returns True if the receiver and the given other value have the
// same type and are exactly equal in value.
//
// The usual short-circuit rules apply, so the result can be unknown or typed
// as dynamic if either of the given values are. Use RawEquals to compare
// if two values are equal *ignoring* the short-circuit rules.
// As a special case, two null values are always equal regardless of type.
//
// The usual short-circuit rules apply, so the result will be unknown if
// either of the given values are.
//
// Use RawEquals to compare if two values are equal *ignoring* the
// short-circuit rules and the exception for null values.
func (val Value) Equals(other Value) Value {
	// Start by handling Unknown values before considering types.
	// This needs to be done since Null values are always equal regardless of
	// type.
	switch {
	case !val.IsKnown() && !other.IsKnown():
		// both unknown
		return UnknownVal(Bool)
	case val.IsKnown() && !other.IsKnown():
		switch {
		case val.IsNull(), other.ty.HasDynamicTypes():
			// If known is Null, we need to wait for the unkown value since
			// nulls of any type are equal.
			// An unkown with a dynamic type compares as unknown, which we need
			// to check before the type comparison below.
			return UnknownVal(Bool)
		case !val.ty.Equals(other.ty):
			// There is no null comparison or dynamic types, so unequal types
			// will never be equal.
			return False
		default:
			return UnknownVal(Bool)
		}
	case other.IsKnown() && !val.IsKnown():
		switch {
		case other.IsNull(), val.ty.HasDynamicTypes():
			// If known is Null, we need to wait for the unkown value since
			// nulls of any type are equal.
			// An unkown with a dynamic type compares as unknown, which we need
			// to check before the type comparison below.
			return UnknownVal(Bool)
		case !other.ty.Equals(val.ty):
			// There's no null comparison or dynamic types, so unequal types
			// will never be equal.
			return False
		default:
			return UnknownVal(Bool)
		}
	}

	switch {
	case val.IsNull() && other.IsNull():
		// Nulls are always equal, regardless of type
		return BoolVal(true)
	case val.IsNull() || other.IsNull():
		// If only one is null then the result must be false
		return BoolVal(false)
	}

	if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() {
		return UnknownVal(Bool)
	}
@ -83,17 +134,6 @@ func (val Value) Equals(other Value) Value {
		return BoolVal(false)
	}

	if !(val.IsKnown() && other.IsKnown()) {
		return UnknownVal(Bool)
	}

	if val.IsNull() || other.IsNull() {
		if val.IsNull() && other.IsNull() {
			return BoolVal(true)
		}
		return BoolVal(false)
	}

	ty := val.ty
	result := false

@ -758,7 +798,7 @@ func (val Value) HasElement(elem Value) Value {
	if val.IsNull() {
		panic("can't call HasElement on a nil value")
	}
	if ty.ElementType() != elem.Type() {
	if !ty.ElementType().Equals(elem.Type()) {
		return False
	}

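A minimal sketch of the null-equality special case described in the revised Equals doc comment above (illustrative only, not part of the vendored diff):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	nullStr := cty.NullVal(cty.String)
	nullNum := cty.NullVal(cty.Number)

	// Two nulls compare as equal under Equals even though their types differ.
	fmt.Println(nullStr.Equals(nullNum).True()) // true

	// RawEquals ignores that special case (and the unknown short-circuits),
	// so it still distinguishes nulls of different types.
	fmt.Println(nullStr.RawEquals(nullNum)) // false

	// A known null compared with an unknown stays unknown, because the
	// unknown could itself turn out to be null.
	fmt.Println(nullStr.Equals(cty.UnknownVal(cty.Number)).IsKnown()) // false
}
```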
2074
vendor/github.com/zclconf/go-cty/cty/value_ops_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
151
vendor/github.com/zclconf/go-cty/cty/walk_test.go
generated
vendored
Normal file
@ -0,0 +1,151 @@
package cty

import (
	"fmt"
	"testing"
)

func TestWalk(t *testing.T) {
	type Call struct {
		Path string
		Type string
	}

	val := ObjectVal(map[string]Value{
		"string":       StringVal("hello"),
		"number":       NumberIntVal(10),
		"bool":         True,
		"list":         ListVal([]Value{True}),
		"list_empty":   ListValEmpty(Bool),
		"set":          SetVal([]Value{True}),
		"set_empty":    ListValEmpty(Bool),
		"tuple":        TupleVal([]Value{True}),
		"tuple_empty":  EmptyTupleVal,
		"map":          MapVal(map[string]Value{"true": True}),
		"map_empty":    MapValEmpty(Bool),
		"object":       ObjectVal(map[string]Value{"true": True}),
		"object_empty": EmptyObjectVal,
		"null":         NullVal(List(String)),
		"unknown":      UnknownVal(Map(Bool)),
	})

	gotCalls := map[Call]struct{}{}
	wantCalls := []Call{
		{`cty.Path(nil)`, "object"},
		{`cty.Path{cty.GetAttrStep{Name:"string"}}`, "string"},
		{`cty.Path{cty.GetAttrStep{Name:"number"}}`, "number"},
		{`cty.Path{cty.GetAttrStep{Name:"bool"}}`, "bool"},
		{`cty.Path{cty.GetAttrStep{Name:"list"}}`, "list of bool"},
		{`cty.Path{cty.GetAttrStep{Name:"list"}, cty.IndexStep{Key:cty.NumberIntVal(0)}}`, "bool"},
		{`cty.Path{cty.GetAttrStep{Name:"list_empty"}}`, "list of bool"},
		{`cty.Path{cty.GetAttrStep{Name:"set"}}`, "set of bool"},
		{`cty.Path{cty.GetAttrStep{Name:"set"}, cty.IndexStep{Key:cty.True}}`, "bool"},
		{`cty.Path{cty.GetAttrStep{Name:"set_empty"}}`, "list of bool"},
		{`cty.Path{cty.GetAttrStep{Name:"tuple"}}`, "tuple"},
		{`cty.Path{cty.GetAttrStep{Name:"tuple"}, cty.IndexStep{Key:cty.NumberIntVal(0)}}`, "bool"},
		{`cty.Path{cty.GetAttrStep{Name:"tuple_empty"}}`, "tuple"},
		{`cty.Path{cty.GetAttrStep{Name:"map"}, cty.IndexStep{Key:cty.StringVal("true")}}`, "bool"},
		{`cty.Path{cty.GetAttrStep{Name:"map"}}`, "map of bool"},
		{`cty.Path{cty.GetAttrStep{Name:"map_empty"}}`, "map of bool"},
		{`cty.Path{cty.GetAttrStep{Name:"object"}}`, "object"},
		{`cty.Path{cty.GetAttrStep{Name:"object"}, cty.GetAttrStep{Name:"true"}}`, "bool"},
		{`cty.Path{cty.GetAttrStep{Name:"object_empty"}}`, "object"},
		{`cty.Path{cty.GetAttrStep{Name:"null"}}`, "list of string"},
		{`cty.Path{cty.GetAttrStep{Name:"unknown"}}`, "map of bool"},
	}

	err := Walk(val, func(path Path, val Value) (bool, error) {
		gotCalls[Call{
			Path: fmt.Sprintf("%#v", path),
			Type: val.Type().FriendlyName(),
		}] = struct{}{}
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}

	if len(gotCalls) != len(wantCalls) {
		t.Errorf("wrong number of calls %d; want %d", len(gotCalls), len(wantCalls))
	}

	for gotCall := range gotCalls {
		t.Logf("got call {%#q, %q}", gotCall.Path, gotCall.Type)
	}

	for _, wantCall := range wantCalls {
		if _, has := gotCalls[wantCall]; !has {
			t.Errorf("missing call {%#q, %q}", wantCall.Path, wantCall.Type)
		}
	}
}

func TestTransform(t *testing.T) {
	val := ObjectVal(map[string]Value{
		"string":       StringVal("hello"),
		"number":       NumberIntVal(10),
		"bool":         True,
		"list":         ListVal([]Value{True}),
		"list_empty":   ListValEmpty(Bool),
		"set":          SetVal([]Value{True}),
		"set_empty":    ListValEmpty(Bool),
		"tuple":        TupleVal([]Value{True}),
		"tuple_empty":  EmptyTupleVal,
		"map":          MapVal(map[string]Value{"true": True}),
		"map_empty":    MapValEmpty(Bool),
		"object":       ObjectVal(map[string]Value{"true": True}),
		"object_empty": EmptyObjectVal,
		"null":         NullVal(String),
		"unknown":      UnknownVal(Bool),
		"null_list":    NullVal(List(String)),
		"unknown_map":  UnknownVal(Map(Bool)),
	})

	gotVal, err := Transform(val, func(path Path, val Value) (Value, error) {
		if val.Type().IsPrimitiveType() {
			return StringVal(fmt.Sprintf("%#v", path)), nil
		}
		return val, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}

	wantVal := ObjectVal(map[string]Value{
		"string":       StringVal(`cty.Path{cty.GetAttrStep{Name:"string"}}`),
		"number":       StringVal(`cty.Path{cty.GetAttrStep{Name:"number"}}`),
		"bool":         StringVal(`cty.Path{cty.GetAttrStep{Name:"bool"}}`),
		"list":         ListVal([]Value{StringVal(`cty.Path{cty.GetAttrStep{Name:"list"}, cty.IndexStep{Key:cty.NumberIntVal(0)}}`)}),
		"list_empty":   ListValEmpty(Bool),
		"set":          SetVal([]Value{StringVal(`cty.Path{cty.GetAttrStep{Name:"set"}, cty.IndexStep{Key:cty.True}}`)}),
		"set_empty":    ListValEmpty(Bool),
		"tuple":        TupleVal([]Value{StringVal(`cty.Path{cty.GetAttrStep{Name:"tuple"}, cty.IndexStep{Key:cty.NumberIntVal(0)}}`)}),
		"tuple_empty":  EmptyTupleVal,
		"map":          MapVal(map[string]Value{"true": StringVal(`cty.Path{cty.GetAttrStep{Name:"map"}, cty.IndexStep{Key:cty.StringVal("true")}}`)}),
		"map_empty":    MapValEmpty(Bool),
		"object":       ObjectVal(map[string]Value{"true": StringVal(`cty.Path{cty.GetAttrStep{Name:"object"}, cty.GetAttrStep{Name:"true"}}`)}),
		"object_empty": EmptyObjectVal,
		"null":         StringVal(`cty.Path{cty.GetAttrStep{Name:"null"}}`),
		"unknown":      StringVal(`cty.Path{cty.GetAttrStep{Name:"unknown"}}`),
		"null_list":    NullVal(List(String)),
		"unknown_map":  UnknownVal(Map(Bool)),
	})

	if !gotVal.RawEquals(wantVal) {
		if got, want := len(gotVal.Type().AttributeTypes()), len(gotVal.Type().AttributeTypes()); got != want {
			t.Errorf("wrong length %d; want %d", got, want)
		}
		for it := wantVal.ElementIterator(); it.Next(); {
			key, wantElem := it.Element()
			attr := key.AsString()
			if !gotVal.Type().HasAttribute(attr) {
				t.Errorf("missing attribute %q", attr)
				continue
			}
			gotElem := gotVal.GetAttr(attr)
			if !gotElem.RawEquals(wantElem) {
				t.Errorf("wrong value for attribute %q\ngot: %#v\nwant: %#v", attr, gotElem, wantElem)
			}
		}
	}
}
15
vendor/vendor.json
vendored
@ -213,13 +213,12 @@
	{"path":"github.com/hashicorp/hcl/json/parser","checksumSHA1":"138aCV5n8n7tkGYMsMVQQnnLq+0=","revision":"6e968a3fcdcbab092f5307fd0d85479d5af1e4dc","revisionTime":"2016-11-01T18:00:25Z"},
	{"path":"github.com/hashicorp/hcl/json/scanner","checksumSHA1":"YdvFsNOMSWMLnY6fcliWQa0O5Fw=","revision":"6e968a3fcdcbab092f5307fd0d85479d5af1e4dc","revisionTime":"2016-11-01T18:00:25Z"},
	{"path":"github.com/hashicorp/hcl/json/token","checksumSHA1":"fNlXQCQEnb+B3k5UDL/r15xtSJY=","revision":"6e968a3fcdcbab092f5307fd0d85479d5af1e4dc","revisionTime":"2016-11-01T18:00:25Z"},
	{"path":"github.com/hashicorp/hcl2/ext/userfunc","checksumSHA1":"N2+7qc9e8zYkNy1itC+kWTKBTIo=","revision":"6743a2254ba3d642b7d3a0be506259a0842819ac","revisionTime":"2018-08-10T01:10:00Z"},
	{"path":"github.com/hashicorp/hcl2/gohcl","checksumSHA1":"BRJaQcKriVKEirVC7YxBxPufQF0=","revision":"6743a2254ba3d642b7d3a0be506259a0842819ac","revisionTime":"2018-08-10T01:10:00Z"},
	{"path":"github.com/hashicorp/hcl2/hcl","checksumSHA1":"LotrMqeWeTv/rNOGUHRs9iVBjoQ=","revision":"6743a2254ba3d642b7d3a0be506259a0842819ac","revisionTime":"2018-08-10T01:10:00Z"},
	{"path":"github.com/hashicorp/hcl2/hcl/hclsyntax","checksumSHA1":"RNoOVGaFtYqaPMyARZuHc2OejDs=","revision":"6743a2254ba3d642b7d3a0be506259a0842819ac","revisionTime":"2018-08-10T01:10:00Z"},
	{"path":"github.com/hashicorp/hcl2/hcl/json","checksumSHA1":"4Cr8I/nepYf4eRCl5hiazPf+afs=","revision":"6743a2254ba3d642b7d3a0be506259a0842819ac","revisionTime":"2018-08-10T01:10:00Z"},
	{"path":"github.com/hashicorp/hcl2/hcldec","checksumSHA1":"wQ3hLj4s+5jN6LePSpT0XTTvdXA=","revision":"6743a2254ba3d642b7d3a0be506259a0842819ac","revisionTime":"2018-08-10T01:10:00Z"},
	{"path":"github.com/hashicorp/hcl2/hclparse","checksumSHA1":"IzmftuG99BqNhbFGhxZaGwtiMtM=","revision":"6743a2254ba3d642b7d3a0be506259a0842819ac","revisionTime":"2018-08-10T01:10:00Z"},
	{"path":"github.com/hashicorp/hcl2/gohcl","checksumSHA1":"RFEjfMQWPAVILXE2PhL6wDW8Zg4=","revision":"fb2bc46cdbe36e247dac0c7dc185b34eaeb54c21","revisionTime":"2019-02-14T11:58:25Z","version":"master","versionExact":"master"},
	{"path":"github.com/hashicorp/hcl2/hcl","checksumSHA1":"bUO4KS1yjAWa6miewgbUUsxYVfo=","revision":"fb2bc46cdbe36e247dac0c7dc185b34eaeb54c21","revisionTime":"2019-02-14T11:58:25Z","version":"master","versionExact":"master"},
	{"path":"github.com/hashicorp/hcl2/hcl/hclsyntax","checksumSHA1":"mIcAvc+sEAQO5kIWuLVqnvXEU6Y=","revision":"fb2bc46cdbe36e247dac0c7dc185b34eaeb54c21","revisionTime":"2019-02-14T11:58:25Z","version":"master","versionExact":"master"},
	{"path":"github.com/hashicorp/hcl2/hcl/json","checksumSHA1":"56M/avlLyKDeMb0D8RqKcK2kja8=","revision":"fb2bc46cdbe36e247dac0c7dc185b34eaeb54c21","revisionTime":"2019-02-14T11:58:25Z","version":"master","versionExact":"master"},
	{"path":"github.com/hashicorp/hcl2/hcldec","checksumSHA1":"6JRj4T/iQxIe/CoKXHDjPuupmL8=","revision":"fb2bc46cdbe36e247dac0c7dc185b34eaeb54c21","revisionTime":"2019-02-14T11:58:25Z","version":"master","versionExact":"master"},
	{"path":"github.com/hashicorp/hcl2/hclwrite","checksumSHA1":"DQLzlvDUtHL1DkYDZrMx2vfKJUg=","revision":"fb2bc46cdbe36e247dac0c7dc185b34eaeb54c21","revisionTime":"2019-02-14T11:58:25Z","version":"master","versionExact":"master"},
	{"path":"github.com/hashicorp/logutils","checksumSHA1":"vt+P9D2yWDO3gdvdgCzwqunlhxU=","revision":"0dc08b1671f34c4250ce212759ebd880f743d883"},
	{"path":"github.com/hashicorp/memberlist","checksumSHA1":"yAu2gPVXIh28yJ2If5gZPrf04kU=","revision":"1a62499c21db33d57691001d5e08a71ec857b18f","revisionTime":"2019-01-03T22:22:36Z"},
	{"path":"github.com/hashicorp/net-rpc-msgpackrpc","checksumSHA1":"qnlqWJYV81ENr61SZk9c65R1mDo=","revision":"a14192a58a694c123d8fe5481d4a4727d6ae82f3"},
@ -367,7 +366,7 @@
	{"path":"github.com/vishvananda/netlink/nl","checksumSHA1":"r/vcO8YkOWNHKX5HKCukaU4Xzlg=","origin":"github.com/opencontainers/runc/vendor/github.com/vishvananda/netlink/nl","revision":"459bfaec1fc6c17d8bfb12d0a0f69e7e7271ed2a","revisionTime":"2018-08-23T14:46:37Z"},
	{"path":"github.com/vmihailenco/msgpack","checksumSHA1":"t9A/EE2GhHFPHzK+ksAKgKW9ZC8=","revision":"b5e691b1eb52a28c05e67ab9df303626c095c23b","revisionTime":"2018-06-13T09:15:15Z"},
	{"path":"github.com/vmihailenco/msgpack/codes","checksumSHA1":"OcTSGT2v7/2saIGq06nDhEZwm8I=","revision":"b5e691b1eb52a28c05e67ab9df303626c095c23b","revisionTime":"2018-06-13T09:15:15Z"},
	{"path":"github.com/zclconf/go-cty/cty","checksumSHA1":"Ej+3WWvyjn0xg3aujsyT+yvvmdc=","revision":"02bd58e97b5759d478019c5a6333edbfdfed16a0","revisionTime":"2018-07-18T22:05:26Z"},
	{"path":"github.com/zclconf/go-cty/cty","checksumSHA1":"0jAKo5tFC1SpRjwB+AiPNfNAzmM=","revision":"4ca19710f0562cab70f0b3c9cbff0ecc70ee06d1","revisionTime":"2019-02-01T22:06:20Z"},
	{"path":"github.com/zclconf/go-cty/cty/convert","checksumSHA1":"1WGUPe776lvMMbaRerAbqOx19nQ=","revision":"02bd58e97b5759d478019c5a6333edbfdfed16a0","revisionTime":"2018-07-18T22:05:26Z"},
	{"path":"github.com/zclconf/go-cty/cty/function","checksumSHA1":"MyyLCGg3RREMllTJyK6ehZl/dHk=","revision":"02bd58e97b5759d478019c5a6333edbfdfed16a0","revisionTime":"2018-07-18T22:05:26Z"},
	{"path":"github.com/zclconf/go-cty/cty/function/stdlib","checksumSHA1":"kcTJOuL131/stXJ4U9tC3SASQLs=","revision":"02bd58e97b5759d478019c5a6333edbfdfed16a0","revisionTime":"2018-07-18T22:05:26Z"},