Merge pull request #4559 from hashicorp/f-base-plugin

Base/device/hclspec plugin protobufs
This commit is contained in:
Alex Dadgar 2018-08-08 10:09:01 -07:00 committed by GitHub
commit 0cb8994e32
196 changed files with 52602 additions and 248 deletions

481
plugins/base/base.pb.go Normal file

@@ -0,0 +1,481 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: base.proto
package hashicorp_nomad_plugins_base
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import hclspec "hashicorp/nomad/plugins/shared/hclspec"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// PluginType enumerates the type of plugins Nomad supports
type PluginType int32
const (
PluginType_UNKNOWN PluginType = 0
PluginType_DRIVER PluginType = 1
PluginType_DEVICE PluginType = 2
)
var PluginType_name = map[int32]string{
0: "UNKNOWN",
1: "DRIVER",
2: "DEVICE",
}
var PluginType_value = map[string]int32{
"UNKNOWN": 0,
"DRIVER": 1,
"DEVICE": 2,
}
func (x PluginType) String() string {
return proto.EnumName(PluginType_name, int32(x))
}
func (PluginType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_base_6491f5f52ef6eb79, []int{0}
}
// PluginInfoRequest is used to request the plugin's basic information.
type PluginInfoRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PluginInfoRequest) Reset() { *m = PluginInfoRequest{} }
func (m *PluginInfoRequest) String() string { return proto.CompactTextString(m) }
func (*PluginInfoRequest) ProtoMessage() {}
func (*PluginInfoRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_base_6491f5f52ef6eb79, []int{0}
}
func (m *PluginInfoRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PluginInfoRequest.Unmarshal(m, b)
}
func (m *PluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PluginInfoRequest.Marshal(b, m, deterministic)
}
func (dst *PluginInfoRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_PluginInfoRequest.Merge(dst, src)
}
func (m *PluginInfoRequest) XXX_Size() int {
return xxx_messageInfo_PluginInfoRequest.Size(m)
}
func (m *PluginInfoRequest) XXX_DiscardUnknown() {
xxx_messageInfo_PluginInfoRequest.DiscardUnknown(m)
}
var xxx_messageInfo_PluginInfoRequest proto.InternalMessageInfo
// PluginInfoResponse returns basic information about the plugin such
// that Nomad can decide whether to load the plugin or not.
type PluginInfoResponse struct {
// type indicates what type of plugin this is.
Type PluginType `protobuf:"varint,1,opt,name=type,proto3,enum=hashicorp.nomad.plugins.base.PluginType" json:"type,omitempty"`
// plugin_api_version indicates the version of the Nomad Plugin API
// this plugin is built against.
PluginApiVersion string `protobuf:"bytes,2,opt,name=plugin_api_version,json=pluginApiVersion,proto3" json:"plugin_api_version,omitempty"`
// plugin_version is the semver version of this individual plugin.
// This is divorced from Nomad's development and versioning.
PluginVersion string `protobuf:"bytes,3,opt,name=plugin_version,json=pluginVersion,proto3" json:"plugin_version,omitempty"`
// name is the name of the plugin
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PluginInfoResponse) Reset() { *m = PluginInfoResponse{} }
func (m *PluginInfoResponse) String() string { return proto.CompactTextString(m) }
func (*PluginInfoResponse) ProtoMessage() {}
func (*PluginInfoResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_base_6491f5f52ef6eb79, []int{1}
}
func (m *PluginInfoResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PluginInfoResponse.Unmarshal(m, b)
}
func (m *PluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PluginInfoResponse.Marshal(b, m, deterministic)
}
func (dst *PluginInfoResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_PluginInfoResponse.Merge(dst, src)
}
func (m *PluginInfoResponse) XXX_Size() int {
return xxx_messageInfo_PluginInfoResponse.Size(m)
}
func (m *PluginInfoResponse) XXX_DiscardUnknown() {
xxx_messageInfo_PluginInfoResponse.DiscardUnknown(m)
}
var xxx_messageInfo_PluginInfoResponse proto.InternalMessageInfo
func (m *PluginInfoResponse) GetType() PluginType {
if m != nil {
return m.Type
}
return PluginType_UNKNOWN
}
func (m *PluginInfoResponse) GetPluginApiVersion() string {
if m != nil {
return m.PluginApiVersion
}
return ""
}
func (m *PluginInfoResponse) GetPluginVersion() string {
if m != nil {
return m.PluginVersion
}
return ""
}
func (m *PluginInfoResponse) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// ConfigSchemaRequest is used to request the configuration's schema.
type ConfigSchemaRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ConfigSchemaRequest) Reset() { *m = ConfigSchemaRequest{} }
func (m *ConfigSchemaRequest) String() string { return proto.CompactTextString(m) }
func (*ConfigSchemaRequest) ProtoMessage() {}
func (*ConfigSchemaRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_base_6491f5f52ef6eb79, []int{2}
}
func (m *ConfigSchemaRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ConfigSchemaRequest.Unmarshal(m, b)
}
func (m *ConfigSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ConfigSchemaRequest.Marshal(b, m, deterministic)
}
func (dst *ConfigSchemaRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ConfigSchemaRequest.Merge(dst, src)
}
func (m *ConfigSchemaRequest) XXX_Size() int {
return xxx_messageInfo_ConfigSchemaRequest.Size(m)
}
func (m *ConfigSchemaRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ConfigSchemaRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ConfigSchemaRequest proto.InternalMessageInfo
// ConfigSchemaResponse returns the plugin's configuration schema.
type ConfigSchemaResponse struct {
// spec is the plugin's configuration schema
Spec *hclspec.Spec `protobuf:"bytes,1,opt,name=spec,proto3" json:"spec,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ConfigSchemaResponse) Reset() { *m = ConfigSchemaResponse{} }
func (m *ConfigSchemaResponse) String() string { return proto.CompactTextString(m) }
func (*ConfigSchemaResponse) ProtoMessage() {}
func (*ConfigSchemaResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_base_6491f5f52ef6eb79, []int{3}
}
func (m *ConfigSchemaResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ConfigSchemaResponse.Unmarshal(m, b)
}
func (m *ConfigSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ConfigSchemaResponse.Marshal(b, m, deterministic)
}
func (dst *ConfigSchemaResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ConfigSchemaResponse.Merge(dst, src)
}
func (m *ConfigSchemaResponse) XXX_Size() int {
return xxx_messageInfo_ConfigSchemaResponse.Size(m)
}
func (m *ConfigSchemaResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ConfigSchemaResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ConfigSchemaResponse proto.InternalMessageInfo
func (m *ConfigSchemaResponse) GetSpec() *hclspec.Spec {
if m != nil {
return m.Spec
}
return nil
}
// SetConfigRequest is used to set the configuration
type SetConfigRequest struct {
// msgpack_config is the configuration encoded as MessagePack.
MsgpackConfig []byte `protobuf:"bytes,1,opt,name=msgpack_config,json=msgpackConfig,proto3" json:"msgpack_config,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SetConfigRequest) Reset() { *m = SetConfigRequest{} }
func (m *SetConfigRequest) String() string { return proto.CompactTextString(m) }
func (*SetConfigRequest) ProtoMessage() {}
func (*SetConfigRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_base_6491f5f52ef6eb79, []int{4}
}
func (m *SetConfigRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SetConfigRequest.Unmarshal(m, b)
}
func (m *SetConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SetConfigRequest.Marshal(b, m, deterministic)
}
func (dst *SetConfigRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SetConfigRequest.Merge(dst, src)
}
func (m *SetConfigRequest) XXX_Size() int {
return xxx_messageInfo_SetConfigRequest.Size(m)
}
func (m *SetConfigRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SetConfigRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SetConfigRequest proto.InternalMessageInfo
func (m *SetConfigRequest) GetMsgpackConfig() []byte {
if m != nil {
return m.MsgpackConfig
}
return nil
}
// SetConfigResponse is used to respond to setting the configuration
type SetConfigResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SetConfigResponse) Reset() { *m = SetConfigResponse{} }
func (m *SetConfigResponse) String() string { return proto.CompactTextString(m) }
func (*SetConfigResponse) ProtoMessage() {}
func (*SetConfigResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_base_6491f5f52ef6eb79, []int{5}
}
func (m *SetConfigResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SetConfigResponse.Unmarshal(m, b)
}
func (m *SetConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SetConfigResponse.Marshal(b, m, deterministic)
}
func (dst *SetConfigResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SetConfigResponse.Merge(dst, src)
}
func (m *SetConfigResponse) XXX_Size() int {
return xxx_messageInfo_SetConfigResponse.Size(m)
}
func (m *SetConfigResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SetConfigResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SetConfigResponse proto.InternalMessageInfo
func init() {
proto.RegisterType((*PluginInfoRequest)(nil), "hashicorp.nomad.plugins.base.PluginInfoRequest")
proto.RegisterType((*PluginInfoResponse)(nil), "hashicorp.nomad.plugins.base.PluginInfoResponse")
proto.RegisterType((*ConfigSchemaRequest)(nil), "hashicorp.nomad.plugins.base.ConfigSchemaRequest")
proto.RegisterType((*ConfigSchemaResponse)(nil), "hashicorp.nomad.plugins.base.ConfigSchemaResponse")
proto.RegisterType((*SetConfigRequest)(nil), "hashicorp.nomad.plugins.base.SetConfigRequest")
proto.RegisterType((*SetConfigResponse)(nil), "hashicorp.nomad.plugins.base.SetConfigResponse")
proto.RegisterEnum("hashicorp.nomad.plugins.base.PluginType", PluginType_name, PluginType_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// BasePluginClient is the client API for BasePlugin service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type BasePluginClient interface {
// PluginInfo describes the type and version of a plugin.
PluginInfo(ctx context.Context, in *PluginInfoRequest, opts ...grpc.CallOption) (*PluginInfoResponse, error)
// ConfigSchema returns the schema for parsing the plugin's configuration.
ConfigSchema(ctx context.Context, in *ConfigSchemaRequest, opts ...grpc.CallOption) (*ConfigSchemaResponse, error)
// SetConfig is used to set the configuration.
SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error)
}
type basePluginClient struct {
cc *grpc.ClientConn
}
func NewBasePluginClient(cc *grpc.ClientConn) BasePluginClient {
return &basePluginClient{cc}
}
func (c *basePluginClient) PluginInfo(ctx context.Context, in *PluginInfoRequest, opts ...grpc.CallOption) (*PluginInfoResponse, error) {
out := new(PluginInfoResponse)
err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.base.BasePlugin/PluginInfo", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *basePluginClient) ConfigSchema(ctx context.Context, in *ConfigSchemaRequest, opts ...grpc.CallOption) (*ConfigSchemaResponse, error) {
out := new(ConfigSchemaResponse)
err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.base.BasePlugin/ConfigSchema", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *basePluginClient) SetConfig(ctx context.Context, in *SetConfigRequest, opts ...grpc.CallOption) (*SetConfigResponse, error) {
out := new(SetConfigResponse)
err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.base.BasePlugin/SetConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// BasePluginServer is the server API for BasePlugin service.
type BasePluginServer interface {
// PluginInfo describes the type and version of a plugin.
PluginInfo(context.Context, *PluginInfoRequest) (*PluginInfoResponse, error)
// ConfigSchema returns the schema for parsing the plugin's configuration.
ConfigSchema(context.Context, *ConfigSchemaRequest) (*ConfigSchemaResponse, error)
// SetConfig is used to set the configuration.
SetConfig(context.Context, *SetConfigRequest) (*SetConfigResponse, error)
}
func RegisterBasePluginServer(s *grpc.Server, srv BasePluginServer) {
s.RegisterService(&_BasePlugin_serviceDesc, srv)
}
func _BasePlugin_PluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PluginInfoRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BasePluginServer).PluginInfo(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/hashicorp.nomad.plugins.base.BasePlugin/PluginInfo",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BasePluginServer).PluginInfo(ctx, req.(*PluginInfoRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BasePlugin_ConfigSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ConfigSchemaRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BasePluginServer).ConfigSchema(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/hashicorp.nomad.plugins.base.BasePlugin/ConfigSchema",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BasePluginServer).ConfigSchema(ctx, req.(*ConfigSchemaRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BasePlugin_SetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SetConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BasePluginServer).SetConfig(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/hashicorp.nomad.plugins.base.BasePlugin/SetConfig",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BasePluginServer).SetConfig(ctx, req.(*SetConfigRequest))
}
return interceptor(ctx, in, info, handler)
}
var _BasePlugin_serviceDesc = grpc.ServiceDesc{
ServiceName: "hashicorp.nomad.plugins.base.BasePlugin",
HandlerType: (*BasePluginServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "PluginInfo",
Handler: _BasePlugin_PluginInfo_Handler,
},
{
MethodName: "ConfigSchema",
Handler: _BasePlugin_ConfigSchema_Handler,
},
{
MethodName: "SetConfig",
Handler: _BasePlugin_SetConfig_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "base.proto",
}
func init() { proto.RegisterFile("base.proto", fileDescriptor_base_6491f5f52ef6eb79) }
var fileDescriptor_base_6491f5f52ef6eb79 = []byte{
// 411 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xdd, 0x8a, 0xd3, 0x40,
0x14, 0x6e, 0x6a, 0xa8, 0xf4, 0xf4, 0x87, 0x38, 0x55, 0x28, 0xc1, 0x8b, 0x12, 0x10, 0x8a, 0x94,
0x89, 0x8d, 0x78, 0x21, 0x78, 0xa1, 0xd6, 0x5e, 0x14, 0xa1, 0x4a, 0xaa, 0xd5, 0xbb, 0x30, 0x4d,
0xa7, 0x4d, 0xb0, 0x99, 0x99, 0x66, 0x52, 0xa5, 0xcf, 0xe6, 0x0b, 0xed, 0x63, 0x2c, 0x99, 0x49,
0xba, 0xd9, 0x65, 0xb7, 0x74, 0xaf, 0x32, 0x9c, 0xef, 0xe7, 0x9c, 0xf3, 0x1d, 0x02, 0xb0, 0x22,
0x92, 0x62, 0x91, 0xf2, 0x8c, 0xa3, 0x97, 0x11, 0x91, 0x51, 0x1c, 0xf2, 0x54, 0x60, 0xc6, 0x13,
0xb2, 0xc6, 0x62, 0x77, 0xd8, 0xc6, 0x4c, 0xe2, 0x9c, 0x63, 0xbf, 0x3b, 0xa1, 0xae, 0x42, 0xdd,
0x02, 0x75, 0x65, 0x44, 0x52, 0xba, 0x76, 0xa3, 0x70, 0x27, 0x05, 0x0d, 0xf3, 0x6f, 0x90, 0x3f,
0xb4, 0xa9, 0xd3, 0x83, 0x67, 0xdf, 0x15, 0x71, 0xc6, 0x36, 0xdc, 0xa7, 0xfb, 0x03, 0x95, 0x99,
0xf3, 0xdf, 0x00, 0x54, 0xad, 0x4a, 0xc1, 0x99, 0xa4, 0xe8, 0x03, 0x98, 0xd9, 0x51, 0xd0, 0xbe,
0x31, 0x30, 0x86, 0x5d, 0x6f, 0x88, 0xcf, 0xcd, 0x83, 0xb5, 0xfe, 0xc7, 0x51, 0x50, 0x5f, 0xa9,
0xd0, 0x08, 0x90, 0x26, 0x04, 0x44, 0xc4, 0xc1, 0x5f, 0x9a, 0xca, 0x98, 0xb3, 0x7e, 0x7d, 0x60,
0x0c, 0x9b, 0xbe, 0xa5, 0x91, 0x4f, 0x22, 0x5e, 0xea, 0x3a, 0x7a, 0x05, 0xdd, 0x82, 0x5d, 0x32,
0x9f, 0x28, 0x66, 0x47, 0x57, 0x4b, 0x1a, 0x02, 0x93, 0x91, 0x84, 0xf6, 0x4d, 0x05, 0xaa, 0xb7,
0xf3, 0x02, 0x7a, 0x13, 0xce, 0x36, 0xf1, 0x76, 0x11, 0x46, 0x34, 0x21, 0xe5, 0x52, 0xbf, 0xe1,
0xf9, 0xed, 0x72, 0xb1, 0xd5, 0x47, 0x30, 0xf3, 0x3c, 0xd4, 0x56, 0x2d, 0x6f, 0xf4, 0xe0, 0x56,
0x3a, 0x47, 0x5c, 0xe4, 0x88, 0x17, 0x82, 0x86, 0xbe, 0x52, 0x3a, 0xef, 0xc1, 0x5a, 0xd0, 0x4c,
0x9b, 0x17, 0xdd, 0xf2, 0xf9, 0x13, 0xb9, 0x15, 0x24, 0xfc, 0x13, 0x84, 0x0a, 0x50, 0xfe, 0x6d,
0xbf, 0x53, 0x54, 0x35, 0x3b, 0x8f, 0xbf, 0x22, 0xd5, 0x13, 0xbd, 0x1e, 0x03, 0xdc, 0xa4, 0x87,
0x5a, 0xf0, 0xf4, 0xe7, 0xfc, 0xeb, 0xfc, 0xdb, 0xaf, 0xb9, 0x55, 0x43, 0x00, 0x8d, 0x2f, 0xfe,
0x6c, 0x39, 0xf5, 0x2d, 0x43, 0xbd, 0xa7, 0xcb, 0xd9, 0x64, 0x6a, 0xd5, 0xbd, 0xab, 0x3a, 0xc0,
0x67, 0x22, 0xa9, 0xd6, 0xa1, 0x7d, 0xe9, 0x90, 0xdf, 0x0f, 0xb9, 0x97, 0x5c, 0xaa, 0x72, 0x7f,
0xfb, 0xcd, 0xe5, 0x02, 0x3d, 0xb2, 0x53, 0x43, 0xff, 0xa0, 0x5d, 0x8d, 0x17, 0x8d, 0xcf, 0x7b,
0xdc, 0x73, 0x21, 0xdb, 0x7b, 0x8c, 0xe4, 0xd4, 0x98, 0x41, 0xf3, 0x14, 0x21, 0xc2, 0xe7, 0x2d,
0xee, 0x9e, 0xc9, 0x76, 0x2f, 0xe6, 0x97, 0xfd, 0x56, 0x0d, 0xf5, 0xe3, 0xbc, 0xbd, 0x0e, 0x00,
0x00, 0xff, 0xff, 0x56, 0x9e, 0x46, 0x0c, 0x9b, 0x03, 0x00, 0x00,
}
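
For orientation, the generated client above can be exercised like any other gRPC client. The following sketch is illustrative only: the import alias, dial target, and timeout are assumptions, and a real Nomad plugin would communicate over a go-plugin managed connection rather than a raw TCP address.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	// Hypothetical alias for the generated package shown above.
	base "github.com/hashicorp/nomad/plugins/base"

	"google.golang.org/grpc"
)

func main() {
	// Placeholder address; see the note above about go-plugin.
	conn, err := grpc.Dial("127.0.0.1:4000", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := base.NewBasePluginClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// PluginInfo reports the plugin type, API version, and name so Nomad
	// can decide whether to load the plugin.
	info, err := client.PluginInfo(ctx, &base.PluginInfoRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.GetType(), info.GetName(), info.GetPluginApiVersion())
}

The generated code imports golang.org/x/net/context, but since Go 1.9 that package is a type alias for the standard library context, so the two are interchangeable here.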

63
plugins/base/base.proto Normal file

@@ -0,0 +1,63 @@
syntax = "proto3";
package hashicorp.nomad.plugins.base;
import "hashicorp/nomad/plugins/shared/hclspec/hcl_spec.proto";
// BasePlugin defines the methods that all Nomad plugins must support.
service BasePlugin {
// PluginInfo describes the type and version of a plugin.
rpc PluginInfo(PluginInfoRequest) returns (PluginInfoResponse) {}
// ConfigSchema returns the schema for parsing the plugin's configuration.
rpc ConfigSchema(ConfigSchemaRequest) returns (ConfigSchemaResponse) {}
// SetConfig is used to set the configuration.
rpc SetConfig(SetConfigRequest) returns (SetConfigResponse) {}
}
// PluginType enumerates the type of plugins Nomad supports
enum PluginType {
UNKNOWN = 0;
DRIVER = 1;
DEVICE = 2;
}
// PluginInfoRequest is used to request the plugin's basic information.
message PluginInfoRequest {}
// PluginInfoResponse returns basic information about the plugin such
// that Nomad can decide whether to load the plugin or not.
message PluginInfoResponse {
// type indicates what type of plugin this is.
PluginType type = 1;
// plugin_api_version indicates the version of the Nomad Plugin API
// this plugin is built against.
string plugin_api_version = 2;
// plugin_version is the semver version of this individual plugin.
// This is divorced from Nomad's development and versioning.
string plugin_version = 3;
// name is the name of the plugin
string name = 4;
}
// ConfigSchemaRequest is used to request the configuration's schema.
message ConfigSchemaRequest {}
// ConfigSchemaResponse returns the plugin's configuration schema.
message ConfigSchemaResponse {
// spec is the plugin's configuration schema
hashicorp.nomad.plugins.shared.hclspec.Spec spec = 1;
}
// SetConfigRequest is used to set the configuration
message SetConfigRequest {
// msgpack_config is the configuration encoded as MessagePack.
bytes msgpack_config = 1;
}
// SetConfigResponse is used to respond to setting the configuration
message SetConfigResponse {}
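
To make the service shape concrete, here is a minimal plugin-side sketch of this service. The examplePlugin type, its hard-coded responses, and the import aliases are assumptions for illustration, not part of this change.

package main

import (
	"context"

	base "github.com/hashicorp/nomad/plugins/base"              // hypothetical alias for the generated base package
	hclspec "github.com/hashicorp/nomad/plugins/shared/hclspec" // generated hclspec package
)

// examplePlugin is a hypothetical implementation of base.BasePluginServer.
type examplePlugin struct {
	config []byte
}

// PluginInfo reports the plugin's type, versions, and name.
func (p *examplePlugin) PluginInfo(ctx context.Context, req *base.PluginInfoRequest) (*base.PluginInfoResponse, error) {
	return &base.PluginInfoResponse{
		Type:             base.PluginType_DEVICE,
		PluginApiVersion: "0.1.0",
		PluginVersion:    "0.0.1",
		Name:             "example-device",
	}, nil
}

// ConfigSchema returns the plugin's configuration schema.
func (p *examplePlugin) ConfigSchema(ctx context.Context, req *base.ConfigSchemaRequest) (*base.ConfigSchemaResponse, error) {
	// An empty spec for brevity; a real plugin would describe its
	// configuration using the hclspec.Spec message types.
	return &base.ConfigSchemaResponse{Spec: &hclspec.Spec{}}, nil
}

// SetConfig stores the MessagePack-encoded configuration for later decoding.
func (p *examplePlugin) SetConfig(ctx context.Context, req *base.SetConfigRequest) (*base.SetConfigResponse, error) {
	p.config = req.GetMsgpackConfig()
	return &base.SetConfigResponse{}, nil
}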

641
plugins/device/device.pb.go Normal file

@@ -0,0 +1,641 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: plugins/device/device.proto
package hashicorp_nomad_plugins_device
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import empty "github.com/golang/protobuf/ptypes/empty"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// DetectedDevices is the set of devices that the device plugin has
// detected and is exposing
type DetectedDevices struct {
// vendor is the name of the vendor of the device
Vendor string `protobuf:"bytes,1,opt,name=vendor,proto3" json:"vendor,omitempty"`
// device_type is the type of the device (gpu, fpga, etc).
DeviceType string `protobuf:"bytes,2,opt,name=device_type,json=deviceType,proto3" json:"device_type,omitempty"`
// device_name is the name of the device.
DeviceName string `protobuf:"bytes,3,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"`
// devices is the set of devices detected by the plugin.
Devices []*DetectedDevice `protobuf:"bytes,4,rep,name=devices,proto3" json:"devices,omitempty"`
// node_attributes allows adding node attributes to be used for
// constraints or affinities.
NodeAttributes map[string]string `protobuf:"bytes,5,rep,name=node_attributes,json=nodeAttributes,proto3" json:"node_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DetectedDevices) Reset() { *m = DetectedDevices{} }
func (m *DetectedDevices) String() string { return proto.CompactTextString(m) }
func (*DetectedDevices) ProtoMessage() {}
func (*DetectedDevices) Descriptor() ([]byte, []int) {
return fileDescriptor_device_ad09adbcf7924d2d, []int{0}
}
func (m *DetectedDevices) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DetectedDevices.Unmarshal(m, b)
}
func (m *DetectedDevices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DetectedDevices.Marshal(b, m, deterministic)
}
func (dst *DetectedDevices) XXX_Merge(src proto.Message) {
xxx_messageInfo_DetectedDevices.Merge(dst, src)
}
func (m *DetectedDevices) XXX_Size() int {
return xxx_messageInfo_DetectedDevices.Size(m)
}
func (m *DetectedDevices) XXX_DiscardUnknown() {
xxx_messageInfo_DetectedDevices.DiscardUnknown(m)
}
var xxx_messageInfo_DetectedDevices proto.InternalMessageInfo
func (m *DetectedDevices) GetVendor() string {
if m != nil {
return m.Vendor
}
return ""
}
func (m *DetectedDevices) GetDeviceType() string {
if m != nil {
return m.DeviceType
}
return ""
}
func (m *DetectedDevices) GetDeviceName() string {
if m != nil {
return m.DeviceName
}
return ""
}
func (m *DetectedDevices) GetDevices() []*DetectedDevice {
if m != nil {
return m.Devices
}
return nil
}
func (m *DetectedDevices) GetNodeAttributes() map[string]string {
if m != nil {
return m.NodeAttributes
}
return nil
}
// DetectedDevice is a single detected device.
type DetectedDevice struct {
// ID is the ID of the device. This ID is used during
// allocation.
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
// Health of the device.
Healthy bool `protobuf:"varint,2,opt,name=healthy,proto3" json:"healthy,omitempty"`
// health_description allows the device plugin to optionally
// annotate the health field with a human-readable reason.
HealthDescription string `protobuf:"bytes,3,opt,name=health_description,json=healthDescription,proto3" json:"health_description,omitempty"`
// pci_bus_id is the PCI bus ID for the device. If reported, it
// allows Nomad to make NUMA aware optimizations.
PciBusId string `protobuf:"bytes,4,opt,name=pci_bus_id,json=pciBusId,proto3" json:"pci_bus_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DetectedDevice) Reset() { *m = DetectedDevice{} }
func (m *DetectedDevice) String() string { return proto.CompactTextString(m) }
func (*DetectedDevice) ProtoMessage() {}
func (*DetectedDevice) Descriptor() ([]byte, []int) {
return fileDescriptor_device_ad09adbcf7924d2d, []int{1}
}
func (m *DetectedDevice) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DetectedDevice.Unmarshal(m, b)
}
func (m *DetectedDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DetectedDevice.Marshal(b, m, deterministic)
}
func (dst *DetectedDevice) XXX_Merge(src proto.Message) {
xxx_messageInfo_DetectedDevice.Merge(dst, src)
}
func (m *DetectedDevice) XXX_Size() int {
return xxx_messageInfo_DetectedDevice.Size(m)
}
func (m *DetectedDevice) XXX_DiscardUnknown() {
xxx_messageInfo_DetectedDevice.DiscardUnknown(m)
}
var xxx_messageInfo_DetectedDevice proto.InternalMessageInfo
func (m *DetectedDevice) GetID() string {
if m != nil {
return m.ID
}
return ""
}
func (m *DetectedDevice) GetHealthy() bool {
if m != nil {
return m.Healthy
}
return false
}
func (m *DetectedDevice) GetHealthDescription() string {
if m != nil {
return m.HealthDescription
}
return ""
}
func (m *DetectedDevice) GetPciBusId() string {
if m != nil {
return m.PciBusId
}
return ""
}
// ReserveRequest is used to ask the device driver for information on
// how to allocate the requested devices.
type ReserveRequest struct {
// device_ids are the requested devices.
DeviceIds []string `protobuf:"bytes,1,rep,name=device_ids,json=deviceIds,proto3" json:"device_ids,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReserveRequest) Reset() { *m = ReserveRequest{} }
func (m *ReserveRequest) String() string { return proto.CompactTextString(m) }
func (*ReserveRequest) ProtoMessage() {}
func (*ReserveRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_device_ad09adbcf7924d2d, []int{2}
}
func (m *ReserveRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ReserveRequest.Unmarshal(m, b)
}
func (m *ReserveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ReserveRequest.Marshal(b, m, deterministic)
}
func (dst *ReserveRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReserveRequest.Merge(dst, src)
}
func (m *ReserveRequest) XXX_Size() int {
return xxx_messageInfo_ReserveRequest.Size(m)
}
func (m *ReserveRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ReserveRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ReserveRequest proto.InternalMessageInfo
func (m *ReserveRequest) GetDeviceIds() []string {
if m != nil {
return m.DeviceIds
}
return nil
}
// ReserveResponse informs Nomad how to expose the requested devices
// to the task.
type ReserveResponse struct {
// container_res contains information on how to mount the device
// into a task isolated using container technologies (where the
// host is shared)
ContainerRes *ContainerReservation `protobuf:"bytes,1,opt,name=container_res,json=containerRes,proto3" json:"container_res,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReserveResponse) Reset() { *m = ReserveResponse{} }
func (m *ReserveResponse) String() string { return proto.CompactTextString(m) }
func (*ReserveResponse) ProtoMessage() {}
func (*ReserveResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_device_ad09adbcf7924d2d, []int{3}
}
func (m *ReserveResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ReserveResponse.Unmarshal(m, b)
}
func (m *ReserveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ReserveResponse.Marshal(b, m, deterministic)
}
func (dst *ReserveResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReserveResponse.Merge(dst, src)
}
func (m *ReserveResponse) XXX_Size() int {
return xxx_messageInfo_ReserveResponse.Size(m)
}
func (m *ReserveResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ReserveResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ReserveResponse proto.InternalMessageInfo
func (m *ReserveResponse) GetContainerRes() *ContainerReservation {
if m != nil {
return m.ContainerRes
}
return nil
}
// ContainerReservation returns how to mount the device into a
// container that shares the host OS.
type ContainerReservation struct {
// List of environment variables to be set
Envs map[string]string `protobuf:"bytes,1,rep,name=envs,proto3" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Mounts for the task.
Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"`
// Devices for the task.
Devices []*DeviceSpec `protobuf:"bytes,3,rep,name=devices,proto3" json:"devices,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ContainerReservation) Reset() { *m = ContainerReservation{} }
func (m *ContainerReservation) String() string { return proto.CompactTextString(m) }
func (*ContainerReservation) ProtoMessage() {}
func (*ContainerReservation) Descriptor() ([]byte, []int) {
return fileDescriptor_device_ad09adbcf7924d2d, []int{4}
}
func (m *ContainerReservation) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ContainerReservation.Unmarshal(m, b)
}
func (m *ContainerReservation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ContainerReservation.Marshal(b, m, deterministic)
}
func (dst *ContainerReservation) XXX_Merge(src proto.Message) {
xxx_messageInfo_ContainerReservation.Merge(dst, src)
}
func (m *ContainerReservation) XXX_Size() int {
return xxx_messageInfo_ContainerReservation.Size(m)
}
func (m *ContainerReservation) XXX_DiscardUnknown() {
xxx_messageInfo_ContainerReservation.DiscardUnknown(m)
}
var xxx_messageInfo_ContainerReservation proto.InternalMessageInfo
func (m *ContainerReservation) GetEnvs() map[string]string {
if m != nil {
return m.Envs
}
return nil
}
func (m *ContainerReservation) GetMounts() []*Mount {
if m != nil {
return m.Mounts
}
return nil
}
func (m *ContainerReservation) GetDevices() []*DeviceSpec {
if m != nil {
return m.Devices
}
return nil
}
// Mount specifies a host volume to mount into a task, exposing
// device libraries or tools installed on the host to the task.
type Mount struct {
// Path of the mount within the task.
TaskPath string `protobuf:"bytes,1,opt,name=task_path,json=taskPath,proto3" json:"task_path,omitempty"`
// Path of the mount on the host.
HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"`
// If set, the mount is read-only.
ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Mount) Reset() { *m = Mount{} }
func (m *Mount) String() string { return proto.CompactTextString(m) }
func (*Mount) ProtoMessage() {}
func (*Mount) Descriptor() ([]byte, []int) {
return fileDescriptor_device_ad09adbcf7924d2d, []int{5}
}
func (m *Mount) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Mount.Unmarshal(m, b)
}
func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Mount.Marshal(b, m, deterministic)
}
func (dst *Mount) XXX_Merge(src proto.Message) {
xxx_messageInfo_Mount.Merge(dst, src)
}
func (m *Mount) XXX_Size() int {
return xxx_messageInfo_Mount.Size(m)
}
func (m *Mount) XXX_DiscardUnknown() {
xxx_messageInfo_Mount.DiscardUnknown(m)
}
var xxx_messageInfo_Mount proto.InternalMessageInfo
func (m *Mount) GetTaskPath() string {
if m != nil {
return m.TaskPath
}
return ""
}
func (m *Mount) GetHostPath() string {
if m != nil {
return m.HostPath
}
return ""
}
func (m *Mount) GetReadOnly() bool {
if m != nil {
return m.ReadOnly
}
return false
}
// DeviceSpec specifies a host device to mount into a task.
type DeviceSpec struct {
// Path of the device within the task.
TaskPath string `protobuf:"bytes,1,opt,name=task_path,json=taskPath,proto3" json:"task_path,omitempty"`
// Path of the device on the host.
HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"`
// Cgroup permissions of the device; a combination of one or more of
// * r - allows task to read from the specified device.
// * w - allows task to write to the specified device.
// * m - allows task to create device files that do not yet exist
Permissions string `protobuf:"bytes,3,opt,name=permissions,proto3" json:"permissions,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DeviceSpec) Reset() { *m = DeviceSpec{} }
func (m *DeviceSpec) String() string { return proto.CompactTextString(m) }
func (*DeviceSpec) ProtoMessage() {}
func (*DeviceSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_device_ad09adbcf7924d2d, []int{6}
}
func (m *DeviceSpec) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeviceSpec.Unmarshal(m, b)
}
func (m *DeviceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DeviceSpec.Marshal(b, m, deterministic)
}
func (dst *DeviceSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeviceSpec.Merge(dst, src)
}
func (m *DeviceSpec) XXX_Size() int {
return xxx_messageInfo_DeviceSpec.Size(m)
}
func (m *DeviceSpec) XXX_DiscardUnknown() {
xxx_messageInfo_DeviceSpec.DiscardUnknown(m)
}
var xxx_messageInfo_DeviceSpec proto.InternalMessageInfo
func (m *DeviceSpec) GetTaskPath() string {
if m != nil {
return m.TaskPath
}
return ""
}
func (m *DeviceSpec) GetHostPath() string {
if m != nil {
return m.HostPath
}
return ""
}
func (m *DeviceSpec) GetPermissions() string {
if m != nil {
return m.Permissions
}
return ""
}
func init() {
proto.RegisterType((*DetectedDevices)(nil), "hashicorp.nomad.plugins.device.DetectedDevices")
proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.device.DetectedDevices.NodeAttributesEntry")
proto.RegisterType((*DetectedDevice)(nil), "hashicorp.nomad.plugins.device.DetectedDevice")
proto.RegisterType((*ReserveRequest)(nil), "hashicorp.nomad.plugins.device.ReserveRequest")
proto.RegisterType((*ReserveResponse)(nil), "hashicorp.nomad.plugins.device.ReserveResponse")
proto.RegisterType((*ContainerReservation)(nil), "hashicorp.nomad.plugins.device.ContainerReservation")
proto.RegisterMapType((map[string]string)(nil), "hashicorp.nomad.plugins.device.ContainerReservation.EnvsEntry")
proto.RegisterType((*Mount)(nil), "hashicorp.nomad.plugins.device.Mount")
proto.RegisterType((*DeviceSpec)(nil), "hashicorp.nomad.plugins.device.DeviceSpec")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// DevicePluginClient is the client API for DevicePlugin service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type DevicePluginClient interface {
// Fingerprint allows the device plugin to return a set of
// detected devices and provide a mechanism to update the state of
// the device.
Fingerprint(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (DevicePlugin_FingerprintClient, error)
// Reserve is called by the client before starting an allocation
// that requires access to the plugin's devices. The plugin can use
// this to run any setup steps and provide the mounting details to
// the Nomad client.
Reserve(ctx context.Context, in *ReserveRequest, opts ...grpc.CallOption) (*ReserveResponse, error)
}
type devicePluginClient struct {
cc *grpc.ClientConn
}
func NewDevicePluginClient(cc *grpc.ClientConn) DevicePluginClient {
return &devicePluginClient{cc}
}
func (c *devicePluginClient) Fingerprint(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (DevicePlugin_FingerprintClient, error) {
stream, err := c.cc.NewStream(ctx, &_DevicePlugin_serviceDesc.Streams[0], "/hashicorp.nomad.plugins.device.DevicePlugin/Fingerprint", opts...)
if err != nil {
return nil, err
}
x := &devicePluginFingerprintClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type DevicePlugin_FingerprintClient interface {
Recv() (*DetectedDevices, error)
grpc.ClientStream
}
type devicePluginFingerprintClient struct {
grpc.ClientStream
}
func (x *devicePluginFingerprintClient) Recv() (*DetectedDevices, error) {
m := new(DetectedDevices)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *devicePluginClient) Reserve(ctx context.Context, in *ReserveRequest, opts ...grpc.CallOption) (*ReserveResponse, error) {
out := new(ReserveResponse)
err := c.cc.Invoke(ctx, "/hashicorp.nomad.plugins.device.DevicePlugin/Reserve", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// DevicePluginServer is the server API for DevicePlugin service.
type DevicePluginServer interface {
// Fingerprint allows the device plugin to return a set of
// detected devices and provide a mechanism to update the state of
// the device.
Fingerprint(*empty.Empty, DevicePlugin_FingerprintServer) error
// Reserve is called by the client before starting an allocation
// that requires access to the plugin's devices. The plugin can use
// this to run any setup steps and provide the mounting details to
// the Nomad client.
Reserve(context.Context, *ReserveRequest) (*ReserveResponse, error)
}
func RegisterDevicePluginServer(s *grpc.Server, srv DevicePluginServer) {
s.RegisterService(&_DevicePlugin_serviceDesc, srv)
}
func _DevicePlugin_Fingerprint_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(empty.Empty)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(DevicePluginServer).Fingerprint(m, &devicePluginFingerprintServer{stream})
}
type DevicePlugin_FingerprintServer interface {
Send(*DetectedDevices) error
grpc.ServerStream
}
type devicePluginFingerprintServer struct {
grpc.ServerStream
}
func (x *devicePluginFingerprintServer) Send(m *DetectedDevices) error {
return x.ServerStream.SendMsg(m)
}
func _DevicePlugin_Reserve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReserveRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DevicePluginServer).Reserve(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/hashicorp.nomad.plugins.device.DevicePlugin/Reserve",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DevicePluginServer).Reserve(ctx, req.(*ReserveRequest))
}
return interceptor(ctx, in, info, handler)
}
var _DevicePlugin_serviceDesc = grpc.ServiceDesc{
ServiceName: "hashicorp.nomad.plugins.device.DevicePlugin",
HandlerType: (*DevicePluginServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Reserve",
Handler: _DevicePlugin_Reserve_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "Fingerprint",
Handler: _DevicePlugin_Fingerprint_Handler,
ServerStreams: true,
},
},
Metadata: "plugins/device/device.proto",
}
func init() { proto.RegisterFile("plugins/device/device.proto", fileDescriptor_device_ad09adbcf7924d2d) }
var fileDescriptor_device_ad09adbcf7924d2d = []byte{
// 625 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xdb, 0x6e, 0xd3, 0x40,
0x10, 0xad, 0x9d, 0x5e, 0x92, 0x49, 0x49, 0x61, 0xa9, 0x2a, 0x2b, 0xe5, 0x12, 0x59, 0x42, 0xaa,
0x90, 0x70, 0x50, 0x40, 0x02, 0x21, 0x81, 0x54, 0x9a, 0x22, 0xf2, 0x40, 0xa9, 0x0c, 0x2f, 0xf0,
0x80, 0xe5, 0xd8, 0x43, 0xbc, 0xaa, 0xb3, 0xbb, 0xec, 0xae, 0x23, 0xf9, 0x0b, 0xf8, 0x15, 0x3e,
0x89, 0x2f, 0xe0, 0x3b, 0x90, 0x77, 0x9d, 0x34, 0x41, 0x15, 0x51, 0xe1, 0xc9, 0xde, 0x99, 0x73,
0xce, 0x1c, 0x8f, 0x67, 0x07, 0x0e, 0x45, 0x5e, 0x4c, 0x28, 0x53, 0xfd, 0x14, 0x67, 0x34, 0xc1,
0xfa, 0x11, 0x08, 0xc9, 0x35, 0x27, 0xf7, 0xb2, 0x58, 0x65, 0x34, 0xe1, 0x52, 0x04, 0x8c, 0x4f,
0xe3, 0x34, 0xa8, 0xc1, 0x81, 0x45, 0x75, 0x0f, 0x27, 0x9c, 0x4f, 0x72, 0xec, 0x1b, 0xf4, 0xb8,
0xf8, 0xda, 0xc7, 0xa9, 0xd0, 0xa5, 0x25, 0xfb, 0xbf, 0x5c, 0xd8, 0x1b, 0xa2, 0xc6, 0x44, 0x63,
0x3a, 0x34, 0x78, 0x45, 0x0e, 0x60, 0x7b, 0x86, 0x2c, 0xe5, 0xd2, 0x73, 0x7a, 0xce, 0x51, 0x2b,
0xac, 0x4f, 0xe4, 0x3e, 0xb4, 0xad, 0x64, 0xa4, 0x4b, 0x81, 0x9e, 0x6b, 0x92, 0x60, 0x43, 0x1f,
0x4b, 0x81, 0x4b, 0x00, 0x16, 0x4f, 0xd1, 0x6b, 0x2c, 0x03, 0xce, 0xe2, 0x29, 0x92, 0xb7, 0xb0,
0x63, 0x4f, 0xca, 0xdb, 0xec, 0x35, 0x8e, 0xda, 0x83, 0x20, 0xf8, 0xbb, 0xf9, 0x60, 0xd5, 0x5b,
0x38, 0xa7, 0x93, 0x1c, 0xf6, 0x18, 0x4f, 0x31, 0x8a, 0xb5, 0x96, 0x74, 0x5c, 0x68, 0x54, 0xde,
0x96, 0x51, 0x3c, 0xb9, 0x9e, 0xa2, 0x0a, 0xce, 0x78, 0x8a, 0xc7, 0x0b, 0x95, 0x53, 0xa6, 0x65,
0x19, 0x76, 0xd8, 0x4a, 0xb0, 0x7b, 0x0c, 0xb7, 0xaf, 0x80, 0x91, 0x9b, 0xd0, 0xb8, 0xc0, 0xb2,
0xee, 0x52, 0xf5, 0x4a, 0xf6, 0x61, 0x6b, 0x16, 0xe7, 0xc5, 0xbc, 0x39, 0xf6, 0xf0, 0xc2, 0x7d,
0xee, 0xf8, 0xdf, 0x1d, 0xe8, 0xac, 0x96, 0x26, 0x1d, 0x70, 0x47, 0xc3, 0x9a, 0xed, 0x8e, 0x86,
0xc4, 0x83, 0x9d, 0x0c, 0xe3, 0x5c, 0x67, 0xa5, 0xa1, 0x37, 0xc3, 0xf9, 0x91, 0x3c, 0x02, 0x62,
0x5f, 0xa3, 0x14, 0x55, 0x22, 0xa9, 0xd0, 0x94, 0xb3, 0xba, 0xbf, 0xb7, 0x6c, 0x66, 0x78, 0x99,
0x20, 0x77, 0x00, 0x44, 0x42, 0xa3, 0x71, 0xa1, 0x22, 0x9a, 0x7a, 0x9b, 0x06, 0xd6, 0x14, 0x09,
0x7d, 0x5d, 0xa8, 0x51, 0xea, 0xf7, 0xa1, 0x13, 0xa2, 0x42, 0x39, 0xc3, 0x10, 0xbf, 0x15, 0xa8,
0x34, 0xb9, 0x0b, 0xf5, 0x4f, 0x8a, 0x68, 0xaa, 0x3c, 0xa7, 0xd7, 0x38, 0x6a, 0x85, 0x2d, 0x1b,
0x19, 0xa5, 0xca, 0xcf, 0x61, 0x6f, 0x41, 0x50, 0x82, 0x33, 0x85, 0xe4, 0x13, 0xdc, 0x48, 0x38,
0xd3, 0x31, 0x65, 0x28, 0x23, 0x89, 0xca, 0x7c, 0x45, 0x7b, 0xf0, 0x74, 0x5d, 0xf3, 0x4f, 0xe6,
0x24, 0x2b, 0x18, 0x57, 0x76, 0xc3, 0xdd, 0x64, 0x29, 0xea, 0xff, 0x70, 0x61, 0xff, 0x2a, 0x18,
0x09, 0x61, 0x13, 0xd9, 0xcc, 0xfa, 0x6b, 0x0f, 0x5e, 0xfd, 0x4b, 0xa9, 0xe0, 0x94, 0xcd, 0xea,
0x5f, 0x6c, 0xb4, 0xc8, 0x4b, 0xd8, 0x9e, 0xf2, 0x82, 0x69, 0xe5, 0xb9, 0x46, 0xf5, 0xc1, 0x3a,
0xd5, 0x77, 0x15, 0x3a, 0xac, 0x49, 0x64, 0x78, 0x39, 0xcf, 0x0d, 0xc3, 0x7f, 0xb8, 0x7e, 0xfa,
0xaa, 0xc7, 0x07, 0x81, 0xc9, 0x62, 0x96, 0xbb, 0xcf, 0xa0, 0xb5, 0xf0, 0x75, 0xad, 0x99, 0xfa,
0x02, 0x5b, 0xc6, 0x0f, 0x39, 0x84, 0x96, 0x8e, 0xd5, 0x45, 0x24, 0x62, 0x9d, 0xd5, 0xd4, 0x66,
0x15, 0x38, 0x8f, 0x75, 0x56, 0x25, 0x33, 0xae, 0xb4, 0x4d, 0x5a, 0x8d, 0x66, 0x15, 0x98, 0x27,
0x25, 0xc6, 0x69, 0xc4, 0x59, 0x5e, 0x9a, 0x81, 0x6a, 0x86, 0xcd, 0x2a, 0xf0, 0x9e, 0xe5, 0xa5,
0x9f, 0x01, 0x5c, 0xfa, 0xfd, 0x8f, 0x22, 0x3d, 0x68, 0x0b, 0x94, 0x53, 0xaa, 0x14, 0xe5, 0x4c,
0xd5, 0x73, 0xbb, 0x1c, 0x1a, 0xfc, 0x74, 0x60, 0xd7, 0x96, 0x3a, 0x37, 0xfd, 0x22, 0x9f, 0xa1,
0xfd, 0x86, 0xb2, 0x09, 0x4a, 0x21, 0x29, 0xd3, 0xe4, 0x20, 0xb0, 0x4b, 0x2c, 0x98, 0x2f, 0xb1,
0xe0, 0xb4, 0x5a, 0x62, 0xdd, 0xfe, 0x35, 0x6f, 0xbb, 0xbf, 0xf1, 0xd8, 0x21, 0x39, 0xec, 0xd4,
0xf3, 0x4c, 0xd6, 0xee, 0x9f, 0xd5, 0x9b, 0xb2, 0xbe, 0xde, 0x1f, 0x17, 0xc5, 0xdf, 0x18, 0x6f,
0x1b, 0xcb, 0x4f, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0xda, 0xf3, 0x34, 0xb8, 0xc4, 0x05, 0x00,
0x00,
}
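
For context on the streaming half of the generated client above, a consumer would drive Fingerprint roughly as follows. The watchDevices helper and its connection argument are illustrative assumptions.

package main

import (
	"context"
	"io"
	"log"

	"github.com/golang/protobuf/ptypes/empty"
	device "github.com/hashicorp/nomad/plugins/device" // hypothetical alias for the generated package
	"google.golang.org/grpc"
)

// watchDevices consumes the Fingerprint stream until it ends, logging
// each DetectedDevices update the plugin sends.
func watchDevices(conn *grpc.ClientConn) error {
	client := device.NewDevicePluginClient(conn)

	stream, err := client.Fingerprint(context.Background(), &empty.Empty{})
	if err != nil {
		return err
	}
	for {
		detected, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		log.Printf("vendor=%s type=%s devices=%d",
			detected.GetVendor(), detected.GetDeviceType(), len(detected.GetDevices()))
	}
}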

113
plugins/device/device.proto Normal file

@@ -0,0 +1,113 @@
syntax = "proto3";
package hashicorp.nomad.plugins.device;
import "google/protobuf/empty.proto";
// DevicePlugin is the API exposed by device plugins
service DevicePlugin {
// Fingerprint allows the device plugin to return a set of
// detected devices and provide a mechanism to update the state of
// the device.
rpc Fingerprint(google.protobuf.Empty) returns (stream DetectedDevices) {}
// Reserve is called by the client before starting an allocation
// that requires access to the plugin's devices. The plugin can use
// this to run any setup steps and provide the mounting details to
// the Nomad client.
rpc Reserve(ReserveRequest) returns (ReserveResponse) {}
}
// DetectedDevices is the set of devices that the device plugin has
// detected and is exposing
message DetectedDevices {
// vendor is the name of the vendor of the device
string vendor = 1;
// device_type is the type of the device (gpu, fpga, etc).
string device_type = 2;
// device_name is the name of the device.
string device_name = 3;
// devices is the set of devices detected by the plugin.
repeated DetectedDevice devices = 4;
// node_attributes allows adding node attributes to be used for
// constraints or affinities.
map<string, string> node_attributes = 5;
}
// DetectedDevice is a single detected device.
message DetectedDevice {
// ID is the ID of the device. This ID is used during
// allocation.
string ID = 1;
// Health of the device.
bool healthy = 2;
// health_description allows the device plugin to optionally
// annotate the health field with a human-readable reason.
string health_description = 3;
// pci_bus_id is the PCI bus ID for the device. If reported, it
// allows Nomad to make NUMA aware optimizations.
string pci_bus_id = 4;
}
// ReserveRequest is used to ask the device driver for information on
// how to allocate the requested devices.
message ReserveRequest {
// device_ids are the requested devices.
repeated string device_ids = 1;
}
// ReserveResponse informs Nomad how to expose the requested devices
// to the task.
message ReserveResponse {
// container_res contains information on how to mount the device
// into a task isolated using container technologies (where the
// host is shared)
ContainerReservation container_res = 1;
}
// ContainerReservation returns how to mount the device into a
// container that shares the host OS.
message ContainerReservation {
// List of environment variables to be set
map<string, string> envs = 1;
// Mounts for the task.
repeated Mount mounts = 2;
// Devices for the task.
repeated DeviceSpec devices = 3;
}
// Mount specifies a host volume to mount into a task, exposing
// device libraries or tools installed on the host to the task.
message Mount {
// Path of the mount within the task.
string task_path = 1;
// Path of the mount on the host.
string host_path = 2;
// If set, the mount is read-only.
bool read_only = 3;
}
// DeviceSpec specifies a host device to mount into a task.
message DeviceSpec {
// Path of the device within the task.
string task_path = 1;
// Path of the device on the host.
string host_path = 2;
// Cgroup permissions of the device; a combination of one or more of
// * r - allows task to read from the specified device.
// * w - allows task to write to the specified device.
// * m - allows task to create device files that do not yet exist
string permissions = 3;
}
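
As a companion to the proto definition above, a minimal sketch of a plugin-side DevicePluginServer follows. The vendor name, device IDs, paths, environment variable, and 30-second refresh interval are all invented for illustration.

package main

import (
	"context"
	"time"

	"github.com/golang/protobuf/ptypes/empty"
	device "github.com/hashicorp/nomad/plugins/device" // hypothetical alias for the generated package
)

// exampleDevicePlugin is a hypothetical device.DevicePluginServer.
type exampleDevicePlugin struct{}

// Fingerprint periodically streams the full set of detected devices.
func (p *exampleDevicePlugin) Fingerprint(req *empty.Empty, stream device.DevicePlugin_FingerprintServer) error {
	for {
		out := &device.DetectedDevices{
			Vendor:     "examplecorp",
			DeviceType: "gpu",
			DeviceName: "example-gpu",
			Devices: []*device.DetectedDevice{
				{ID: "GPU-0", Healthy: true},
			},
			NodeAttributes: map[string]string{"examplecorp.driver": "1.0"},
		}
		if err := stream.Send(out); err != nil {
			return err
		}
		time.Sleep(30 * time.Second)
	}
}

// Reserve tells Nomad how to expose the requested devices to the task.
func (p *exampleDevicePlugin) Reserve(ctx context.Context, req *device.ReserveRequest) (*device.ReserveResponse, error) {
	envs := map[string]string{}
	if ids := req.GetDeviceIds(); len(ids) > 0 {
		envs["VISIBLE_DEVICES"] = ids[0]
	}
	return &device.ReserveResponse{
		ContainerRes: &device.ContainerReservation{
			Envs: envs,
			Mounts: []*device.Mount{
				{TaskPath: "/usr/lib/example", HostPath: "/usr/lib/example", ReadOnly: true},
			},
			Devices: []*device.DeviceSpec{
				{TaskPath: "/dev/example0", HostPath: "/dev/example0", Permissions: "rw"},
			},
		},
	}, nil
}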


@@ -0,0 +1,297 @@
package hclspec
import (
"fmt"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/hashicorp/hcl2/hcldec"
)
var (
// nilSpecDiagnostic is the diagnostic value returned if a nil value is
// given
nilSpecDiagnostic = &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "nil spec given",
Detail: "Can not convert a nil specification. Pass a valid spec",
}
// emptyPos is the position used when parsing hcl expressions
emptyPos = hcl.Pos{
Line: 0,
Column: 0,
Byte: 0,
}
// specCtx is the context used to evaluate expressions.
specCtx = &hcl.EvalContext{
Functions: specFuncs,
}
)
// Convert converts a Spec to an hcl specification.
func Convert(spec *Spec) (hcldec.Spec, hcl.Diagnostics) {
if spec == nil {
return nil, hcl.Diagnostics([]*hcl.Diagnostic{nilSpecDiagnostic})
}
return decodeSpecBlock(spec, "")
}
// decodeSpecBlock is the recursive entry point that converts between the two
// spec types.
func decodeSpecBlock(spec *Spec, impliedName string) (hcldec.Spec, hcl.Diagnostics) {
switch spec.Block.(type) {
case *Spec_Object:
return decodeObjectSpec(spec.GetObject())
case *Spec_Array:
return decodeArraySpec(spec.GetArray())
case *Spec_Attr:
return decodeAttrSpec(spec.GetAttr(), impliedName)
case *Spec_BlockValue:
return decodeBlockSpec(spec.GetBlockValue(), impliedName)
case *Spec_BlockList:
return decodeBlockListSpec(spec.GetBlockList(), impliedName)
case *Spec_BlockSet:
return decodeBlockSetSpec(spec.GetBlockSet(), impliedName)
case *Spec_BlockMap:
return decodeBlockMapSpec(spec.GetBlockMap(), impliedName)
case *Spec_Default:
return decodeDefaultSpec(spec.GetDefault())
case *Spec_Literal:
return decodeLiteralSpec(spec.GetLiteral())
default:
// Should never happen, because the above cases should be exhaustive
// for our schema.
var diags hcl.Diagnostics
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid spec block",
Detail: fmt.Sprintf("Blocks of type %T are not expected here.", spec.Block),
})
return nil, diags
}
return nil, nil
}
func decodeObjectSpec(obj *Object) (hcldec.Spec, hcl.Diagnostics) {
var diags hcl.Diagnostics
spec := make(hcldec.ObjectSpec)
for attr, block := range obj.GetAttributes() {
propSpec, propDiags := decodeSpecBlock(block, attr)
diags = append(diags, propDiags...)
spec[attr] = propSpec
}
return spec, diags
}
func decodeArraySpec(a *Array) (hcldec.Spec, hcl.Diagnostics) {
values := a.GetValues()
var diags hcl.Diagnostics
spec := make(hcldec.TupleSpec, 0, len(values))
for _, block := range values {
elemSpec, elemDiags := decodeSpecBlock(block, "")
diags = append(diags, elemDiags...)
spec = append(spec, elemSpec)
}
return spec, diags
}
func decodeAttrSpec(attr *Attr, impliedName string) (hcldec.Spec, hcl.Diagnostics) {
// Convert the string type to an hcl.Expression
typeExpr, diags := hclsyntax.ParseExpression([]byte(attr.GetType()), "proto", emptyPos)
if diags.HasErrors() {
return nil, diags
}
spec := &hcldec.AttrSpec{
Name: impliedName,
Required: attr.GetRequired(),
}
if n := attr.GetName(); n != "" {
spec.Name = n
}
var typeDiags hcl.Diagnostics
spec.Type, typeDiags = evalTypeExpr(typeExpr)
diags = append(diags, typeDiags...)
if spec.Name == "" {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing name in attribute spec",
Detail: "The name attribute is required, to specify the attribute name that is expected in an input HCL file.",
})
return nil, diags
}
return spec, diags
}
func decodeBlockSpec(block *Block, impliedName string) (hcldec.Spec, hcl.Diagnostics) {
spec := &hcldec.BlockSpec{
TypeName: impliedName,
Required: block.GetRequired(),
}
if n := block.GetName(); n != "" {
spec.TypeName = n
}
nested, diags := decodeBlockNestedSpec(block.GetNested())
spec.Nested = nested
return spec, diags
}
func decodeBlockListSpec(block *BlockList, impliedName string) (hcldec.Spec, hcl.Diagnostics) {
spec := &hcldec.BlockListSpec{
TypeName: impliedName,
MinItems: int(block.GetMinItems()),
MaxItems: int(block.GetMaxItems()),
}
if n := block.GetName(); n != "" {
spec.TypeName = n
}
nested, diags := decodeBlockNestedSpec(block.GetNested())
spec.Nested = nested
if spec.TypeName == "" {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing name in block_list spec",
Detail: "The name attribute is required, to specify the block type name that is expected in an input HCL file.",
})
return nil, diags
}
return spec, diags
}
func decodeBlockSetSpec(block *BlockSet, impliedName string) (hcldec.Spec, hcl.Diagnostics) {
spec := &hcldec.BlockSetSpec{
TypeName: impliedName,
MinItems: int(block.GetMinItems()),
MaxItems: int(block.GetMaxItems()),
}
if n := block.GetName(); n != "" {
spec.TypeName = n
}
nested, diags := decodeBlockNestedSpec(block.GetNested())
spec.Nested = nested
if spec.TypeName == "" {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing name in block_set spec",
Detail: "The name attribute is required, to specify the block type name that is expected in an input HCL file.",
})
return nil, diags
}
return spec, diags
}
func decodeBlockMapSpec(block *BlockMap, impliedName string) (hcldec.Spec, hcl.Diagnostics) {
spec := &hcldec.BlockMapSpec{
TypeName: impliedName,
LabelNames: block.GetLabels(),
}
if n := block.GetName(); n != "" {
spec.TypeName = n
}
nested, diags := decodeBlockNestedSpec(block.GetNested())
spec.Nested = nested
if spec.TypeName == "" {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing name in block_map spec",
Detail: "The name attribute is required, to specify the block type name that is expected in an input HCL file.",
})
return nil, diags
}
if len(spec.LabelNames) < 1 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid block label name list",
Detail: "A block_map must have at least one label specified.",
})
return nil, diags
}
return spec, diags
}
func decodeBlockNestedSpec(spec *Spec) (hcldec.Spec, hcl.Diagnostics) {
if spec == nil {
return nil, hcl.Diagnostics([]*hcl.Diagnostic{
{
Severity: hcl.DiagError,
Summary: "Missing spec block",
Detail: "A block spec must have exactly one child spec specifying how to decode block contents.",
}})
}
return decodeSpecBlock(spec, "")
}
func decodeLiteralSpec(l *Literal) (hcldec.Spec, hcl.Diagnostics) {
// Convert the string value to an hcl.Expression
valueExpr, diags := hclsyntax.ParseExpression([]byte(l.GetValue()), "proto", emptyPos)
if diags.HasErrors() {
return nil, diags
}
value, valueDiags := valueExpr.Value(specCtx)
diags = append(diags, valueDiags...)
if diags.HasErrors() {
return nil, diags
}
return &hcldec.LiteralSpec{
Value: value,
}, diags
}
func decodeDefaultSpec(d *Default) (hcldec.Spec, hcl.Diagnostics) {
// Parse the primary
primary, diags := decodeSpecBlock(d.GetPrimary(), "")
if diags.HasErrors() {
return nil, diags
}
// Parse the default
def, defDiags := decodeSpecBlock(d.GetDefault(), "")
diags = append(diags, defDiags...)
if diags.HasErrors() {
return nil, diags
}
spec := &hcldec.DefaultSpec{
Primary: primary,
Default: def,
}
return spec, diags
}
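
To show how Convert is meant to be used end to end, here is a short sketch that converts a wire-format Spec and decodes an HCL body against it. The import path for this package and the sample configuration are assumptions for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/hashicorp/hcl2/hcldec"

	"github.com/hashicorp/nomad/plugins/shared/hclspec" // hypothetical import path for this package
)

func main() {
	// A protobuf spec describing one required string attribute.
	spec := &hclspec.Spec{
		Block: &hclspec.Spec_Attr{
			Attr: &hclspec.Attr{Name: "image", Type: "string", Required: true},
		},
	}

	// Convert the wire-format spec into an hcldec.Spec.
	dec, diags := hclspec.Convert(spec)
	if diags.HasErrors() {
		log.Fatal(diags.Error())
	}

	// Parse a configuration body and decode it against the converted spec.
	file, parseDiags := hclsyntax.ParseConfig(
		[]byte(`image = "redis:3.2"`), "config.hcl", hcl.Pos{Line: 1, Column: 1})
	if parseDiags.HasErrors() {
		log.Fatal(parseDiags.Error())
	}
	val, decDiags := hcldec.Decode(file.Body, dec, nil)
	if decDiags.HasErrors() {
		log.Fatal(decDiags.Error())
	}

	// An AttrSpec decodes to the attribute's value itself.
	fmt.Println(val.AsString()) // redis:3.2
}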


@@ -0,0 +1,593 @@
package hclspec
import (
"testing"
"github.com/hashicorp/hcl2/hcldec"
"github.com/stretchr/testify/require"
"github.com/zclconf/go-cty/cty"
)
type testConversions struct {
Name string
Input *Spec
Expected hcldec.Spec
ExpectedError string
}
func testSpecConversions(t *testing.T, cases []testConversions) {
t.Helper()
for _, c := range cases {
t.Run(c.Name, func(t *testing.T) {
act, diag := Convert(c.Input)
if diag.HasErrors() {
if c.ExpectedError == "" {
t.Fatalf("Convert %q failed: %v", c.Name, diag.Error())
}
require.Contains(t, diag.Error(), c.ExpectedError)
} else if c.ExpectedError != "" {
t.Fatalf("Expected error %q", c.ExpectedError)
}
require.EqualValues(t, c.Expected, act)
})
}
}
func TestDec_Convert_Object(t *testing.T) {
t.Parallel()
tests := []testConversions{
{
Name: "Object w/ only attributes",
Input: &Spec{
Block: &Spec_Object{
&Object{
Attributes: map[string]*Spec{
"foo": &Spec{
Block: &Spec_Attr{
&Attr{
Type: "string",
Required: false,
},
},
},
"bar": &Spec{
Block: &Spec_Attr{
&Attr{
Type: "number",
Required: true,
},
},
},
"baz": &Spec{
Block: &Spec_Attr{
&Attr{
Type: "bool",
},
},
},
},
},
},
},
Expected: hcldec.ObjectSpec(map[string]hcldec.Spec{
"foo": &hcldec.AttrSpec{
Name: "foo",
Type: cty.String,
Required: false,
},
"bar": &hcldec.AttrSpec{
Name: "bar",
Type: cty.Number,
Required: true,
},
"baz": &hcldec.AttrSpec{
Name: "baz",
Type: cty.Bool,
Required: false,
},
}),
},
}
testSpecConversions(t, tests)
}
func TestDec_Convert_Array(t *testing.T) {
t.Parallel()
tests := []testConversions{
{
Name: "array basic",
Input: &Spec{
Block: &Spec_Array{
Array: &Array{
Values: []*Spec{
&Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Required: true,
Type: "string",
},
},
},
&Spec{
Block: &Spec_Attr{
&Attr{
Name: "bar",
Required: true,
Type: "string",
},
},
},
},
},
},
},
Expected: hcldec.TupleSpec{
&hcldec.AttrSpec{
Name: "foo",
Type: cty.String,
Required: true,
},
&hcldec.AttrSpec{
Name: "bar",
Type: cty.String,
Required: true,
},
},
},
}
testSpecConversions(t, tests)
}
func TestDec_Convert_Attr(t *testing.T) {
t.Parallel()
tests := []testConversions{
{
Name: "attr basic type",
Input: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Required: true,
Type: "string",
},
},
},
Expected: &hcldec.AttrSpec{
Name: "foo",
Type: cty.String,
Required: true,
},
},
{
Name: "attr object type",
Input: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Required: true,
Type: "object({name1 = string, name2 = bool})",
},
},
},
Expected: &hcldec.AttrSpec{
Name: "foo",
Type: cty.Object(map[string]cty.Type{
"name1": cty.String,
"name2": cty.Bool,
}),
Required: true,
},
},
{
Name: "attr no name",
Input: &Spec{
Block: &Spec_Attr{
&Attr{
Required: true,
Type: "string",
},
},
},
ExpectedError: "Missing name in attribute spec",
},
}
testSpecConversions(t, tests)
}
func TestDec_Convert_Block(t *testing.T) {
t.Parallel()
tests := []testConversions{
{
Name: "block with attr",
Input: &Spec{
Block: &Spec_BlockValue{
BlockValue: &Block{
Name: "test",
Required: true,
Nested: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Type: "string",
},
},
},
},
},
},
Expected: &hcldec.BlockSpec{
TypeName: "test",
Required: true,
Nested: &hcldec.AttrSpec{
Name: "foo",
Type: cty.String,
Required: false,
},
},
},
{
Name: "block with nested block",
Input: &Spec{
Block: &Spec_BlockValue{
BlockValue: &Block{
Name: "test",
Required: true,
Nested: &Spec{
Block: &Spec_BlockValue{
BlockValue: &Block{
Name: "test",
Required: true,
Nested: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Type: "string",
},
},
},
},
},
},
},
},
},
Expected: &hcldec.BlockSpec{
TypeName: "test",
Required: true,
Nested: &hcldec.BlockSpec{
TypeName: "test",
Required: true,
Nested: &hcldec.AttrSpec{
Name: "foo",
Type: cty.String,
Required: false,
},
},
},
},
}
testSpecConversions(t, tests)
}
func TestDec_Convert_BlockList(t *testing.T) {
t.Parallel()
tests := []testConversions{
{
Name: "block list with attr",
Input: &Spec{
Block: &Spec_BlockList{
BlockList: &BlockList{
Name: "test",
MinItems: 1,
MaxItems: 3,
Nested: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Type: "string",
},
},
},
},
},
},
Expected: &hcldec.BlockListSpec{
TypeName: "test",
MinItems: 1,
MaxItems: 3,
Nested: &hcldec.AttrSpec{
Name: "foo",
Type: cty.String,
Required: false,
},
},
},
{
Name: "block list no name",
Input: &Spec{
Block: &Spec_BlockList{
BlockList: &BlockList{
MinItems: 1,
MaxItems: 3,
Nested: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Type: "string",
},
},
},
},
},
},
ExpectedError: "Missing name in block_list spec",
},
}
testSpecConversions(t, tests)
}
func TestDec_Convert_BlockSet(t *testing.T) {
t.Parallel()
tests := []testConversions{
{
Name: "block set with attr",
Input: &Spec{
Block: &Spec_BlockSet{
BlockSet: &BlockSet{
Name: "test",
MinItems: 1,
MaxItems: 3,
Nested: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Type: "string",
},
},
},
},
},
},
Expected: &hcldec.BlockSetSpec{
TypeName: "test",
MinItems: 1,
MaxItems: 3,
Nested: &hcldec.AttrSpec{
Name: "foo",
Type: cty.String,
Required: false,
},
},
},
{
Name: "block set missing name",
Input: &Spec{
Block: &Spec_BlockSet{
BlockSet: &BlockSet{
MinItems: 1,
MaxItems: 3,
Nested: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Type: "string",
},
},
},
},
},
},
ExpectedError: "Missing name in block_set spec",
},
}
testSpecConversions(t, tests)
}
func TestDec_Convert_BlockMap(t *testing.T) {
t.Parallel()
tests := []testConversions{
{
Name: "block map with attr",
Input: &Spec{
Block: &Spec_BlockMap{
BlockMap: &BlockMap{
Name: "test",
Labels: []string{"key1", "key2"},
Nested: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Type: "string",
},
},
},
},
},
},
Expected: &hcldec.BlockMapSpec{
TypeName: "test",
LabelNames: []string{"key1", "key2"},
Nested: &hcldec.AttrSpec{
Name: "foo",
Type: cty.String,
Required: false,
},
},
},
{
Name: "block map missing name",
Input: &Spec{
Block: &Spec_BlockMap{
BlockMap: &BlockMap{
Labels: []string{"key1", "key2"},
Nested: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Type: "string",
},
},
},
},
},
},
ExpectedError: "Missing name in block_map spec",
},
{
Name: "block map missing labels",
Input: &Spec{
Block: &Spec_BlockMap{
BlockMap: &BlockMap{
Name: "foo",
Nested: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Type: "string",
},
},
},
},
},
},
ExpectedError: "Invalid block label name list",
},
}
testSpecConversions(t, tests)
}
func TestDec_Convert_Default(t *testing.T) {
t.Parallel()
tests := []testConversions{
{
Name: "default attr",
Input: &Spec{
Block: &Spec_Default{
Default: &Default{
Primary: &Spec{
Block: &Spec_Attr{
&Attr{
Name: "foo",
Type: "string",
Required: true,
},
},
},
Default: &Spec{
Block: &Spec_Literal{
&Literal{
Value: "\"hi\"",
},
},
},
},
},
},
Expected: &hcldec.DefaultSpec{
Primary: &hcldec.AttrSpec{
Name: "foo",
Type: cty.String,
Required: true,
},
Default: &hcldec.LiteralSpec{
Value: cty.StringVal("hi"),
},
},
},
}
testSpecConversions(t, tests)
}
func TestDec_Convert_Literal(t *testing.T) {
t.Parallel()
tests := []testConversions{
{
Name: "bool: true",
Input: &Spec{
Block: &Spec_Literal{
Literal: &Literal{
Value: "true",
},
},
},
Expected: &hcldec.LiteralSpec{
Value: cty.BoolVal(true),
},
},
{
Name: "bool: false",
Input: &Spec{
Block: &Spec_Literal{
Literal: &Literal{
Value: "false",
},
},
},
Expected: &hcldec.LiteralSpec{
Value: cty.BoolVal(false),
},
},
{
Name: "string",
Input: &Spec{
Block: &Spec_Literal{
Literal: &Literal{
Value: "\"hi\"",
},
},
},
Expected: &hcldec.LiteralSpec{
Value: cty.StringVal("hi"),
},
},
{
Name: "string w/ func",
Input: &Spec{
Block: &Spec_Literal{
Literal: &Literal{
Value: "reverse(\"hi\")",
},
},
},
Expected: &hcldec.LiteralSpec{
Value: cty.StringVal("ih"),
},
},
{
Name: "list string",
Input: &Spec{
Block: &Spec_Literal{
Literal: &Literal{
Value: "[\"hi\", \"bye\"]",
},
},
},
Expected: &hcldec.LiteralSpec{
Value: cty.TupleVal([]cty.Value{cty.StringVal("hi"), cty.StringVal("bye")}),
},
},
}
testSpecConversions(t, tests)
}

File diff suppressed because it is too large


@ -0,0 +1,382 @@
syntax = "proto3";
option go_package = "hclspec";
/*option go_package = "github.com/hashicorp/nomad/plugins/shared/hclspec";*/
/* Spec exposes the specification for an HCL body, allowing for parsing and
validation.
Certain expressions within a specification may use the following functions.
The documentation for each spec type above specifies where functions may
be used.
* `abs(number)` returns the absolute (positive) value of the given number.
* `coalesce(vals...)` returns the first non-null value given.
* `concat(lists...)` concatenates together all of the given lists to produce a new list.
* `hasindex(val, idx)` returns true if the expression `val[idx]` could succeed.
* `int(number)` returns the integer portion of the given number, rounding towards zero.
* `jsondecode(str)` interprets the given string as JSON and returns the resulting data structure.
* `jsonencode(val)` returns a JSON-serialized version of the given value.
* `length(collection)` returns the number of elements in the given collection (list, set, map, object, or tuple).
* `lower(string)` returns the given string with all uppercase letters converted to lowercase.
* `max(numbers...)` returns the greatest of the given numbers.
* `min(numbers...)` returns the smallest of the given numbers.
* `reverse(string)` returns the given string with all of the characters in reverse order.
* `strlen(string)` returns the number of characters in the given string.
* `substr(string, offset, length)` returns the requested substring of the given string.
* `upper(string)` returns the given string with all lowercase letters converted to uppercase.
## Type Expressions
Type expressions are used to describe the expected type of an attribute, as
an additional validation constraint.
A type expression uses primitive type names and compound type constructors.
A type constructor builds a new type based on one or more type expression
arguments.
The following type names and type constructors are supported:
* `any` is a wildcard that accepts a value of any type. (In HCL terms, this
is the _dynamic pseudo-type_.)
* `string` is a Unicode string.
* `number` is an arbitrary-precision floating point number.
* `bool` is a boolean value (`true` or `false`).
* `list(element_type)` constructs a list type with the given element type.
* `set(element_type)` constructs a set type with the given element type.
* `map(element_type)` constructs a map type with the given element type.
* `object({name1 = element_type, name2 = element_type, ...})` constructs
an object type with the given attribute types.
* `tuple([element_type, element_type, ...])` constructs a tuple type with
the given element types. This can be used, for example, to require an
array with a particular number of elements, or with elements of different
types.
`null` is a valid value of any type, and not a type itself.
*/
package hashicorp.nomad.plugins.shared.hclspec;
// Spec defines the available specification types.
message Spec {
oneof block {
Object object = 1;
Array array = 2;
Attr Attr = 3;
Block block_value = 4;
BlockList block_list = 5;
BlockSet block_set = 6;
BlockMap block_map = 7;
Default default = 8;
Literal literal = 9;
}
}
/* Attr spec type reads the value of an attribute in the current body
and returns that value as its result. It also creates validation constraints
for the given attribute name and its value.
```hcl
Attr {
name = "document_root"
type = string
required = true
}
```
`Attr` spec blocks accept the following arguments:
* `name` (required) - The attribute name to expect within the HCL input file.
This may be omitted when a default name selector is created by a parent
`Object` spec, if the input attribute name should match the output JSON
object property name.
* `type` (optional) - A [type expression](#type-expressions) that the given
attribute value must conform to. If this argument is set, `hcldec` will
automatically convert the given input value to this type or produce an
error if that is not possible.
* `required` (optional) - If set to `true`, `hcldec` will produce an error
if a value is not provided for the source attribute.
`Attr` is a leaf spec type, so no nested spec blocks are permitted.
*/
message Attr {
string name = 1;
string type = 2;
bool required = 3;
}
/* Block spec type applies one nested spec block to the contents of a
block within the current body and returns the result of that spec. It also
creates validation constraints for the given block type name.
```hcl
Block {
name = "logging"
Object {
Attr "level" {
type = string
}
Attr "file" {
type = string
}
}
}
```
`Block` spec blocks accept the following arguments:
* `name` (required) - The block type name to expect within the HCL
input file. This may be omitted when a default name selector is created
by a parent `Object` spec, if the input block type name should match the
output JSON object property name.
* `required` (optional) - If set to `true`, `hcldec` will produce an error
if a block of the specified type is not present in the current body.
`Block` creates a validation constraint that there must be zero or one blocks
of the given type name, or exactly one if `required` is set.
`Block` expects a single nested spec block, which is applied to the body of
the block of the given type when it is present.
*/
message Block {
string name = 1;
bool required = 2;
Spec nested = 3;
}
/* BlockList spec type is similar to `Block`, but it accepts zero or
more blocks of a specified type rather than requiring zero or one. The
result is a JSON array with one entry per block of the given type.
```hcl
BlockList {
name = "log_file"
Object {
Attr "level" {
type = string
}
Attr "filename" {
type = string
required = true
}
}
}
```
`BlockList` spec blocks accept the following arguments:
* `name` (required) - The block type name to expect within the HCL
input file. This may be omitted when a default name selector is created
by a parent `Object` spec, if the input block type name should match the
output JSON object property name.
* `min_items` (optional) - If set to a number greater than zero, `hcldec` will
produce an error if fewer than the given number of blocks are present.
* `max_items` (optional) - If set to a number greater than zero, `hcldec` will
produce an error if more than the given number of blocks are present. This
attribute must be greater than or equal to `min_items` if both are set.
`BlockList` creates a validation constraint on the number of blocks of the given
type that must be present.
`BlockList` expects a single nested spec block, which is applied to the body of
each matching block to produce the resulting list items.
*/
message BlockList {
string name = 1;
uint64 min_items = 2;
uint64 max_items = 3;
Spec nested = 4;
}
/* BlockSet spec type behaves the same as BlockList except that
the result is in no specific order and any duplicate items are removed.
```hcl
BlockSet {
name = "log_file"
Object {
Attr "level" {
type = string
}
Attr "filename" {
type = string
required = true
}
}
}
```
The contents of `BlockSet` are the same as for `BlockList`.
*/
message BlockSet {
string name = 1;
uint64 min_items = 2;
uint64 max_items = 3;
Spec nested = 4;
}
/* BlockMap spec type is similar to `Block`, but it accepts zero or
more blocks of a specified type rather than requiring zero or one. The
result is a JSON object, or possibly multiple nested JSON objects, whose
properties are derived from the labels set on each matching block.
```hcl
BlockMap {
name = "log_file"
labels = ["filename"]
Object {
Attr "level" {
type = string
required = true
}
}
}
```
`BlockMap` spec blocks accept the following arguments:
* `name` (required) - The block type name to expect within the HCL
input file. This may be omitted when a default name selector is created
by a parent `Object` spec, if the input block type name should match the
output JSON object property name.
* `labels` (required) - A list of user-oriented block label names. Each entry
in this list creates one level of object within the output value, and
requires one additional block header label on any child block of this type.
Block header labels are the quoted strings that appear after the block type
name but before the opening `{`.
`BlockMap` creates a validation constraint on the number of labels that blocks
of the given type must have.
`BlockMap` expects a single nested spec block, which is applied to the body of
each matching block to produce the resulting map items.
*/
message BlockMap {
string name = 1;
repeated string labels = 2;
Spec nested = 3;
}
/* Literal spec type returns a given literal value, and creates no
validation constraints. It is most commonly used with the `Default` spec
type to create a fallback value, but can also be used e.g. to fill out
required properties in an `Object` spec that do not correspond to any
construct in the input configuration.
```hcl
Literal {
value = "hello world"
}
```
`Literal` spec blocks accept the following argument:
* `value` (required) - The value to return. This attribute may be an expression
that uses [functions](#spec-definition-functions).
`Literal` is a leaf spec type, so no nested spec blocks are permitted.
*/
message Literal {
string value = 1;
}
/* Default spec type evaluates a sequence of nested specs in turn and
returns the result of the first one that produces a non-null value.
It creates no validation constraints of its own, but passes on the validation
constraints from its first nested block.
```hcl
Default {
Attr {
name = "private"
type = bool
}
Literal {
value = false
}
}
```
A `Default` spec block must have at least one nested spec block, and should
generally have at least two since otherwise the `Default` wrapper is a no-op.
The second and any subsequent spec blocks are _fallback_ specs. These exhibit
their usual behavior but are not able to impose validation constraints on the
current body since they are not evaluated unless all prior specs produce
`null` as their result.
*/
message Default {
Spec primary = 1;
Spec default = 2;
}
/* Object spec type is the most commonly used at the root of a spec file.
Its result is a JSON object whose properties are set based on any nested
spec blocks:
```hcl
Object {
Attr "name" {
type = "string"
}
Block "address" {
Object {
Attr "street" {
type = "string"
}
# ...
}
}
}
```
Nested spec blocks inside `Object` must always have an extra block label
`"name"`, `"address"` and `"street"` in the above example) that specifies
the name of the property that should be created in the JSON object result.
This label also acts as a default name selector for the nested spec, allowing
the `Attr` blocks in the above example to omit the usually-required `name`
argument in cases where the HCL input name and JSON output name are the same.
An `Object` spec block creates no validation constraints, but it passes on
any validation constraints created by the nested specs.
*/
message Object {
map<string, Spec> attributes = 1;
}
/* Array spec type produces a JSON array whose elements are set based on
any nested spec blocks:
```hcl
Array {
Attr {
name = "first_element"
type = "string"
}
Attr {
name = "second_element"
type = "string"
}
}
```
An `Array` spec block creates no validation constraints, but it passes on
any validation constraints created by the nested specs.
*/
message Array {
repeated Spec values = 1;
}
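To connect these messages back to the `Block` documentation above, here is a sketch (illustrative, not part of this commit; the variable name is hypothetical) of building the `logging` example from Go. Note how the `Object` map keys double as default name selectors, so the nested `Attr` specs can omit `name`:

```go
package main

import hclspec "github.com/hashicorp/nomad/plugins/shared/hclspec"

// loggingSpec mirrors the `logging` HCL example from the Block
// documentation above, expressed as the proto messages it decodes from.
var loggingSpec = &hclspec.Spec{
	Block: &hclspec.Spec_BlockValue{
		BlockValue: &hclspec.Block{
			Name: "logging",
			Nested: &hclspec.Spec{
				Block: &hclspec.Spec_Object{
					Object: &hclspec.Object{
						Attributes: map[string]*hclspec.Spec{
							// Map keys act as the default name selector for
							// each nested Attr, so `name` can be omitted.
							"level": {Block: &hclspec.Spec_Attr{Attr: &hclspec.Attr{Type: "string"}}},
							"file":  {Block: &hclspec.Spec_Attr{Attr: &hclspec.Attr{Type: "string"}}},
						},
					},
				},
			},
		},
	},
}

func main() { _ = loggingSpec }
```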


@ -0,0 +1,25 @@
package hclspec
import (
"github.com/zclconf/go-cty/cty/function"
"github.com/zclconf/go-cty/cty/function/stdlib"
)
// specFuncs exposes the stdlib functions.
var specFuncs = map[string]function.Function{
"abs": stdlib.AbsoluteFunc,
"coalesce": stdlib.CoalesceFunc,
"concat": stdlib.ConcatFunc,
"hasindex": stdlib.HasIndexFunc,
"int": stdlib.IntFunc,
"jsondecode": stdlib.JSONDecodeFunc,
"jsonencode": stdlib.JSONEncodeFunc,
"length": stdlib.LengthFunc,
"lower": stdlib.LowerFunc,
"max": stdlib.MaxFunc,
"min": stdlib.MinFunc,
"reverse": stdlib.ReverseFunc,
"strlen": stdlib.StrlenFunc,
"substr": stdlib.SubstrFunc,
"upper": stdlib.UpperFunc,
}
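A sketch of how this function table is consumed: expressions (for example in `Literal` specs) are evaluated against an `hcl.EvalContext` whose `Functions` field is `specFuncs`, which is presumably how the package's `specCtx` is wired. The helper below is illustrative, not part of the commit:

```go
package hclspec

import (
	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

// exampleEval evaluates an expression with the stdlib functions above
// in scope, much as Literal spec values are evaluated.
func exampleEval() (cty.Value, hcl.Diagnostics) {
	ctx := &hcl.EvalContext{Functions: specFuncs}
	expr, diags := hclsyntax.ParseExpression(
		[]byte(`upper("hi")`), "example", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		return cty.NilVal, diags
	}
	return expr.Value(ctx) // cty.StringVal("HI")
}
```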


@ -0,0 +1,129 @@
package hclspec
import (
"fmt"
"reflect"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function"
)
var typeType = cty.Capsule("type", reflect.TypeOf(cty.NilType))
var typeEvalCtx = &hcl.EvalContext{
Variables: map[string]cty.Value{
"string": wrapTypeType(cty.String),
"bool": wrapTypeType(cty.Bool),
"number": wrapTypeType(cty.Number),
"any": wrapTypeType(cty.DynamicPseudoType),
},
Functions: map[string]function.Function{
"list": function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "element_type",
Type: typeType,
},
},
Type: function.StaticReturnType(typeType),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
ety := unwrapTypeType(args[0])
ty := cty.List(ety)
return wrapTypeType(ty), nil
},
}),
"set": function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "element_type",
Type: typeType,
},
},
Type: function.StaticReturnType(typeType),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
ety := unwrapTypeType(args[0])
ty := cty.Set(ety)
return wrapTypeType(ty), nil
},
}),
"map": function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "element_type",
Type: typeType,
},
},
Type: function.StaticReturnType(typeType),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
ety := unwrapTypeType(args[0])
ty := cty.Map(ety)
return wrapTypeType(ty), nil
},
}),
"tuple": function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "element_types",
Type: cty.List(typeType),
},
},
Type: function.StaticReturnType(typeType),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
etysVal := args[0]
etys := make([]cty.Type, 0, etysVal.LengthInt())
for it := etysVal.ElementIterator(); it.Next(); {
_, wrapEty := it.Element()
etys = append(etys, unwrapTypeType(wrapEty))
}
ty := cty.Tuple(etys)
return wrapTypeType(ty), nil
},
}),
"object": function.New(&function.Spec{
Params: []function.Parameter{
{
Name: "attribute_types",
Type: cty.Map(typeType),
},
},
Type: function.StaticReturnType(typeType),
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
atysVal := args[0]
atys := make(map[string]cty.Type)
for it := atysVal.ElementIterator(); it.Next(); {
nameVal, wrapAty := it.Element()
name := nameVal.AsString()
atys[name] = unwrapTypeType(wrapAty)
}
ty := cty.Object(atys)
return wrapTypeType(ty), nil
},
}),
},
}
func evalTypeExpr(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
result, diags := expr.Value(typeEvalCtx)
if result.IsNull() {
return cty.DynamicPseudoType, diags
}
if !result.Type().Equals(typeType) {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid type expression",
Detail: fmt.Sprintf("A type is required, not %s.", result.Type().FriendlyName()),
})
return cty.DynamicPseudoType, diags
}
return unwrapTypeType(result), diags
}
func wrapTypeType(ty cty.Type) cty.Value {
return cty.CapsuleVal(typeType, &ty)
}
func unwrapTypeType(val cty.Value) cty.Type {
return *(val.EncapsulatedValue().(*cty.Type))
}
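For completeness, a small in-package sketch (illustrative only; the helper name is hypothetical) of feeding a parsed type expression through `evalTypeExpr`:

```go
package hclspec

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

// exampleTypeExpr parses and evaluates a compound type expression.
func exampleTypeExpr() error {
	expr, parseDiags := hclsyntax.ParseExpression(
		[]byte(`list(map(string))`), "example", hcl.Pos{Line: 1, Column: 1})
	if parseDiags.HasErrors() {
		return parseDiags
	}
	ty, diags := evalTypeExpr(expr)
	if diags.HasErrors() {
		return diags
	}
	fmt.Println(ty.FriendlyName()) // "list of map of string"
	return nil
}
```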

vendor/github.com/agext/levenshtein/DCO generated vendored Normal file

@ -0,0 +1,36 @@
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.

vendor/github.com/agext/levenshtein/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/agext/levenshtein/MAINTAINERS generated vendored Normal file

@ -0,0 +1 @@
Alex Bucataru <alex@alrux.com> (@AlexBucataru)

vendor/github.com/agext/levenshtein/NOTICE generated vendored Normal file

@ -0,0 +1,5 @@
Alrux Go EXTensions (AGExt) - package levenshtein
Copyright 2016 ALRUX Inc.
This product includes software developed at ALRUX Inc.
(http://www.alrux.com/).

vendor/github.com/agext/levenshtein/README.md generated vendored Normal file

@ -0,0 +1,38 @@
# A Go package for calculating the Levenshtein distance between two strings
[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein) 
[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein)
[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein)
[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein)
This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org).
## Project Status
v1.2.1 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on an "AS IS" basis.
This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome.
## Overview
The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
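A minimal sketch of typical usage (not part of the original README):

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// nil params = cost 1 for all operations, no maximum, no minimum score.
	fmt.Println(levenshtein.Distance("kitten", "sitting", nil)) // 3

	p := levenshtein.NewParams()
	fmt.Println(levenshtein.Similarity("kitten", "sitting", p)) // ~0.571 (1 - 3/7)
	// Below the default bonus threshold of 0.7, Match returns the same score.
	fmt.Println(levenshtein.Match("kitten", "sitting", p))
}
```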
## Installation
```
go get github.com/agext/levenshtein
```
## License
Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.

vendor/github.com/agext/levenshtein/levenshtein.go generated vendored Normal file

@ -0,0 +1,290 @@
// Copyright 2016 ALRUX Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure.
The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
*/
package levenshtein
// Calculate determines the Levenshtein distance between two strings, using
// the given costs for each edit operation. It returns the distance along with
// the lengths of the longest common prefix and suffix.
//
// If maxCost is non-zero, the calculation stops as soon as the distance is determined
// to be greater than maxCost. Therefore, any return value higher than maxCost is a
// lower bound for the actual distance.
func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) {
l1, l2 := len(str1), len(str2)
// trim common prefix, if any, as it doesn't affect the distance
for ; prefixLen < l1 && prefixLen < l2; prefixLen++ {
if str1[prefixLen] != str2[prefixLen] {
break
}
}
str1, str2 = str1[prefixLen:], str2[prefixLen:]
l1 -= prefixLen
l2 -= prefixLen
// trim common suffix, if any, as it doesn't affect the distance
for 0 < l1 && 0 < l2 {
if str1[l1-1] != str2[l2-1] {
str1, str2 = str1[:l1], str2[:l2]
break
}
l1--
l2--
suffixLen++
}
// if the first string is empty, the distance is the length of the second string times the cost of insertion
if l1 == 0 {
dist = l2 * insCost
return
}
// if the second string is empty, the distance is the length of the first string times the cost of deletion
if l2 == 0 {
dist = l1 * delCost
return
}
// variables used in inner "for" loops
var y, dy, c, l int
// if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited'
if maxCost > 0 {
if subCost < delCost+insCost {
if maxCost >= l1*subCost+(l2-l1)*insCost {
maxCost = 0
}
} else {
if maxCost >= l1*delCost+l2*insCost {
maxCost = 0
}
}
}
if maxCost > 0 {
// prefer the longer string first, to minimize time;
// a swap also transposes the meanings of insertion and deletion.
if l1 < l2 {
str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
}
// the length differential times cost of deletion is a lower bound for the cost;
// if it is higher than the maxCost, there is no point going into the main calculation.
if dist = (l1 - l2) * delCost; dist > maxCost {
return
}
d := make([]int, l1+1)
// offset and length of d in the current row
doff, dlen := 0, 1
for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ {
d[y] = dy
y++
dy = y * delCost
}
// fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
for x := 0; x < l2; x++ {
dy, d[doff] = d[doff], d[doff]+insCost
for d[doff] > maxCost && dlen > 0 {
if str1[doff] != str2[x] {
dy += subCost
}
doff++
dlen--
if c = d[doff] + insCost; c < dy {
dy = c
}
dy, d[doff] = d[doff], dy
}
for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy {
if str1[y] != str2[x] {
dy += subCost
}
if c = d[y] + delCost; c < dy {
dy = c
}
y++
if c = d[y] + insCost; c < dy {
dy = c
}
}
if y < l1 {
if str1[y] != str2[x] {
dy += subCost
}
if c = d[y] + delCost; c < dy {
dy = c
}
for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy {
y++
dlen++
}
}
// fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
if dlen == 0 {
dist = maxCost + 1
return
}
}
if doff+dlen-1 < l1 {
dist = maxCost + 1
return
}
dist = d[l1]
} else {
// ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is
// worth implementing the diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space
// http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html
// prefer the shorter string first, to minimize space; time is O(l1*l2) anyway;
// a swap also transposes the meanings of insertion and deletion.
if l1 > l2 {
str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
}
d := make([]int, l1+1)
for y = 1; y <= l1; y++ {
d[y] = y * delCost
}
for x := 0; x < l2; x++ {
dy, d[0] = d[0], d[0]+insCost
for y = 0; y < l1; dy, d[y] = d[y], dy {
if str1[y] != str2[x] {
dy += subCost
}
if c = d[y] + delCost; c < dy {
dy = c
}
y++
if c = d[y] + insCost; c < dy {
dy = c
}
}
}
dist = d[l1]
}
return
}
// Distance returns the Levenshtein distance between str1 and str2, using the
// default or provided cost values. Pass nil for the third argument to use the
// default cost of 1 for all three operations, with no maximum.
func Distance(str1, str2 string, p *Params) int {
if p == nil {
p = defaultParams
}
dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost)
return dist
}
// Similarity returns a score in the range of 0..1 for how similar the two strings are.
// A score of 1 means the strings are identical, and 0 means they have nothing in common.
//
// A nil third argument uses the default cost of 1 for all three operations.
//
// If a non-zero MinScore value is provided in the parameters, scores lower than it
// will be returned as 0.
func Similarity(str1, str2 string, p *Params) float64 {
return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus
}
// Match returns a similarity score adjusted by the same method as proposed by Winkler for
// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their
// similarity score is already over a threshold.
//
// The score is in the range of 0..1, with 1 meaning the strings are identical,
// and 0 meaning they have nothing in common.
//
// A nil third argument uses the default cost of 1 for all three operations, maximum length of
// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7.
//
// If a non-zero MinScore value is provided in the parameters, scores lower than it
// will be returned as 0.
func Match(str1, str2 string, p *Params) float64 {
s1, s2 := []rune(str1), []rune(str2)
l1, l2 := len(s1), len(s2)
// two empty strings are identical; shortcut also avoids divByZero issues later on.
if l1 == 0 && l2 == 0 {
return 1
}
if p == nil {
p = defaultParams
}
// a min over 1 can never be satisfied, so the score is 0.
if p.minScore > 1 {
return 0
}
insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0
if l1 > l2 {
l1, l2, insCost, delCost = l2, l1, delCost, insCost
}
if p.subCost < delCost+insCost {
maxDist = l1*p.subCost + (l2-l1)*insCost
} else {
maxDist = l1*delCost + l2*insCost
}
// a zero min is always satisfied, so no need to set a max cost.
if p.minScore > 0 {
// if p.minScore is lower than p.bonusThreshold, we can use a simplified formula
// for the max cost, because a sim score below min cannot receive a bonus.
if p.minScore < p.bonusThreshold {
// round down the max - a cost equal to a rounded up max would already be under min.
max = int((1 - p.minScore) * float64(maxDist))
} else {
// p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim)
// p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist))
// p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist
// 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist
// (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist
max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale))
}
}
dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost)
if max > 0 && dist > max {
return 0
}
sim := 1 - float64(dist)/float64(maxDist)
if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 {
if pl > p.bonusPrefix {
pl = p.bonusPrefix
}
sim += float64(pl) * p.bonusScale * (1 - sim)
}
if sim < p.minScore {
return 0
}
return sim
}
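A sketch (not part of the vendored file) of calling `Calculate` directly, which also reports the common prefix and suffix lengths:

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	dist, prefix, suffix := levenshtein.Calculate(
		[]rune("unhappiness"), []rune("unhappily"),
		0,       // maxCost: 0 means unlimited
		1, 1, 1, // insertion, substitution, deletion costs
	)
	fmt.Println(dist, prefix, suffix) // 4 7 0: "unhappi" is the shared prefix
}
```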

vendor/github.com/agext/levenshtein/params.go generated vendored Normal file

@ -0,0 +1,152 @@
// Copyright 2016 ALRUX Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package levenshtein
// Params represents a set of parameter values for the various formulas involved
// in the calculation of the Levenshtein string metrics.
type Params struct {
insCost int
subCost int
delCost int
maxCost int
minScore float64
bonusPrefix int
bonusScale float64
bonusThreshold float64
}
var (
defaultParams = NewParams()
)
// NewParams creates a new set of parameters and initializes it with the default values.
func NewParams() *Params {
return &Params{
insCost: 1,
subCost: 1,
delCost: 1,
maxCost: 0,
minScore: 0,
bonusPrefix: 4,
bonusScale: .1,
bonusThreshold: .7,
}
}
// Clone returns a pointer to a copy of the receiver parameter set, or of a new
// default parameter set if the receiver is nil.
func (p *Params) Clone() *Params {
if p == nil {
return NewParams()
}
return &Params{
insCost: p.insCost,
subCost: p.subCost,
delCost: p.delCost,
maxCost: p.maxCost,
minScore: p.minScore,
bonusPrefix: p.bonusPrefix,
bonusScale: p.bonusScale,
bonusThreshold: p.bonusThreshold,
}
}
// InsCost overrides the default value of 1 for the cost of insertion.
// The new value must be zero or positive.
func (p *Params) InsCost(v int) *Params {
if v >= 0 {
p.insCost = v
}
return p
}
// SubCost overrides the default value of 1 for the cost of substitution.
// The new value must be zero or positive.
func (p *Params) SubCost(v int) *Params {
if v >= 0 {
p.subCost = v
}
return p
}
// DelCost overrides the default value of 1 for the cost of deletion.
// The new value must be zero or positive.
func (p *Params) DelCost(v int) *Params {
if v >= 0 {
p.delCost = v
}
return p
}
// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost.
// The calculation of Distance() stops when the result is guaranteed to exceed
// this maximum, returning a lower-bound rather than exact value.
// The new value must be zero or positive.
func (p *Params) MaxCost(v int) *Params {
if v >= 0 {
p.maxCost = v
}
return p
}
// MinScore overrides the default value of 0 for the minimum similarity score.
// Scores below this threshold are returned as 0 by Similarity() and Match().
// The new value must be zero or positive. Note that a minimum greater than 1
// can never be satisfied, resulting in a score of 0 for any pair of strings.
func (p *Params) MinScore(v float64) *Params {
if v >= 0 {
p.minScore = v
}
return p
}
// BonusPrefix overrides the default value for the maximum length of
// common prefix to be considered for bonus by Match().
// The new value must be zero or positive.
func (p *Params) BonusPrefix(v int) *Params {
if v >= 0 {
p.bonusPrefix = v
}
return p
}
// BonusScale overrides the default value for the scaling factor used by Match()
// in calculating the bonus.
// The new value must be zero or positive. To guarantee that the similarity score
// remains in the interval 0..1, this scaling factor is not allowed to exceed
// 1 / BonusPrefix.
func (p *Params) BonusScale(v float64) *Params {
if v >= 0 {
p.bonusScale = v
}
// the bonus cannot exceed (1-sim), or the score may become greater than 1.
if float64(p.bonusPrefix)*p.bonusScale > 1 {
p.bonusScale = 1 / float64(p.bonusPrefix)
}
return p
}
// BonusThreshold overrides the default value for the minimum similarity score
// for which Match() can assign a bonus.
// The new value must be zero or positive. Note that a threshold greater than 1
// effectively makes Match() become the equivalent of Similarity().
func (p *Params) BonusThreshold(v float64) *Params {
if v >= 0 {
p.bonusThreshold = v
}
return p
}
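Each setter returns the receiver, so parameter sets compose as a fluent chain. A brief sketch (illustrative, not from the vendored file):

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// Substitutions cost double; give up once the distance exceeds 5.
	p := levenshtein.NewParams().SubCost(2).MaxCost(5)
	fmt.Println(levenshtein.Distance("example", "samples", p)) // 4
}
```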

vendor/github.com/apparentlymart/go-textseg/LICENSE generated vendored Normal file

@ -0,0 +1,95 @@
Copyright (c) 2017 Martin Atkins
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---------
Unicode table generation programs are under a separate copyright and license:
Copyright (c) 2014 Couchbase, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions
and limitations under the License.
---------
Grapheme break data is provided as part of the Unicode character database,
copyright 2016 Unicode, Inc, which is provided with the following license:
Unicode Data Files include all data files under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.
Unicode Data Files do not include PDF online code charts under the
directory http://www.unicode.org/Public/.
Software includes any source code published in the Unicode Standard
or under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.
NOTICE TO USER: Carefully read the following legal agreement.
BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
TERMS AND CONDITIONS OF THIS AGREEMENT.
IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
THE DATA FILES OR SOFTWARE.
COPYRIGHT AND PERMISSION NOTICE
Copyright © 1991-2017 Unicode, Inc. All rights reserved.
Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
Permission is hereby granted, free of charge, to any person obtaining
a copy of the Unicode data files and any associated documentation
(the "Data Files") or Unicode software and any associated documentation
(the "Software") to deal in the Data Files or Software
without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, and/or sell copies of
the Data Files or Software, and to permit persons to whom the Data Files
or Software are furnished to do so, provided that either
(a) this copyright and permission notice appear with all copies
of the Data Files or Software, or
(b) this copyright and permission notice appear in associated
Documentation.
THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THE DATA FILES OR SOFTWARE.
Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale,
use or other dealings in these Data Files or Software without prior
written authorization of the copyright holder.


@ -0,0 +1,30 @@
package textseg
import (
"bufio"
"bytes"
)
// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of
// all of the recognized tokens in the given buffer.
func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) {
scanner := bufio.NewScanner(bytes.NewReader(buf))
scanner.Split(splitFunc)
var ret [][]byte
for scanner.Scan() {
ret = append(ret, scanner.Bytes())
}
return ret, scanner.Err()
}
// TokenCount is a utility that uses a bufio.SplitFunc to count the number of
// recognized tokens in the given buffer.
func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) {
scanner := bufio.NewScanner(bytes.NewReader(buf))
scanner.Split(splitFunc)
var ret int
for scanner.Scan() {
ret++
}
return ret, scanner.Err()
}
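For context, a minimal usage sketch of the two helpers above, driving them with the grapheme-cluster scanner defined later in this package. The import path is an assumption (this is the vendored github.com/apparentlymart/go-textseg package), not something stated in the diff:

```go
package main

import (
	"fmt"

	textseg "github.com/apparentlymart/go-textseg/textseg" // assumed vendored import path
)

func main() {
	input := []byte("héllo")

	// Collect every grapheme cluster in the buffer.
	clusters, err := textseg.AllTokens(input, textseg.ScanGraphemeClusters)
	if err != nil {
		panic(err)
	}
	for _, c := range clusters {
		fmt.Printf("%q\n", c)
	}

	// Count clusters without materializing them.
	n, _ := textseg.TokenCount(input, textseg.ScanGraphemeClusters)
	fmt.Println("clusters:", n) // 5
}
```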

View file

@ -0,0 +1,7 @@
package textseg
//go:generate go run make_tables.go -output tables.go
//go:generate go run make_test_tables.go -output tables_test.go
//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl
//go:generate ragel -Z grapheme_clusters.rl
//go:generate gofmt -w grapheme_clusters.go

File diff suppressed because it is too large

View file

@ -0,0 +1,132 @@
package textseg
import (
"errors"
"unicode/utf8"
)
// Generated from grapheme_clusters.rl. DO NOT EDIT
%%{
# (except you are actually in grapheme_clusters.rl here, so edit away!)
machine graphclust;
write data;
}%%
var Error = errors.New("invalid UTF8 text")
// ScanGraphemeClusters is a split function for bufio.Scanner that splits
// on grapheme cluster boundaries.
func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) {
if len(data) == 0 {
return 0, nil, nil
}
// Ragel state
cs := 0 // Current State
p := 0 // "Pointer" into data
pe := len(data) // End-of-data "pointer"
ts := 0
te := 0
act := 0
eof := pe
// Make Go compiler happy
_ = ts
_ = te
_ = act
_ = eof
startPos := 0
endPos := 0
%%{
include GraphemeCluster "grapheme_clusters_table.rl";
action start {
startPos = p
}
action end {
endPos = p
}
action emit {
return endPos+1, data[startPos:endPos+1], nil
}
ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?;
AnyExtender = Extend | ZWJGlue | SpacingMark;
Extension = AnyExtender*;
ReplacementChar = (0xEF 0xBF 0xBD);
CRLFSeq = CR LF;
ControlSeq = Control | ReplacementChar;
HangulSeq = (
L+ (((LV? V+ | LVT) T*)?|LV?) |
LV V* T* |
V+ T* |
LVT T* |
T+
) Extension;
EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension;
ZWJSeq = ZWJGlue Extension;
EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension;
UTF8Cont = 0x80 .. 0xBF;
AnyUTF8 = (
0x00..0x7F |
0xC0..0xDF . UTF8Cont |
0xE0..0xEF . UTF8Cont . UTF8Cont |
0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
);
# OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension
OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension;
# PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break
PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?;
CRLFTok = CRLFSeq >start @end;
ControlTok = ControlSeq >start @end;
HangulTok = HangulSeq >start @end;
EmojiTok = EmojiSeq >start @end;
ZWJTok = ZWJSeq >start @end;
EmojiFlagTok = EmojiFlagSeq >start @end;
OtherTok = OtherSeq >start @end;
PrependTok = PrependSeq >start @end;
main := |*
CRLFTok => emit;
ControlTok => emit;
HangulTok => emit;
EmojiTok => emit;
ZWJTok => emit;
EmojiFlagTok => emit;
PrependTok => emit;
OtherTok => emit;
# any single valid UTF-8 character would also be valid per spec,
# but we'll handle that separately after the loop so we can deal
# with requesting more bytes if we're not at EOF.
*|;
write init;
write exec;
}%%
// If we fall out here then we were unable to complete a sequence.
// If we weren't able to complete a sequence then either we've
// reached the end of a partial buffer (so there's more data to come)
// or we have an isolated symbol that would normally be part of a
// grapheme cluster but has appeared in isolation here.
if !atEOF {
// Request more
return 0, nil, nil
}
// Just take the first UTF-8 sequence and return that.
_, seqLen := utf8.DecodeRune(data)
return seqLen, data[:seqLen], nil
}
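The scanner above follows the standard bufio.SplitFunc contract: returning (0, nil, nil) asks bufio.Scanner for more input, while (advance, token, nil) emits one grapheme cluster and consumes advance bytes. A hedged sketch of wiring it directly into bufio.Scanner (import path assumed as before):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"

	textseg "github.com/apparentlymart/go-textseg/textseg" // assumed vendored import path
)

func main() {
	// "e" + combining acute accent, then "a": three runes, two clusters.
	sc := bufio.NewScanner(strings.NewReader("e\u0301a"))
	sc.Split(textseg.ScanGraphemeClusters)
	for sc.Scan() {
		fmt.Printf("%q\n", sc.Text()) // "é" then "a": the combining mark stays attached
	}
}
```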

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,335 @@
#!/usr/bin/env ruby
#
# This script has been updated to accept more command-line arguments:
#
# -u, --url URL to process
# -m, --machine Machine name
# -p, --properties Properties to add to the machine
# -o, --output Write output to file
#
# Updated by: Marty Schoch <marty.schoch@gmail.com>
#
# This script uses the unicode spec to generate a Ragel state machine
# that recognizes unicode alphanumeric characters. It generates 5
# character classes: uupper, ulower, ualpha, udigit, and ualnum.
# Currently supported encodings are UTF-8 [default] and UCS-4.
#
# Usage: unicode2ragel.rb [options]
# -e, --encoding [ucs4 | utf8] Data encoding
# -h, --help Show this message
#
# This script was originally written as part of the Ferret search
# engine library.
#
# Author: Rakan El-Khalil <rakan@well.com>
require 'optparse'
require 'open-uri'
ENCODINGS = [ :utf8, :ucs4 ]
ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
DEFAULT_MACHINE_NAME= "WChar"
###
# Display vars & default option
TOTAL_WIDTH = 80
RANGE_WIDTH = 23
@encoding = :utf8
@chart_url = DEFAULT_CHART_URL
machine_name = DEFAULT_MACHINE_NAME
properties = []
@output = $stdout
###
# Option parsing
cli_opts = OptionParser.new do |opts|
opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
@encoding = o.downcase.to_sym
end
opts.on("-h", "--help", "Show this message") do
puts opts
exit
end
opts.on("-u", "--url URL", "URL to process") do |o|
@chart_url = o
end
opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
machine_name = o
end
opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
properties = o
end
opts.on("-o", "--output FILE", "output file") do |o|
@output = File.new(o, "w+")
end
end
cli_opts.parse(ARGV)
unless ENCODINGS.member? @encoding
puts "Invalid encoding: #{@encoding}"
puts cli_opts
exit
end
##
# Downloads the document at url and yields every alpha line's hex
# range and description.
def each_alpha( url, property )
open( url ) do |file|
file.each_line do |line|
next if line =~ /^#/;
next if line !~ /; #{property} #/;
range, description = line.split(/;/)
range.strip!
description.gsub!(/.*#/, '').strip!
if range =~ /\.\./
start, stop = range.split '..'
else start = stop = range
end
yield start.hex .. stop.hex, description
end
end
end
###
# Formats to hex at minimum width
def to_hex( n )
r = "%0X" % n
r = "0#{r}" unless (r.length % 2).zero?
r
end
###
# UCS4 is just a straight hex conversion of the unicode codepoint.
def to_ucs4( range )
rangestr = "0x" + to_hex(range.begin)
rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
[ rangestr ]
end
##
# 0x00 - 0x7f -> 0zzzzzzz[7]
# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
def to_utf8_enc( n )
r = 0
if n <= 0x7f
r = n
elsif n <= 0x7ff
y = 0xc0 | (n >> 6)
z = 0x80 | (n & 0x3f)
r = y << 8 | z
elsif n <= 0xffff
x = 0xe0 | (n >> 12)
y = 0x80 | (n >> 6) & 0x3f
z = 0x80 | n & 0x3f
r = x << 16 | y << 8 | z
elsif n <= 0x10ffff
w = 0xf0 | (n >> 18)
x = 0x80 | (n >> 12) & 0x3f
y = 0x80 | (n >> 6) & 0x3f
z = 0x80 | n & 0x3f
r = w << 24 | x << 16 | y << 8 | z
end
to_hex(r)
end
def from_utf8_enc( n )
n = n.hex
r = 0
if n <= 0x7f
r = n
elsif n <= 0xdfff
y = (n >> 8) & 0x1f
z = n & 0x3f
r = y << 6 | z
elsif n <= 0xefffff
x = (n >> 16) & 0x0f
y = (n >> 8) & 0x3f
z = n & 0x3f
r = x << 10 | y << 6 | z
elsif n <= 0xf7ffffff
w = (n >> 24) & 0x07
x = (n >> 16) & 0x3f
y = (n >> 8) & 0x3f
z = n & 0x3f
r = w << 18 | x << 12 | y << 6 | z
end
r
end
###
# Given a range, splits it up into ranges that can be continuously
# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
# This is not strictly needed since the current [5.1] unicode standard
# doesn't have ranges that straddle utf8 boundaries. This is included
# for completeness as there is no telling if that will ever change.
def utf8_ranges( range )
ranges = []
UTF8_BOUNDARIES.each do |max|
if range.begin <= max
if range.end <= max
ranges << range
return ranges
end
ranges << (range.begin .. max)
range = (max + 1) .. range.end
end
end
ranges
end
def build_range( start, stop )
size = start.size/2
left = size - 1
return [""] if size < 1
a = start[0..1]
b = stop[0..1]
###
# Shared prefix
if a == b
return build_range(start[2..-1], stop[2..-1]).map do |elt|
"0x#{a} " + elt
end
end
###
# Unshared prefix, end of run
return ["0x#{a}..0x#{b} "] if left.zero?
###
# Unshared prefix, not end of run
# Range can be 0x123456..0x56789A
# Which is equivalent to:
# 0x123456 .. 0x12FFFF
# 0x130000 .. 0x55FFFF
# 0x560000 .. 0x56789A
ret = []
ret << build_range(start, a + "FF" * left)
###
# Only generate middle range if need be.
if a.hex+1 != b.hex
max = to_hex(b.hex - 1)
max = "FF" if b == "FF"
ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
end
###
# Don't generate last range if it is covered by first range
ret << build_range(b + "00" * left, stop) unless b == "FF"
ret.flatten!
end
def to_utf8( range )
utf8_ranges( range ).map do |r|
begin_enc = to_utf8_enc(r.begin)
end_enc = to_utf8_enc(r.end)
build_range begin_enc, end_enc
end.flatten!
end
##
# Perform a 3-way comparison of the number of codepoints advertised by
# the unicode spec for the given range, the originally parsed range,
# and the resulting utf8 encoded range.
def count_codepoints( code )
code.split(' ').inject(1) do |acc, elt|
if elt =~ /0x(.+)\.\.0x(.+)/
if @encoding == :utf8
acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
else
acc * ($2.hex - $1.hex + 1)
end
else
acc
end
end
end
def is_valid?( range, desc, codes )
spec_count = 1
spec_count = $1.to_i if desc =~ /\[(\d+)\]/
range_count = range.end - range.begin + 1
sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
sum == spec_count and sum == range_count
end
##
# Generate the state machine to stdout
def generate_machine( name, property )
pipe = " "
@output.puts " #{name} = "
each_alpha( @chart_url, property ) do |range, desc|
codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
#raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
# is_valid? range, desc, codes
range_width = codes.map { |a| a.size }.max
range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11
desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
if desc.size > desc_width
desc = desc[0..desc_width - 4] + "..."
end
codes.each_with_index do |r, idx|
desc = "" unless idx.zero?
code = "%-#{range_width}s" % r
@output.puts " #{pipe} #{code} ##{desc}"
pipe = "|"
end
end
@output.puts " ;"
@output.puts ""
end
@output.puts <<EOF
# The following Ragel file was autogenerated with #{$0}
# from: #{@chart_url}
#
# It defines #{properties}.
#
# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
# and that your input is in #{@encoding}.
%%{
machine #{machine_name};
EOF
properties.each { |x| generate_machine( x, x ) }
@output.puts <<EOF
}%%
EOF

View file

@ -0,0 +1,19 @@
package textseg
import "unicode/utf8"
// ScanUTF8Sequences is a split function for bufio.Scanner that splits
// on UTF8 sequence boundaries.
//
// This is included largely for completeness, since this behavior is already
// built in to Go when ranging over a string.
func ScanUTF8Sequences(data []byte, atEOF bool) (int, []byte, error) {
if len(data) == 0 {
return 0, nil, nil
}
r, seqLen := utf8.DecodeRune(data)
if r == utf8.RuneError && !atEOF {
return 0, nil, nil
}
return seqLen, data[:seqLen], nil
}
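A small illustration of the partial-input behavior above: with atEOF false and a truncated multi-byte sequence, the function returns (0, nil, nil) to request more bytes; once the full sequence is available it emits whatever utf8.DecodeRune recovers. (The import path is again an assumption.)

```go
package main

import (
	"fmt"

	textseg "github.com/apparentlymart/go-textseg/textseg" // assumed vendored import path
)

func main() {
	full := []byte("é") // encodes as 0xC3 0xA9

	adv, tok, err := textseg.ScanUTF8Sequences(full[:1], false)
	fmt.Println(adv, tok, err) // 0 [] <nil>: incomplete sequence, ask for more data

	adv, tok, err = textseg.ScanUTF8Sequences(full, true)
	fmt.Println(adv, string(tok), err) // 2 é <nil>
}
```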

View file

@ -1,4 +1,7 @@
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

View file

@ -41,17 +41,20 @@ import (
"reflect"
)
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
// Marshal reports this when a required field is not initialized.
// Unmarshal reports this when a required field is missing from the wire data.
// RequiredNotSetError is the error returned if Marshal is called with
// a protocol buffer struct whose required fields have not
// all been initialized. It is also the error returned if Unmarshal is
// called with an encoded protocol buffer that does not include all the
// required fields.
//
// When printed, RequiredNotSetError reports the first unset required field in a
// message. If the field cannot be precisely determined, it is reported as
// "{Unknown}".
type RequiredNotSetError struct {
field string
}
func (e *RequiredNotSetError) Error() string {
if e.field == "" {
return fmt.Sprintf("proto: required field not set")
}
return fmt.Sprintf("proto: required field %q not set", e.field)
}

View file

@ -139,7 +139,7 @@ type Properties struct {
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field
proto3 bool // whether this is known to be a proto3 field; set for []byte only
oneof bool // whether this is a oneof field
Default string // default value
@ -148,9 +148,9 @@ type Properties struct {
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
mtype reflect.Type // set for map types only
MapKeyProp *Properties // set for map types only
MapValProp *Properties // set for map types only
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
}
// String formats the properties in the protobuf struct field tag style.
@ -275,16 +275,16 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc
case reflect.Map:
p.mtype = t1
p.MapKeyProp = &Properties{}
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.MapValProp = &Properties{}
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.mvalprop = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
if p.stype != nil {

View file

@ -225,9 +225,6 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
// If the message can marshal itself, let it do it, for compatibility.
// NOTE: This is not efficient.
if u.hasmarshaler {
if deterministic {
return nil, errors.New("proto: deterministic not supported by the Marshal method of " + u.typ.String())
}
m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
b1, err := m.Marshal()
b = append(b, b1...)
@ -280,10 +277,6 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
if err == errRepeatedHasNil {
err = errors.New("proto: repeated field " + f.name + " has nil element")
}
if err == errInvalidUTF8 {
fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
err = fmt.Errorf("proto: string field %q contains invalid UTF-8", fullName)
}
return b, err
}
}
@ -537,7 +530,6 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
packed := false
proto3 := false
validateUTF8 := true
for i := 2; i < len(tags); i++ {
if tags[i] == "packed" {
packed = true
@ -546,7 +538,6 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
proto3 = true
}
}
validateUTF8 = validateUTF8 && proto3
switch t.Kind() {
case reflect.Bool:
@ -744,18 +735,6 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
}
return sizeFloat64Value, appendFloat64Value
case reflect.String:
if validateUTF8 {
if pointer {
return sizeStringPtr, appendUTF8StringPtr
}
if slice {
return sizeStringSlice, appendUTF8StringSlice
}
if nozero {
return sizeStringValueNoZero, appendUTF8StringValueNoZero
}
return sizeStringValue, appendUTF8StringValue
}
if pointer {
return sizeStringPtr, appendStringPtr
}
@ -2005,6 +1984,9 @@ func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byt
}
func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
v := *ptr.toString()
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
@ -2015,6 +1997,9 @@ func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]b
if v == "" {
return b, nil
}
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
@ -2026,58 +2011,15 @@ func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, err
return b, nil
}
v := *p
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
return b, nil
}
func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
s := *ptr.toStringSlice()
for _, v := range s {
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
}
return b, nil
}
func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
v := *ptr.toString()
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
return b, nil
}
func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
v := *ptr.toString()
if v == "" {
return b, nil
}
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
return b, nil
}
func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
p := *ptr.toStringPtr()
if p == nil {
return b, nil
}
v := *p
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
return b, nil
}
func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
s := *ptr.toStringSlice()
for _, v := range s {
if !utf8.ValidString(v) {
@ -2281,25 +2223,6 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
// value.
// Key cannot be pointer-typed.
valIsPtr := valType.Kind() == reflect.Ptr
// If value is a message with nested maps, calling
// valSizer in marshal may be quadratic. We should use
// cached version in marshal (but not in size).
// If value is not message type, we don't have size cache,
// but it cannot be nested either. Just use valSizer.
valCachedSizer := valSizer
if valIsPtr && valType.Elem().Kind() == reflect.Struct {
u := getMarshalInfo(valType.Elem())
valCachedSizer = func(ptr pointer, tagsize int) int {
// Same as message sizer, but use cache.
p := ptr.getPointer()
if p.isNil() {
return 0
}
siz := u.cachedsize(p)
return siz + SizeVarint(uint64(siz)) + tagsize
}
}
return func(ptr pointer, tagsize int) int {
m := ptr.asPointerTo(t).Elem() // the map
n := 0
@ -2326,7 +2249,7 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
kaddr := toAddrPointer(&ki, false) // pointer to key
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
b = appendVarint(b, tag)
siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
b = appendVarint(b, uint64(siz))
b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
if err != nil {

View file

@ -97,8 +97,6 @@ type unmarshalFieldInfo struct {
// if a required field, contains a single set bit at this field's index in the required field list.
reqMask uint64
name string // name of the field, for error reporting
}
var (
@ -183,10 +181,6 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
continue
}
if err != errInternalBadWireType {
if err == errInvalidUTF8 {
fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
err = fmt.Errorf("proto: string field %q contains invalid UTF-8", fullName)
}
return err
}
// Fragments with bad wire type are treated as unknown fields.
@ -357,7 +351,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
}
// Store the info in the correct slot in the message.
u.setTag(tag, toField(&f), unmarshal, reqMask, name)
u.setTag(tag, toField(&f), unmarshal, reqMask)
}
// Find any types associated with oneof fields.
@ -372,17 +366,10 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
f := typ.Field(0) // oneof implementers have one field
baseUnmarshal := fieldUnmarshaler(&f)
tags := strings.Split(f.Tag.Get("protobuf"), ",")
fieldNum, err := strconv.Atoi(tags[1])
tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
tag, err := strconv.Atoi(tagstr)
if err != nil {
panic("protobuf tag field not an integer: " + tags[1])
}
var name string
for _, tag := range tags {
if strings.HasPrefix(tag, "name=") {
name = strings.TrimPrefix(tag, "name=")
break
}
panic("protobuf tag field not an integer: " + tagstr)
}
// Find the oneof field that this struct implements.
@ -393,7 +380,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
// That lets us know where this struct should be stored
// when we encounter it during unmarshaling.
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
u.setTag(fieldNum, of.field, unmarshal, 0, name)
u.setTag(tag, of.field, unmarshal, 0)
}
}
}
@ -414,7 +401,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
}, 0, "")
}, 0)
// Set mask for required field check.
u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
@ -426,9 +413,8 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
// tag = tag # for field
// field/unmarshal = unmarshal info for that field.
// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
// name = short name of the field.
func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
n := u.typ.NumField()
if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
for len(u.dense) <= tag {
@ -456,17 +442,11 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
tagArray := strings.Split(tags, ",")
encoding := tagArray[0]
name := "unknown"
proto3 := false
validateUTF8 := true
for _, tag := range tagArray[3:] {
if strings.HasPrefix(tag, "name=") {
name = tag[5:]
}
if tag == "proto3" {
proto3 = true
}
}
validateUTF8 = validateUTF8 && proto3
// Figure out packaging (pointer, slice, or both)
slice := false
@ -614,15 +594,6 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
}
return unmarshalBytesValue
case reflect.String:
if validateUTF8 {
if pointer {
return unmarshalUTF8StringPtr
}
if slice {
return unmarshalUTF8StringSlice
}
return unmarshalUTF8StringValue
}
if pointer {
return unmarshalStringPtr
}
@ -1477,6 +1448,9 @@ func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
*f.toString() = v
return b[x:], nil
}
@ -1494,69 +1468,14 @@ func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
*f.toStringPtr() = &v
return b[x:], nil
}
func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return b, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
s := f.toStringSlice()
*s = append(*s, v)
return b[x:], nil
}
func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return b, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
*f.toString() = v
return b[x:], nil
}
func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return b, errInternalBadWireType
}
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
if x > uint64(len(b)) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
if !utf8.ValidString(v) {
return nil, errInvalidUTF8
}
*f.toStringPtr() = &v
return b[x:], nil
}
func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
if w != WireBytes {
return b, errInternalBadWireType
}

View file

@ -353,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@ -370,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {

View file

@ -630,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.MapKeyProp); err != nil {
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.MapValProp); err != nil {
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {

View file

@ -130,12 +130,10 @@ func UnmarshalAny(any *any.Any, pb proto.Message) error {
// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
// but it avoids scanning TypeUrl for the slash.
if any == nil {
aname, err := AnyMessageName(any)
if err != nil {
return false
}
name := proto.MessageName(pb)
prefix := len(any.TypeUrl) - len(name)
return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
return aname == proto.MessageName(pb)
}
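For orientation, this is how Is is typically exercised. The sketch below uses the well-known Timestamp type so it stays self-contained; it is illustrative and not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
	// Pack a concrete message into an Any, then test what type it holds.
	any, err := ptypes.MarshalAny(ptypes.TimestampNow())
	if err != nil {
		panic(err)
	}
	fmt.Println(ptypes.Is(any, &timestamp.Timestamp{})) // true
}
```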

View file

@ -1,16 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto
// source: google/protobuf/any.proto
/*
Package any is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/any/any.proto
It has these top-level messages:
Any
*/
package any
package any // import "github.com/golang/protobuf/ptypes/any"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@ -62,6 +53,16 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
@ -122,14 +123,36 @@ type Any struct {
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) {
return fileDescriptor_any_744b9ca530f228db, []int{0}
}
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Any.Unmarshal(m, b)
}
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
}
func (dst *Any) XXX_Merge(src proto.Message) {
xxx_messageInfo_Any.Merge(dst, src)
}
func (m *Any) XXX_Size() int {
return xxx_messageInfo_Any.Size(m)
}
func (m *Any) XXX_DiscardUnknown() {
xxx_messageInfo_Any.DiscardUnknown(m)
}
var xxx_messageInfo_Any proto.InternalMessageInfo
func (m *Any) GetTypeUrl() string {
if m != nil {
@ -149,20 +172,20 @@ func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) }
var fileDescriptor0 = []byte{
// 184 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
0x38, 0xe5, 0x73, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab,
0x98, 0xe4, 0xdc, 0x21, 0x46, 0x05, 0x40, 0x95, 0xe8, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5,
0x97, 0xe7, 0x85, 0x80, 0x94, 0x26, 0xb1, 0x81, 0xf5, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
0x45, 0x1f, 0x1a, 0xf2, 0xf3, 0x00, 0x00, 0x00,
var fileDescriptor_any_744b9ca530f228db = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
}

View file

@ -74,6 +74,16 @@ option objc_class_prefix = "GPB";
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'

View file

@ -82,14 +82,14 @@ type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`

View file

@ -100,12 +100,12 @@ type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`

353
vendor/github.com/hashicorp/hcl2/LICENSE generated vendored Normal file
View file

@ -0,0 +1,353 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients' rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients'
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party's negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party's ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

View file

@ -0,0 +1,28 @@
# HCL User Functions Extension
This HCL extension allows a calling application to support user-defined
functions.
Functions are defined via a specific block type, like this:
```hcl
function "add" {
params = [a, b]
result = a + b
}
function "list" {
params = []
variadic_param = items
result = items
}
```
The extension is implemented as a pre-processor for `hcl.Body` objects. Given
a body that may contain functions, the `DecodeUserFunctions` function searches
for blocks that define functions and returns a functions map suitable for
inclusion in a `hcl.EvalContext`. It also returns a new `hcl.Body` that
contains the remainder of the content from the given body, allowing for
further processing of remaining content.
For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl2/ext/userfunc).
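As a concrete sketch of that flow (the parser setup and sample source are illustrative assumptions, not part of this vendored code):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl2/ext/userfunc"
	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclparse"
)

func main() {
	src := `
function "add" {
  params = [a, b]
  result = a + b
}

sum = add(2, 3)
`
	f, diags := hclparse.NewParser().ParseHCL([]byte(src), "example.hcl")
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	// Pull the function definitions out; remain holds the rest of the body
	// (here, the sum attribute).
	funcs, remain, diags := userfunc.DecodeUserFunctions(f.Body, "function", nil)
	if diags.HasErrors() {
		log.Fatal(diags)
	}

	attrs, diags := remain.JustAttributes()
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	v, diags := attrs["sum"].Expr.Value(&hcl.EvalContext{Functions: funcs})
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	fmt.Println(v.AsBigFloat()) // 5
}
```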

156
vendor/github.com/hashicorp/hcl2/ext/userfunc/decode.go generated vendored Normal file
View file

@ -0,0 +1,156 @@
package userfunc
import (
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function"
)
var funcBodySchema = &hcl.BodySchema{
Attributes: []hcl.AttributeSchema{
{
Name: "params",
Required: true,
},
{
Name: "variadic_param",
Required: false,
},
{
Name: "result",
Required: true,
},
},
}
func decodeUserFunctions(body hcl.Body, blockType string, contextFunc ContextFunc) (funcs map[string]function.Function, remain hcl.Body, diags hcl.Diagnostics) {
schema := &hcl.BodySchema{
Blocks: []hcl.BlockHeaderSchema{
{
Type: blockType,
LabelNames: []string{"name"},
},
},
}
content, remain, diags := body.PartialContent(schema)
if diags.HasErrors() {
return nil, remain, diags
}
// first call to getBaseCtx will populate context, and then the same
// context will be used for all subsequent calls. It's assumed that
// all functions in a given body should see an identical context.
var baseCtx *hcl.EvalContext
getBaseCtx := func() *hcl.EvalContext {
if baseCtx == nil {
if contextFunc != nil {
baseCtx = contextFunc()
}
}
// baseCtx might still be nil here, and that's okay
return baseCtx
}
funcs = make(map[string]function.Function)
Blocks:
for _, block := range content.Blocks {
name := block.Labels[0]
funcContent, funcDiags := block.Body.Content(funcBodySchema)
diags = append(diags, funcDiags...)
if funcDiags.HasErrors() {
continue
}
paramsExpr := funcContent.Attributes["params"].Expr
resultExpr := funcContent.Attributes["result"].Expr
var varParamExpr hcl.Expression
if funcContent.Attributes["variadic_param"] != nil {
varParamExpr = funcContent.Attributes["variadic_param"].Expr
}
var params []string
var varParam string
paramExprs, paramsDiags := hcl.ExprList(paramsExpr)
diags = append(diags, paramsDiags...)
if paramsDiags.HasErrors() {
continue
}
for _, paramExpr := range paramExprs {
param := hcl.ExprAsKeyword(paramExpr)
if param == "" {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid param element",
Detail: "Each parameter name must be an identifier.",
Subject: paramExpr.Range().Ptr(),
})
continue Blocks
}
params = append(params, param)
}
if varParamExpr != nil {
varParam = hcl.ExprAsKeyword(varParamExpr)
if varParam == "" {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid variadic_param",
Detail: "The variadic parameter name must be an identifier.",
Subject: varParamExpr.Range().Ptr(),
})
continue
}
}
spec := &function.Spec{}
for _, paramName := range params {
spec.Params = append(spec.Params, function.Parameter{
Name: paramName,
Type: cty.DynamicPseudoType,
})
}
if varParamExpr != nil {
spec.VarParam = &function.Parameter{
Name: varParam,
Type: cty.DynamicPseudoType,
}
}
impl := func(args []cty.Value) (cty.Value, error) {
ctx := getBaseCtx()
ctx = ctx.NewChild()
ctx.Variables = make(map[string]cty.Value)
// The cty function machinery guarantees that we have at least
// enough args to fill all of our params.
for i, paramName := range params {
ctx.Variables[paramName] = args[i]
}
if spec.VarParam != nil {
varArgs := args[len(params):]
ctx.Variables[varParam] = cty.TupleVal(varArgs)
}
result, diags := resultExpr.Value(ctx)
if diags.HasErrors() {
// Smuggle the diagnostics out via the error channel, since
// a diagnostics sequence implements error. Caller can
// type-assert this to recover the individual diagnostics
// if desired.
return cty.DynamicVal, diags
}
return result, nil
}
spec.Type = func(args []cty.Value) (cty.Type, error) {
val, err := impl(args)
return val.Type(), err
}
spec.Impl = func(args []cty.Value, retType cty.Type) (cty.Value, error) {
return impl(args)
}
funcs[name] = function.New(spec)
}
return funcs, remain, diags
}

22
vendor/github.com/hashicorp/hcl2/ext/userfunc/doc.go generated vendored Normal file
View file

@ -0,0 +1,22 @@
// Package userfunc implements an HCL extension that allows user-defined
// functions in HCL configuration.
//
// Using this extension requires some integration effort on the part of the
// calling application, to pass any declared functions into an HCL evaluation
// context after processing.
//
// The function declaration syntax looks like this:
//
// function "foo" {
// params = ["name"]
// result = "Hello, ${name}!"
// }
//
// When a user-defined function is called, the expression given for the "result"
// attribute is evaluated in an isolated evaluation context that defines variables
// named after the given parameter names.
//
// The block name "function" may be overridden by the calling application, if
// that default name conflicts with an existing block or attribute name in
// the application.
package userfunc

View file

@ -0,0 +1,42 @@
package userfunc
import (
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty/function"
)
// A ContextFunc is a callback used to produce the base EvalContext for
// running a particular set of functions.
//
// This is a function rather than an EvalContext directly to allow functions
// to be decoded before their context is complete. This will be true, for
// example, for applications that wish to allow functions to refer to themselves.
//
// The simplest use of a ContextFunc is to give user functions access to the
// same global variables and functions available elsewhere in an application's
// configuration language, but more complex applications may use different
// contexts to support lexical scoping depending on where in a configuration
// structure a function declaration is found, etc.
type ContextFunc func() *hcl.EvalContext
// DecodeUserFunctions looks for blocks of the given type in the given body
// and, for each one found, interprets it as a custom function definition.
//
// On success, the result is a mapping of function names to implementations,
// along with a new body that represents the remaining content of the given
// body which can be used for further processing.
//
// The result expression of each function is parsed during decoding but not
// evaluated until the function is called.
//
// If the given ContextFunc is non-nil, it will be called to obtain the
// context in which the function result expressions will be evaluated. If nil,
// or if it returns nil, the result expression will have access only to
// variables named after the declared parameters. A non-nil context turns
// the returned functions into closures, bound to the given context.
//
// If the returned diagnostics set has errors then the function map and
// remain body may be nil or incomplete.
func DecodeUserFunctions(body hcl.Body, blockType string, context ContextFunc) (funcs map[string]function.Function, remain hcl.Body, diags hcl.Diagnostics) {
return decodeUserFunctions(body, blockType, context)
}
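
To make the decoding flow above concrete, here is a minimal sketch of how a calling application might use DecodeUserFunctions, assuming the vendored import paths shown in this diff; the "greet" function and the "funcs.hcl" file name are illustrative only. With a nil ContextFunc, the result expression sees only its declared parameters, as the doc comment describes.

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl2/ext/userfunc"
		"github.com/hashicorp/hcl2/hclparse"
		"github.com/zclconf/go-cty/cty"
	)

	func main() {
		const src = `
	function "greet" {
	  params = ["name"]
	  result = "Hello, ${name}!"
	}
	`
		parser := hclparse.NewParser()
		f, diags := parser.ParseHCL([]byte(src), "funcs.hcl")
		if diags.HasErrors() {
			panic(diags)
		}

		// A nil ContextFunc means each result expression sees only its
		// own declared parameters.
		funcs, _, funcDiags := userfunc.DecodeUserFunctions(f.Body, "function", nil)
		if funcDiags.HasErrors() {
			panic(funcDiags)
		}

		result, err := funcs["greet"].Call([]cty.Value{cty.StringVal("world")})
		if err != nil {
			panic(err)
		}
		fmt.Println(result.AsString()) // Hello, world!
	}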

304
vendor/github.com/hashicorp/hcl2/gohcl/decode.go generated vendored Normal file

@ -0,0 +1,304 @@
package gohcl
import (
"fmt"
"reflect"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty/convert"
"github.com/zclconf/go-cty/cty/gocty"
)
// DecodeBody extracts the configuration within the given body into the given
// value. This value must be a non-nil pointer to either a struct or
// a map, where in the former case the configuration will be decoded using
// struct tags and in the latter case only attributes are allowed and their
// values are decoded into the map.
//
// The given EvalContext is used to resolve any variables or functions in
// expressions encountered while decoding. This may be nil to require only
// constant values, for simple applications that do not support variables or
// functions.
//
// The returned diagnostics should be inspected with its HasErrors method to
// determine if the populated value is valid and complete. If error diagnostics
// are returned then the given value may have been partially-populated but
// may still be accessed by a careful caller for static analysis and editor
// integration use-cases.
func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
rv := reflect.ValueOf(val)
if rv.Kind() != reflect.Ptr {
panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
}
return decodeBodyToValue(body, ctx, rv.Elem())
}
func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
et := val.Type()
switch et.Kind() {
case reflect.Struct:
return decodeBodyToStruct(body, ctx, val)
case reflect.Map:
return decodeBodyToMap(body, ctx, val)
default:
panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
}
}
func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
schema, partial := ImpliedBodySchema(val.Interface())
var content *hcl.BodyContent
var leftovers hcl.Body
var diags hcl.Diagnostics
if partial {
content, leftovers, diags = body.PartialContent(schema)
} else {
content, diags = body.Content(schema)
}
if content == nil {
return diags
}
tags := getFieldTags(val.Type())
if tags.Remain != nil {
fieldIdx := *tags.Remain
field := val.Type().Field(fieldIdx)
fieldV := val.Field(fieldIdx)
switch {
case bodyType.AssignableTo(field.Type):
fieldV.Set(reflect.ValueOf(leftovers))
case attrsType.AssignableTo(field.Type):
attrs, attrsDiags := leftovers.JustAttributes()
if len(attrsDiags) > 0 {
diags = append(diags, attrsDiags...)
}
fieldV.Set(reflect.ValueOf(attrs))
default:
diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...)
}
}
for name, fieldIdx := range tags.Attributes {
attr := content.Attributes[name]
field := val.Type().Field(fieldIdx)
fieldV := val.Field(fieldIdx)
if attr == nil {
if !exprType.AssignableTo(field.Type) {
continue
}
// As a special case, if the target is of type hcl.Expression then
// we'll assign an actual expression that evaluates to a cty null,
// so the caller can deal with it within the cty realm rather
// than within the Go realm.
synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
fieldV.Set(reflect.ValueOf(synthExpr))
continue
}
switch {
case attrType.AssignableTo(field.Type):
fieldV.Set(reflect.ValueOf(attr))
case exprType.AssignableTo(field.Type):
fieldV.Set(reflect.ValueOf(attr.Expr))
default:
diags = append(diags, DecodeExpression(
attr.Expr, ctx, fieldV.Addr().Interface(),
)...)
}
}
blocksByType := content.Blocks.ByType()
for typeName, fieldIdx := range tags.Blocks {
blocks := blocksByType[typeName]
field := val.Type().Field(fieldIdx)
ty := field.Type
isSlice := false
isPtr := false
if ty.Kind() == reflect.Slice {
isSlice = true
ty = ty.Elem()
}
if ty.Kind() == reflect.Ptr {
isPtr = true
ty = ty.Elem()
}
if len(blocks) > 1 && !isSlice {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Duplicate %s block", typeName),
Detail: fmt.Sprintf(
"Only one %s block is allowed. Another was defined at %s.",
typeName, blocks[0].DefRange.String(),
),
Subject: &blocks[1].DefRange,
})
continue
}
if len(blocks) == 0 {
if isSlice || isPtr {
val.Field(fieldIdx).Set(reflect.Zero(field.Type))
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Missing %s block", typeName),
Detail: fmt.Sprintf("A %s block is required.", typeName),
Subject: body.MissingItemRange().Ptr(),
})
}
continue
}
switch {
case isSlice:
elemType := ty
if isPtr {
elemType = reflect.PtrTo(ty)
}
sli := reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks))
for i, block := range blocks {
if isPtr {
v := reflect.New(ty)
diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...)
sli.Index(i).Set(v)
} else {
diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...)
}
}
val.Field(fieldIdx).Set(sli)
default:
block := blocks[0]
if isPtr {
v := reflect.New(ty)
diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...)
val.Field(fieldIdx).Set(v)
} else {
diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...)
}
}
}
return diags
}
func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
attrs, diags := body.JustAttributes()
if attrs == nil {
return diags
}
mv := reflect.MakeMap(v.Type())
for k, attr := range attrs {
switch {
case attrType.AssignableTo(v.Type().Elem()):
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr))
case exprType.AssignableTo(v.Type().Elem()):
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr))
default:
ev := reflect.New(v.Type().Elem())
diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...)
mv.SetMapIndex(reflect.ValueOf(k), ev.Elem())
}
}
v.Set(mv)
return diags
}
func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
var diags hcl.Diagnostics
ty := v.Type()
switch {
case blockType.AssignableTo(ty):
v.Elem().Set(reflect.ValueOf(block))
case bodyType.AssignableTo(ty):
v.Elem().Set(reflect.ValueOf(block.Body))
case attrsType.AssignableTo(ty):
attrs, attrsDiags := block.Body.JustAttributes()
if len(attrsDiags) > 0 {
diags = append(diags, attrsDiags...)
}
v.Elem().Set(reflect.ValueOf(attrs))
default:
diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...)
if len(block.Labels) > 0 {
blockTags := getFieldTags(ty)
for li, lv := range block.Labels {
lfieldIdx := blockTags.Labels[li].FieldIndex
v.Field(lfieldIdx).Set(reflect.ValueOf(lv))
}
}
}
return diags
}
// DecodeExpression extracts the value of the given expression into the given
// value. This value must be something that gocty is able to decode into,
// since the final decoding is delegated to that package.
//
// The given EvalContext is used to resolve any variables or functions in
// expressions encountered while decoding. This may be nil to require only
// constant values, for simple applications that do not support variables or
// functions.
//
// The returned diagnostics should be inspected with its HasErrors method to
// determine if the populated value is valid and complete. If error diagnostics
// are returned then the given value may have been partially-populated but
// may still be accessed by a careful caller for static analysis and editor
// integration use-cases.
func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
srcVal, diags := expr.Value(ctx)
convTy, err := gocty.ImpliedType(val)
if err != nil {
panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
}
srcVal, err = convert.Convert(srcVal, convTy)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsuitable value type",
Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
Subject: expr.StartRange().Ptr(),
Context: expr.Range().Ptr(),
})
return diags
}
err = gocty.FromCtyValue(srcVal, val)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsuitable value type",
Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
Subject: expr.StartRange().Ptr(),
Context: expr.Range().Ptr(),
})
}
return diags
}
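
As a rough illustration of the entry points above, the following sketch decodes two attributes into a Go struct via DecodeBody; the attribute names and values are invented, and passing a nil EvalContext restricts the configuration to constant expressions, per the doc comment.

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl2/gohcl"
		"github.com/hashicorp/hcl2/hclparse"
	)

	func main() {
		const src = "name = \"web\"\nreplicas = 3\n"
		f, diags := hclparse.NewParser().ParseHCL([]byte(src), "app.hcl")
		if diags.HasErrors() {
			panic(diags)
		}

		var config struct {
			Name     string `hcl:"name"`
			Replicas int    `hcl:"replicas"`
		}
		// A nil EvalContext permits only constant expressions.
		if diags := gohcl.DecodeBody(f.Body, nil, &config); diags.HasErrors() {
			panic(diags)
		}
		fmt.Printf("%s x%d\n", config.Name, config.Replicas) // web x3
	}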

49
vendor/github.com/hashicorp/hcl2/gohcl/doc.go generated vendored Normal file

@ -0,0 +1,49 @@
// Package gohcl allows decoding HCL configurations into Go data structures.
//
// It provides a convenient and concise way of describing the schema for
// configuration and then accessing the resulting data via native Go
// types.
//
// A struct field tag scheme is used, similar to other decoding and
// unmarshalling libraries. The tags are formatted as in the following example:
//
// ThingType string `hcl:"thing_type,attr"`
//
// Within each tag there are two comma-separated tokens. The first is the
// name of the corresponding construct in configuration, while the second
// is a keyword giving the kind of construct expected. The following
// kind keywords are supported:
//
// attr (the default) indicates that the value is to be populated from an attribute
// block indicates that the value is to be populated from a block
// label indicates that the value is to be populated from a block label
// remain indicates that the value is to be populated from the remaining body after populating other fields
//
// "attr" fields may either be of type *hcl.Expression, in which case the raw
// expression is assigned, or of any type accepted by gocty, in which case
// gocty will be used to assign the value to a native Go type.
//
// "block" fields may be of type *hcl.Block or hcl.Body, in which case the
// corresponding raw value is assigned, or may be a struct that recursively
// uses the same tags. Block fields may also be slices of any of these types,
// in which case multiple blocks of the corresponding type are decoded into
// the slice.
//
// "label" fields are considered only in a struct used as the type of a field
// marked as "block", and are used sequentially to capture the labels of
// the blocks being decoded. In this case, the name token is used only as
// an identifier for the label in diagnostic messages.
//
// "remain" can be placed on a single field that may be either of type
// hcl.Body or hcl.Attributes, in which case any remaining body content is
// placed into this field for delayed processing. If no "remain" field is
// present then any attributes or blocks not matched by another valid tag
// will cause an error diagnostic.
//
// Broadly-speaking this package deals with two types of error. The first is
// errors in the configuration itself, which are returned as diagnostics
// written with the configuration author as the target audience. The second
// is bugs in the calling program, such as invalid struct tags, which are
// surfaced via panics since there can be no useful runtime handling of such
// errors and they should certainly not be returned to the user as diagnostics.
package gohcl
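
A brief, hypothetical sketch showing the tag kinds described above in combination; the Service and Config types, the package name, and the HCL names are made up for illustration:

	package example

	import "github.com/hashicorp/hcl2/hcl"

	// Matches configuration like:
	//
	//   service "web" {
	//     command = "server --listen=:8080"
	//   }
	type Service struct {
		Name    string   `hcl:"name,label"`
		Command string   `hcl:"command"`
		Env     []string `hcl:"env,optional"`
		Other   hcl.Body `hcl:",remain"`
	}

	type Config struct {
		Services []Service `hcl:"service,block"`
	}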

174
vendor/github.com/hashicorp/hcl2/gohcl/schema.go generated vendored Normal file

@ -0,0 +1,174 @@
package gohcl
import (
"fmt"
"reflect"
"sort"
"strings"
"github.com/hashicorp/hcl2/hcl"
)
// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the
// given value, which must be a struct value or a pointer to one. If an
// inappropriate value is passed, this function will panic.
//
// The second return argument indicates whether the given struct includes
// a "remain" field, and thus the returned schema is non-exhaustive.
//
// This uses the tags on the fields of the struct to discover how each
// field's value should be expressed within configuration. If an invalid
// mapping is attempted, this function will panic.
func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) {
ty := reflect.TypeOf(val)
if ty.Kind() == reflect.Ptr {
ty = ty.Elem()
}
if ty.Kind() != reflect.Struct {
panic(fmt.Sprintf("given value must be struct, not %T", val))
}
var attrSchemas []hcl.AttributeSchema
var blockSchemas []hcl.BlockHeaderSchema
tags := getFieldTags(ty)
attrNames := make([]string, 0, len(tags.Attributes))
for n := range tags.Attributes {
attrNames = append(attrNames, n)
}
sort.Strings(attrNames)
for _, n := range attrNames {
idx := tags.Attributes[n]
optional := tags.Optional[n]
field := ty.Field(idx)
var required bool
switch {
case field.Type.AssignableTo(exprType):
// If we're decoding to hcl.Expression then absence can be
// indicated via a null value, so we don't specify that
// the field is required during decoding.
required = false
case field.Type.Kind() != reflect.Ptr && !optional:
required = true
default:
required = false
}
attrSchemas = append(attrSchemas, hcl.AttributeSchema{
Name: n,
Required: required,
})
}
blockNames := make([]string, 0, len(tags.Blocks))
for n := range tags.Blocks {
blockNames = append(blockNames, n)
}
sort.Strings(blockNames)
for _, n := range blockNames {
idx := tags.Blocks[n]
field := ty.Field(idx)
fty := field.Type
if fty.Kind() == reflect.Slice {
fty = fty.Elem()
}
if fty.Kind() == reflect.Ptr {
fty = fty.Elem()
}
if fty.Kind() != reflect.Struct {
panic(fmt.Sprintf(
"hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name,
))
}
ftags := getFieldTags(fty)
var labelNames []string
if len(ftags.Labels) > 0 {
labelNames = make([]string, len(ftags.Labels))
for i, l := range ftags.Labels {
labelNames[i] = l.Name
}
}
blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{
Type: n,
LabelNames: labelNames,
})
}
partial = tags.Remain != nil
schema = &hcl.BodySchema{
Attributes: attrSchemas,
Blocks: blockSchemas,
}
return schema, partial
}
type fieldTags struct {
Attributes map[string]int
Blocks map[string]int
Labels []labelField
Remain *int
Optional map[string]bool
}
type labelField struct {
FieldIndex int
Name string
}
func getFieldTags(ty reflect.Type) *fieldTags {
ret := &fieldTags{
Attributes: map[string]int{},
Blocks: map[string]int{},
Optional: map[string]bool{},
}
ct := ty.NumField()
for i := 0; i < ct; i++ {
field := ty.Field(i)
tag := field.Tag.Get("hcl")
if tag == "" {
continue
}
comma := strings.Index(tag, ",")
var name, kind string
if comma != -1 {
name = tag[:comma]
kind = tag[comma+1:]
} else {
name = tag
kind = "attr"
}
switch kind {
case "attr":
ret.Attributes[name] = i
case "block":
ret.Blocks[name] = i
case "label":
ret.Labels = append(ret.Labels, labelField{
FieldIndex: i,
Name: name,
})
case "remain":
if ret.Remain != nil {
panic("only one 'remain' tag is permitted")
}
idx := i // copy, because this loop will continue assigning to i
ret.Remain = &idx
case "optional":
ret.Attributes[name] = i
ret.Optional[name] = true
default:
panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name))
}
}
return ret
}
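
A small sketch tying ImpliedBodySchema to the tag parsing in getFieldTags above; the struct and its field names are invented for illustration:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl2/gohcl"
		"github.com/hashicorp/hcl2/hcl"
	)

	type Config struct {
		Address string   `hcl:"address"`
		Port    int      `hcl:"port,optional"`
		Extra   hcl.Body `hcl:",remain"`
	}

	func main() {
		schema, partial := gohcl.ImpliedBodySchema(&Config{})
		fmt.Println(partial) // true, because of the "remain" field
		for _, a := range schema.Attributes {
			fmt.Printf("%s required=%t\n", a.Name, a.Required)
		}
		// address required=true
		// port required=false
	}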

16
vendor/github.com/hashicorp/hcl2/gohcl/types.go generated vendored Normal file

@ -0,0 +1,16 @@
package gohcl
import (
"reflect"
"github.com/hashicorp/hcl2/hcl"
)
var victimExpr hcl.Expression
var victimBody hcl.Body
var exprType = reflect.TypeOf(&victimExpr).Elem()
var bodyType = reflect.TypeOf(&victimBody).Elem()
var blockType = reflect.TypeOf((*hcl.Block)(nil))
var attrType = reflect.TypeOf((*hcl.Attribute)(nil))
var attrsType = reflect.TypeOf(hcl.Attributes(nil))

143
vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go generated vendored Normal file

@ -0,0 +1,143 @@
package hcl
import (
"fmt"
)
// DiagnosticSeverity represents the severity of a diagnostic.
type DiagnosticSeverity int
const (
// DiagInvalid is the invalid zero value of DiagnosticSeverity
DiagInvalid DiagnosticSeverity = iota
// DiagError indicates that the problem reported by a diagnostic prevents
// further progress in parsing and/or evaluating the subject.
DiagError
// DiagWarning indicates that the problem reported by a diagnostic warrants
// user attention but does not prevent further progress. It is most
// commonly used for showing deprecation notices.
DiagWarning
)
// Diagnostic represents information to be presented to a user about an
// error or anomaly in parsing or evaluating configuration.
type Diagnostic struct {
Severity DiagnosticSeverity
// Summary and Detail contain the English-language description of the
// problem. Summary is a terse description of the general problem and
// detail is a more elaborate, often-multi-sentence description of
// the problem and what might be done to solve it.
Summary string
Detail string
// Subject and Context are both source ranges relating to the diagnostic.
//
// Subject is a tight range referring to exactly the construct that
// is problematic, while Context is an optional broader range (which should
// fully contain Subject) that ought to be shown around Subject when
// generating isolated source-code snippets in diagnostic messages.
// If Context is nil, the Subject is also the Context.
//
// Some diagnostics have no source ranges at all. If Context is set then
// Subject should always also be set.
Subject *Range
Context *Range
// For diagnostics that occur when evaluating an expression, Expression
// may refer to that expression and EvalContext may point to the
// EvalContext that was active when evaluating it. This may allow for the
// inclusion of additional useful information when rendering a diagnostic
// message to the user.
//
// It is not always possible to select a single EvalContext for a
// diagnostic, and so in some cases this field may be nil even when an
// expression causes a problem.
//
// EvalContexts form a tree, so the given EvalContext may refer to a parent
// which in turn refers to another parent, etc. For a full picture of all
// of the active variables and functions the caller must walk up this
// chain, preferring definitions that are "closer" to the expression in
// case of colliding names.
Expression Expression
EvalContext *EvalContext
}
// Diagnostics is a list of Diagnostic instances.
type Diagnostics []*Diagnostic
// error implementation, so that diagnostics can be returned via APIs
// that normally deal in vanilla Go errors.
//
// This presents only minimal context about the error, for compatibility
// with usual expectations about how errors will present as strings.
func (d *Diagnostic) Error() string {
return fmt.Sprintf("%s: %s; %s", d.Subject, d.Summary, d.Detail)
}
// error implementation, so that sets of diagnostics can be returned via
// APIs that normally deal in vanilla Go errors.
func (d Diagnostics) Error() string {
count := len(d)
switch {
case count == 0:
return "no diagnostics"
case count == 1:
return d[0].Error()
default:
return fmt.Sprintf("%s, and %d other diagnostic(s)", d[0].Error(), count-1)
}
}
// Append appends a new error to a Diagnostics and returns the whole Diagnostics.
//
// This is provided as a convenience for returning from a function that
// collects and then returns a set of diagnostics:
//
// return nil, diags.Append(&hcl.Diagnostic{ ... })
//
// Note that this modifies the array underlying the diagnostics slice, so
// must be used carefully within a single codepath. It is incorrect (and rude)
// to extend a diagnostics created by a different subsystem.
func (d Diagnostics) Append(diag *Diagnostic) Diagnostics {
return append(d, diag)
}
// Extend concatenates the given Diagnostics with the receiver and returns
// the whole new Diagnostics.
//
// This is similar to Append but accepts multiple diagnostics to add. It has
// all the same caveats and constraints.
func (d Diagnostics) Extend(diags Diagnostics) Diagnostics {
return append(d, diags...)
}
// HasErrors returns true if the receiver contains any diagnostics of
// severity DiagError.
func (d Diagnostics) HasErrors() bool {
for _, diag := range d {
if diag.Severity == DiagError {
return true
}
}
return false
}
func (d Diagnostics) Errs() []error {
var errs []error
for _, diag := range d {
if diag.Severity == DiagError {
errs = append(errs, diag)
}
}
return errs
}
// A DiagnosticWriter emits diagnostics somehow.
type DiagnosticWriter interface {
WriteDiagnostic(*Diagnostic) error
WriteDiagnostics(Diagnostics) error
}
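
The following sketch (summaries and details invented) shows the intended interplay of Append, HasErrors, Errs, and the error implementations above:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl2/hcl"
	)

	func main() {
		var diags hcl.Diagnostics
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagWarning,
			Summary:  "Deprecated attribute",
			Detail:   "Use \"address\" instead.",
		})
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Missing required attribute",
			Detail:   "The attribute \"address\" is required.",
		})

		fmt.Println(diags.HasErrors()) // true
		fmt.Println(len(diags.Errs())) // 1: only DiagError entries
		fmt.Println(diags.Error())     // first entry plus ", and 1 other diagnostic(s)"
	}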

311
vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go generated vendored Normal file

@ -0,0 +1,311 @@
package hcl
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"sort"
wordwrap "github.com/mitchellh/go-wordwrap"
"github.com/zclconf/go-cty/cty"
)
type diagnosticTextWriter struct {
files map[string]*File
wr io.Writer
width uint
color bool
}
// NewDiagnosticTextWriter creates a DiagnosticWriter that writes diagnostics
// to the given writer as formatted text.
//
// It is designed to produce text appropriate to print in a monospaced font
// in a terminal of a particular width, or optionally with no width limit.
//
// The given width may be zero to disable word-wrapping of the detail text
// and truncation of source code snippets.
//
// If color is set to true, the output will include VT100 escape sequences to
// color-code the severity indicators. It is suggested to turn this off if
// the target writer is not a terminal.
func NewDiagnosticTextWriter(wr io.Writer, files map[string]*File, width uint, color bool) DiagnosticWriter {
return &diagnosticTextWriter{
files: files,
wr: wr,
width: width,
color: color,
}
}
func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error {
if diag == nil {
return errors.New("nil diagnostic")
}
var colorCode, highlightCode, resetCode string
if w.color {
switch diag.Severity {
case DiagError:
colorCode = "\x1b[31m"
case DiagWarning:
colorCode = "\x1b[33m"
}
resetCode = "\x1b[0m"
highlightCode = "\x1b[1;4m"
}
var severityStr string
switch diag.Severity {
case DiagError:
severityStr = "Error"
case DiagWarning:
severityStr = "Warning"
default:
// should never happen
severityStr = "???????"
}
fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary)
if diag.Subject != nil {
snipRange := *diag.Subject
highlightRange := snipRange
if diag.Context != nil {
// Show enough of the source code to include both the subject
// and context ranges, which overlap in all reasonable
// situations.
snipRange = RangeOver(snipRange, *diag.Context)
}
// We can't illustrate an empty range, so we'll turn such ranges into
// single-character ranges, which might not be totally valid (may point
// off the end of a line, or off the end of the file) but are good
// enough for the bounds checks we do below.
if snipRange.Empty() {
snipRange.End.Byte++
snipRange.End.Column++
}
if highlightRange.Empty() {
highlightRange.End.Byte++
highlightRange.End.Column++
}
file := w.files[diag.Subject.Filename]
if file == nil || file.Bytes == nil {
fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line)
} else {
var contextLine string
if diag.Subject != nil {
contextLine = contextString(file, diag.Subject.Start.Byte)
if contextLine != "" {
contextLine = ", in " + contextLine
}
}
fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine)
src := file.Bytes
sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines)
for sc.Scan() {
lineRange := sc.Range()
if !lineRange.Overlaps(snipRange) {
continue
}
beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange)
if highlightedRange.Empty() {
fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes())
} else {
before := beforeRange.SliceBytes(src)
highlighted := highlightedRange.SliceBytes(src)
after := afterRange.SliceBytes(src)
fmt.Fprintf(
w.wr, "%4d: %s%s%s%s%s\n",
lineRange.Start.Line,
before,
highlightCode, highlighted, resetCode,
after,
)
}
}
w.wr.Write([]byte{'\n'})
}
if diag.Expression != nil && diag.EvalContext != nil {
// We will attempt to render the values for any variables
// referenced in the given expression as additional context, for
// situations where the same expression is evaluated multiple
// times in different scopes.
expr := diag.Expression
ctx := diag.EvalContext
vars := expr.Variables()
stmts := make([]string, 0, len(vars))
seen := make(map[string]struct{}, len(vars))
for _, traversal := range vars {
val, diags := traversal.TraverseAbs(ctx)
if diags.HasErrors() {
// Skip anything that generates errors, since we probably
// already have the same error in our diagnostics set
// already.
continue
}
traversalStr := w.traversalStr(traversal)
if _, exists := seen[traversalStr]; exists {
continue // don't show duplicates when the same variable is referenced multiple times
}
switch {
case !val.IsKnown():
// Can't say anything about this yet, then.
continue
case val.IsNull():
stmts = append(stmts, fmt.Sprintf("%s set to null", traversalStr))
default:
stmts = append(stmts, fmt.Sprintf("%s as %s", traversalStr, w.valueStr(val)))
}
seen[traversalStr] = struct{}{}
}
sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly?
last := len(stmts) - 1
for i, stmt := range stmts {
switch i {
case 0:
w.wr.Write([]byte{'w', 'i', 't', 'h', ' '})
default:
w.wr.Write([]byte{' ', ' ', ' ', ' ', ' '})
}
w.wr.Write([]byte(stmt))
switch i {
case last:
w.wr.Write([]byte{'.', '\n', '\n'})
default:
w.wr.Write([]byte{',', '\n'})
}
}
}
}
if diag.Detail != "" {
detail := diag.Detail
if w.width != 0 {
detail = wordwrap.WrapString(detail, w.width)
}
fmt.Fprintf(w.wr, "%s\n\n", detail)
}
return nil
}
func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error {
for _, diag := range diags {
err := w.WriteDiagnostic(diag)
if err != nil {
return err
}
}
return nil
}
func (w *diagnosticTextWriter) traversalStr(traversal Traversal) string {
// This is a specialized subset of traversal rendering tailored to
// producing helpful contextual messages in diagnostics. It is not
// comprehensive nor intended to be used for other purposes.
var buf bytes.Buffer
for _, step := range traversal {
switch tStep := step.(type) {
case TraverseRoot:
buf.WriteString(tStep.Name)
case TraverseAttr:
buf.WriteByte('.')
buf.WriteString(tStep.Name)
case TraverseIndex:
buf.WriteByte('[')
if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() {
buf.WriteString(w.valueStr(tStep.Key))
} else {
// We'll just use a placeholder for more complex values,
// since otherwise our result could grow ridiculously long.
buf.WriteString("...")
}
buf.WriteByte(']')
}
}
return buf.String()
}
func (w *diagnosticTextWriter) valueStr(val cty.Value) string {
// This is a specialized subset of value rendering tailored to producing
// helpful but concise messages in diagnostics. It is not comprehensive
// nor intended to be used for other purposes.
ty := val.Type()
switch {
case val.IsNull():
return "null"
case !val.IsKnown():
// Should never happen here because we should filter before we get
// in here, but we'll do something reasonable rather than panic.
return "(not yet known)"
case ty == cty.Bool:
if val.True() {
return "true"
}
return "false"
case ty == cty.Number:
bf := val.AsBigFloat()
return bf.Text('g', 10)
case ty == cty.String:
// Go string syntax is not exactly the same as HCL native string syntax,
// but we'll accept the minor edge-cases where this is different here
// for now, just to get something reasonable here.
return fmt.Sprintf("%q", val.AsString())
case ty.IsCollectionType() || ty.IsTupleType():
l := val.LengthInt()
switch l {
case 0:
return "empty " + ty.FriendlyName()
case 1:
return ty.FriendlyName() + " with 1 element"
default:
return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l)
}
case ty.IsObjectType():
atys := ty.AttributeTypes()
l := len(atys)
switch l {
case 0:
return "object with no attributes"
case 1:
var name string
for k := range atys {
name = k
}
return fmt.Sprintf("object with 1 attribute %q", name)
default:
return fmt.Sprintf("object with %d attributes", l)
}
default:
return ty.FriendlyName()
}
}
func contextString(file *File, offset int) string {
type contextStringer interface {
ContextString(offset int) string
}
if cser, ok := file.Nav.(contextStringer); ok {
return cser.ContextString(offset)
}
return ""
}

24
vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go generated vendored Normal file

@ -0,0 +1,24 @@
package hcl
import (
"github.com/agext/levenshtein"
)
// nameSuggestion tries to find a name from the given slice of suggested names
// that is close to the given name and returns it if found. If no suggestion
// is close enough, returns the empty string.
//
// The suggestions are tried in order, so earlier suggestions take precedence
// if the given string is similar to two or more suggestions.
//
// This function is intended to be used with a relatively-small number of
// suggestions. It's not optimized for hundreds or thousands of them.
func nameSuggestion(given string, suggestions []string) string {
for _, suggestion := range suggestions {
dist := levenshtein.Distance(given, suggestion, nil)
if dist < 3 { // threshold determined experimentally
return suggestion
}
}
return ""
}
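
Since nameSuggestion is unexported, it can only be called from inside this package; a hypothetical call site when building a diagnostic might look like the following fragment (names are illustrative, and fmt is assumed to be imported):

	given := "recieved"
	valid := []string{"received", "required"}
	detail := fmt.Sprintf("There is no attribute named %q.", given)
	if suggestion := nameSuggestion(given, valid); suggestion != "" {
		detail += fmt.Sprintf(" Did you mean %q?", suggestion)
	}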

1
vendor/github.com/hashicorp/hcl2/hcl/doc.go generated vendored Normal file

@ -0,0 +1 @@
package hcl

25
vendor/github.com/hashicorp/hcl2/hcl/eval_context.go generated vendored Normal file

@ -0,0 +1,25 @@
package hcl
import (
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function"
)
// An EvalContext provides the variables and functions that should be used
// to evaluate an expression.
type EvalContext struct {
Variables map[string]cty.Value
Functions map[string]function.Function
parent *EvalContext
}
// NewChild returns a new EvalContext that is a child of the receiver.
func (ctx *EvalContext) NewChild() *EvalContext {
return &EvalContext{parent: ctx}
}
// Parent returns the parent of the receiver, or nil if the receiver has
// no parent.
func (ctx *EvalContext) Parent() *EvalContext {
return ctx.parent
}
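
A short sketch of parent/child scoping with EvalContext, using hclsyntax.ParseExpression from the companion package in this diff; the variable names and values are invented:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl2/hcl"
		"github.com/hashicorp/hcl2/hcl/hclsyntax"
		"github.com/zclconf/go-cty/cty"
	)

	func main() {
		root := &hcl.EvalContext{
			Variables: map[string]cty.Value{
				"env": cty.StringVal("prod"),
			},
		}
		// The child can add or shadow definitions; lookups fall back
		// to the parent for anything not defined locally.
		child := root.NewChild()
		child.Variables = map[string]cty.Value{
			"name": cty.StringVal("web"),
		}

		expr, diags := hclsyntax.ParseExpression(
			[]byte(`"${name}-${env}"`), "inline", hcl.Pos{Line: 1, Column: 1},
		)
		if diags.HasErrors() {
			panic(diags)
		}
		val, valDiags := expr.Value(child)
		if valDiags.HasErrors() {
			panic(valDiags)
		}
		fmt.Println(val.AsString()) // web-prod
	}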

46
vendor/github.com/hashicorp/hcl2/hcl/expr_call.go generated vendored Normal file
View file

@ -0,0 +1,46 @@
package hcl
// ExprCall tests if the given expression is a function call and,
// if so, extracts the function name and the expressions that represent
// the arguments. If the given expression is not statically a function call,
// error diagnostics are returned.
//
// A particular Expression implementation can support this function by
// offering a method called ExprCall that takes no arguments and returns
// *StaticCall. This method should return nil if a static call cannot
// be extracted. Alternatively, an implementation can support
// UnwrapExpression to delegate handling of this function to a wrapped
// Expression object.
func ExprCall(expr Expression) (*StaticCall, Diagnostics) {
type exprCall interface {
ExprCall() *StaticCall
}
physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
_, supported := expr.(exprCall)
return supported
})
if exC, supported := physExpr.(exprCall); supported {
if call := exC.ExprCall(); call != nil {
return call, nil
}
}
return nil, Diagnostics{
&Diagnostic{
Severity: DiagError,
Summary: "Invalid expression",
Detail: "A static function call is required.",
Subject: expr.StartRange().Ptr(),
},
}
}
// StaticCall represents a function call that was extracted statically from
// an expression using ExprCall.
type StaticCall struct {
Name string
NameRange Range
Arguments []Expression
ArgsRange Range
}
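
A minimal sketch of ExprCall against an expression parsed with hclsyntax, whose FunctionCallExpr supports the extraction protocol described above; the function name "upper" is arbitrary and is never actually called, since extraction is static:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl2/hcl"
		"github.com/hashicorp/hcl2/hcl/hclsyntax"
	)

	func main() {
		expr, diags := hclsyntax.ParseExpression(
			[]byte(`upper("hello")`), "inline", hcl.Pos{Line: 1, Column: 1},
		)
		if diags.HasErrors() {
			panic(diags)
		}
		call, callDiags := hcl.ExprCall(expr)
		if callDiags.HasErrors() {
			panic(callDiags)
		}
		fmt.Println(call.Name, len(call.Arguments)) // upper 1
	}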

37
vendor/github.com/hashicorp/hcl2/hcl/expr_list.go generated vendored Normal file
View file

@ -0,0 +1,37 @@
package hcl
// ExprList tests if the given expression is a static list construct and,
// if so, extracts the expressions that represent the list elements.
// If the given expression is not a static list, error diagnostics are
// returned.
//
// A particular Expression implementation can support this function by
// offering a method called ExprList that takes no arguments and returns
// []Expression. This method should return nil if a static list cannot
// be extracted. Alternatively, an implementation can support
// UnwrapExpression to delegate handling of this function to a wrapped
// Expression object.
func ExprList(expr Expression) ([]Expression, Diagnostics) {
type exprList interface {
ExprList() []Expression
}
physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
_, supported := expr.(exprList)
return supported
})
if exL, supported := physExpr.(exprList); supported {
if list := exL.ExprList(); list != nil {
return list, nil
}
}
return nil, Diagnostics{
&Diagnostic{
Severity: DiagError,
Summary: "Invalid expression",
Detail: "A static list expression is required.",
Subject: expr.StartRange().Ptr(),
},
}
}
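
A corresponding sketch for ExprList; the literal list is arbitrary and its elements remain unevaluated Expression values:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl2/hcl"
		"github.com/hashicorp/hcl2/hcl/hclsyntax"
	)

	func main() {
		expr, _ := hclsyntax.ParseExpression(
			[]byte(`[1, 2, 3]`), "inline", hcl.Pos{Line: 1, Column: 1},
		)
		elems, diags := hcl.ExprList(expr)
		if diags.HasErrors() {
			panic(diags)
		}
		// Three unevaluated element expressions, one per list item.
		fmt.Println(len(elems)) // 3
	}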

44
vendor/github.com/hashicorp/hcl2/hcl/expr_map.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
package hcl
// ExprMap tests if the given expression is a static map construct and,
// if so, extracts the expressions that represent the map elements.
// If the given expression is not a static map, error diagnostics are
// returned.
//
// A particular Expression implementation can support this function by
// offering a method called ExprMap that takes no arguments and returns
// []KeyValuePair. This method should return nil if a static map cannot
// be extracted. Alternatively, an implementation can support
// UnwrapExpression to delegate handling of this function to a wrapped
// Expression object.
func ExprMap(expr Expression) ([]KeyValuePair, Diagnostics) {
type exprMap interface {
ExprMap() []KeyValuePair
}
physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
_, supported := expr.(exprMap)
return supported
})
if exM, supported := physExpr.(exprMap); supported {
if pairs := exM.ExprMap(); pairs != nil {
return pairs, nil
}
}
return nil, Diagnostics{
&Diagnostic{
Severity: DiagError,
Summary: "Invalid expression",
Detail: "A static map expression is required.",
Subject: expr.StartRange().Ptr(),
},
}
}
// KeyValuePair represents a pair of expressions that serve as a single item
// within a map or object definition construct.
type KeyValuePair struct {
Key Expression
Value Expression
}
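
And likewise for ExprMap, again with an invented object literal; each returned pair holds unevaluated Key and Value expressions:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl2/hcl"
		"github.com/hashicorp/hcl2/hcl/hclsyntax"
	)

	func main() {
		expr, _ := hclsyntax.ParseExpression(
			[]byte(`{ a = 1, b = 2 }`), "inline", hcl.Pos{Line: 1, Column: 1},
		)
		pairs, diags := hcl.ExprMap(expr)
		if diags.HasErrors() {
			panic(diags)
		}
		fmt.Println(len(pairs)) // 2
	}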

68
vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go generated vendored Normal file
View file

@ -0,0 +1,68 @@
package hcl
type unwrapExpression interface {
UnwrapExpression() Expression
}
// UnwrapExpression removes any "wrapper" expressions from the given expression,
// to recover the representation of the physical expression given in source
// code.
//
// Sometimes wrapping expressions are used to modify expression behavior, e.g.
// in extensions that need to make some local variables available to certain
// sub-trees of the configuration. This can make it difficult to reliably
// type-assert on the physical AST types used by the underlying syntax.
//
// Unwrapping an expression may modify its behavior by stripping away any
// additional constraints or capabilities being applied to the Value and
// Variables methods, so this function should generally only be used prior
// to operations that concern themselves with the static syntax of the input
// configuration, and not with the effective value of the expression.
//
// Wrapper expression types must support unwrapping by implementing a method
// called UnwrapExpression that takes no arguments and returns the embedded
// Expression. Implementations of this method should peel away only one level
// of wrapping, if multiple are present. This method may return nil to
// indicate _dynamically_ that no wrapped expression is available, for
// expression types that might only behave as wrappers in certain cases.
func UnwrapExpression(expr Expression) Expression {
for {
unwrap, wrapped := expr.(unwrapExpression)
if !wrapped {
return expr
}
innerExpr := unwrap.UnwrapExpression()
if innerExpr == nil {
return expr
}
expr = innerExpr
}
}
// UnwrapExpressionUntil is similar to UnwrapExpression except it gives the
// caller an opportunity to test each level of unwrapping to see whether a
// particular expression is accepted.
//
// This could be used, for example, to unwrap until a particular other
// interface is satisfied, regardless of which wrapping level it is satisfied
// at.
//
// The given callback function must return false to continue unwrapping, or
// true to accept and return the proposed expression given. If the callback
// function rejects even the final, physical expression then the result of
// this function is nil.
func UnwrapExpressionUntil(expr Expression, until func(Expression) bool) Expression {
for {
if until(expr) {
return expr
}
unwrap, wrapped := expr.(unwrapExpression)
if !wrapped {
return nil
}
expr = unwrap.UnwrapExpression()
if expr == nil {
return nil
}
}
}
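
To illustrate the wrapper protocol described above, here is a hypothetical wrapper type (annotatedExpr is not part of this library) and the unwrapping it enables; embedding hcl.Expression provides the interface methods, while UnwrapExpression exposes the wrapped physical expression:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl2/hcl"
		"github.com/hashicorp/hcl2/hcl/hclsyntax"
	)

	// annotatedExpr is a hypothetical wrapper such as an extension might use.
	type annotatedExpr struct {
		hcl.Expression
		note string
	}

	func (e *annotatedExpr) UnwrapExpression() hcl.Expression {
		return e.Expression
	}

	func main() {
		inner, _ := hclsyntax.ParseExpression(
			[]byte(`1 + 1`), "inline", hcl.Pos{Line: 1, Column: 1},
		)
		wrapped := &annotatedExpr{Expression: inner, note: "demo"}
		phys := hcl.UnwrapExpression(wrapped)
		fmt.Printf("%T\n", phys) // *hclsyntax.BinaryOpExpr
	}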


@ -0,0 +1,23 @@
package hclsyntax
import (
"github.com/hashicorp/hcl2/hcl"
)
// setDiagEvalContext is an internal helper that will impose a particular
// EvalContext on a set of diagnostics in-place, for any diagnostic that
// does not already have an EvalContext set.
//
// We generally expect diagnostics to be immutable, but this is safe to use
// on any Diagnostics where none of the contained Diagnostic objects have yet
// been seen by a caller. Its purpose is to apply additional context to a
// set of diagnostics produced by a "deeper" component as the stack unwinds
// during expression evaluation.
func setDiagEvalContext(diags hcl.Diagnostics, expr hcl.Expression, ctx *hcl.EvalContext) {
for _, diag := range diags {
if diag.Expression == nil {
diag.Expression = expr
diag.EvalContext = ctx
}
}
}


@ -0,0 +1,24 @@
package hclsyntax
import (
"github.com/agext/levenshtein"
)
// nameSuggestion tries to find a name from the given slice of suggested names
// that is close to the given name and returns it if found. If no suggestion
// is close enough, returns the empty string.
//
// The suggestions are tried in order, so earlier suggestions take precedence
// if the given string is similar to two or more suggestions.
//
// This function is intended to be used with a relatively-small number of
// suggestions. It's not optimized for hundreds or thousands of them.
func nameSuggestion(given string, suggestions []string) string {
for _, suggestion := range suggestions {
dist := levenshtein.Distance(given, suggestion, nil)
if dist < 3 { // threshold determined experimentally
return suggestion
}
}
return ""
}


@ -0,0 +1,7 @@
// Package hclsyntax contains the parser, AST, etc. for HCL's native language,
// as opposed to the JSON variant.
//
// In normal use applications should rarely depend on this package directly,
// instead preferring the higher-level interface of the main hcl package and
// its companion package hclparse.
package hclsyntax

File diff suppressed because it is too large


@ -0,0 +1,268 @@
package hclsyntax
import (
"fmt"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
"github.com/zclconf/go-cty/cty/function"
"github.com/zclconf/go-cty/cty/function/stdlib"
)
type Operation struct {
Impl function.Function
Type cty.Type
}
var (
OpLogicalOr = &Operation{
Impl: stdlib.OrFunc,
Type: cty.Bool,
}
OpLogicalAnd = &Operation{
Impl: stdlib.AndFunc,
Type: cty.Bool,
}
OpLogicalNot = &Operation{
Impl: stdlib.NotFunc,
Type: cty.Bool,
}
OpEqual = &Operation{
Impl: stdlib.EqualFunc,
Type: cty.Bool,
}
OpNotEqual = &Operation{
Impl: stdlib.NotEqualFunc,
Type: cty.Bool,
}
OpGreaterThan = &Operation{
Impl: stdlib.GreaterThanFunc,
Type: cty.Bool,
}
OpGreaterThanOrEqual = &Operation{
Impl: stdlib.GreaterThanOrEqualToFunc,
Type: cty.Bool,
}
OpLessThan = &Operation{
Impl: stdlib.LessThanFunc,
Type: cty.Bool,
}
OpLessThanOrEqual = &Operation{
Impl: stdlib.LessThanOrEqualToFunc,
Type: cty.Bool,
}
OpAdd = &Operation{
Impl: stdlib.AddFunc,
Type: cty.Number,
}
OpSubtract = &Operation{
Impl: stdlib.SubtractFunc,
Type: cty.Number,
}
OpMultiply = &Operation{
Impl: stdlib.MultiplyFunc,
Type: cty.Number,
}
OpDivide = &Operation{
Impl: stdlib.DivideFunc,
Type: cty.Number,
}
OpModulo = &Operation{
Impl: stdlib.ModuloFunc,
Type: cty.Number,
}
OpNegate = &Operation{
Impl: stdlib.NegateFunc,
Type: cty.Number,
}
)
var binaryOps []map[TokenType]*Operation
func init() {
// This operation table maps from the operator's token type
// to the AST operation type. All expressions produced from
// binary operators are BinaryOp nodes.
//
// Binary operator groups are listed in order of precedence, with
// the *lowest* precedence first. Operators within the same group
// have left-to-right associativity.
binaryOps = []map[TokenType]*Operation{
{
TokenOr: OpLogicalOr,
},
{
TokenAnd: OpLogicalAnd,
},
{
TokenEqualOp: OpEqual,
TokenNotEqual: OpNotEqual,
},
{
TokenGreaterThan: OpGreaterThan,
TokenGreaterThanEq: OpGreaterThanOrEqual,
TokenLessThan: OpLessThan,
TokenLessThanEq: OpLessThanOrEqual,
},
{
TokenPlus: OpAdd,
TokenMinus: OpSubtract,
},
{
TokenStar: OpMultiply,
TokenSlash: OpDivide,
TokenPercent: OpModulo,
},
}
}
type BinaryOpExpr struct {
LHS Expression
Op *Operation
RHS Expression
SrcRange hcl.Range
}
func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) {
e.LHS = w(e.LHS).(Expression)
e.RHS = w(e.RHS).(Expression)
}
func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
impl := e.Op.Impl // assumed to be a function taking exactly two arguments
params := impl.Params()
lhsParam := params[0]
rhsParam := params[1]
var diags hcl.Diagnostics
givenLHSVal, lhsDiags := e.LHS.Value(ctx)
givenRHSVal, rhsDiags := e.RHS.Value(ctx)
diags = append(diags, lhsDiags...)
diags = append(diags, rhsDiags...)
lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid operand",
Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err),
Subject: e.LHS.Range().Ptr(),
Context: &e.SrcRange,
Expression: e.LHS,
EvalContext: ctx,
})
}
rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid operand",
Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err),
Subject: e.RHS.Range().Ptr(),
Context: &e.SrcRange,
Expression: e.RHS,
EvalContext: ctx,
})
}
if diags.HasErrors() {
// Don't actually try the call if we have errors already, since this
// will probably just produce a confusing duplicative diagnostic.
return cty.UnknownVal(e.Op.Type), diags
}
args := []cty.Value{lhsVal, rhsVal}
result, err := impl.Call(args)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
// FIXME: This diagnostic is useless.
Severity: hcl.DiagError,
Summary: "Operation failed",
Detail: fmt.Sprintf("Error during operation: %s.", err),
Subject: &e.SrcRange,
Expression: e,
EvalContext: ctx,
})
return cty.UnknownVal(e.Op.Type), diags
}
return result, diags
}
func (e *BinaryOpExpr) Range() hcl.Range {
return e.SrcRange
}
func (e *BinaryOpExpr) StartRange() hcl.Range {
return e.LHS.StartRange()
}
type UnaryOpExpr struct {
Op *Operation
Val Expression
SrcRange hcl.Range
SymbolRange hcl.Range
}
func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) {
e.Val = w(e.Val).(Expression)
}
func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
impl := e.Op.Impl // assumed to be a function taking exactly one argument
params := impl.Params()
param := params[0]
givenVal, diags := e.Val.Value(ctx)
val, err := convert.Convert(givenVal, param.Type)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid operand",
Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err),
Subject: e.Val.Range().Ptr(),
Context: &e.SrcRange,
Expression: e.Val,
EvalContext: ctx,
})
}
if diags.HasErrors() {
// Don't actually try the call if we have errors already, since this
// will probably just produce a confusing duplicative diagnostic.
return cty.UnknownVal(e.Op.Type), diags
}
args := []cty.Value{val}
result, err := impl.Call(args)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
// FIXME: This diagnostic is useless.
Severity: hcl.DiagError,
Summary: "Operation failed",
Detail: fmt.Sprintf("Error during operation: %s.", err),
Subject: &e.SrcRange,
Expression: e,
EvalContext: ctx,
})
return cty.UnknownVal(e.Op.Type), diags
}
return result, diags
}
func (e *UnaryOpExpr) Range() hcl.Range {
return e.SrcRange
}
func (e *UnaryOpExpr) StartRange() hcl.Range {
return e.SymbolRange
}


@ -0,0 +1,200 @@
package hclsyntax
import (
"bytes"
"fmt"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
)
type TemplateExpr struct {
Parts []Expression
SrcRange hcl.Range
}
func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) {
for i, part := range e.Parts {
e.Parts[i] = w(part).(Expression)
}
}
func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
buf := &bytes.Buffer{}
var diags hcl.Diagnostics
isKnown := true
for _, part := range e.Parts {
partVal, partDiags := part.Value(ctx)
diags = append(diags, partDiags...)
if partVal.IsNull() {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid template interpolation value",
Detail: fmt.Sprintf(
"The expression result is null. Cannot include a null value in a string template.",
),
Subject: part.Range().Ptr(),
Context: &e.SrcRange,
Expression: part,
EvalContext: ctx,
})
continue
}
if !partVal.IsKnown() {
// If any part is unknown then the result as a whole must be
// unknown too. We'll keep on processing the rest of the parts
// anyway, because we want to still emit any diagnostics resulting
// from evaluating those.
isKnown = false
continue
}
strVal, err := convert.Convert(partVal, cty.String)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid template interpolation value",
Detail: fmt.Sprintf(
"Cannot include the given value in a string template: %s.",
err.Error(),
),
Subject: part.Range().Ptr(),
Context: &e.SrcRange,
Expression: part,
EvalContext: ctx,
})
continue
}
buf.WriteString(strVal.AsString())
}
if !isKnown {
return cty.UnknownVal(cty.String), diags
}
return cty.StringVal(buf.String()), diags
}
func (e *TemplateExpr) Range() hcl.Range {
return e.SrcRange
}
func (e *TemplateExpr) StartRange() hcl.Range {
return e.Parts[0].StartRange()
}
// TemplateJoinExpr is used to convert tuples of strings produced by template
// constructs (i.e. for loops) into flat strings, by converting the values
// to strings and joining them. This AST node is not used directly; it's
// produced as part of the AST of a "for" loop in a template.
type TemplateJoinExpr struct {
Tuple Expression
}
func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) {
e.Tuple = w(e.Tuple).(Expression)
}
func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
tuple, diags := e.Tuple.Value(ctx)
if tuple.IsNull() {
// This indicates a bug in the code that constructed the AST.
panic("TemplateJoinExpr got null tuple")
}
if tuple.Type() == cty.DynamicPseudoType {
return cty.UnknownVal(cty.String), diags
}
if !tuple.Type().IsTupleType() {
// This indicates a bug in the code that constructed the AST.
panic("TemplateJoinExpr got non-tuple tuple")
}
if !tuple.IsKnown() {
return cty.UnknownVal(cty.String), diags
}
buf := &bytes.Buffer{}
it := tuple.ElementIterator()
for it.Next() {
_, val := it.Element()
if val.IsNull() {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid template interpolation value",
Detail: fmt.Sprintf(
"An iteration result is null. Cannot include a null value in a string template.",
),
Subject: e.Range().Ptr(),
Expression: e,
EvalContext: ctx,
})
continue
}
if val.Type() == cty.DynamicPseudoType {
return cty.UnknownVal(cty.String), diags
}
strVal, err := convert.Convert(val, cty.String)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid template interpolation value",
Detail: fmt.Sprintf(
"Cannot include one of the interpolation results into the string template: %s.",
err.Error(),
),
Subject: e.Range().Ptr(),
Expression: e,
EvalContext: ctx,
})
continue
}
if !val.IsKnown() {
return cty.UnknownVal(cty.String), diags
}
buf.WriteString(strVal.AsString())
}
return cty.StringVal(buf.String()), diags
}
func (e *TemplateJoinExpr) Range() hcl.Range {
return e.Tuple.Range()
}
func (e *TemplateJoinExpr) StartRange() hcl.Range {
return e.Tuple.StartRange()
}
// TemplateWrapExpr is used instead of a TemplateExpr when a template
// consists _only_ of a single interpolation sequence. In that case, the
// template's result is the single interpolation's result, verbatim with
// no type conversions.
type TemplateWrapExpr struct {
Wrapped Expression
SrcRange hcl.Range
}
func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) {
e.Wrapped = w(e.Wrapped).(Expression)
}
func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
return e.Wrapped.Value(ctx)
}
func (e *TemplateWrapExpr) Range() hcl.Range {
return e.SrcRange
}
func (e *TemplateWrapExpr) StartRange() hcl.Range {
return e.SrcRange
}
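
A sketch of template evaluation covering both interpolation and an if directive, assuming hclsyntax.ParseTemplate from this package; the template text and variables are invented:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl2/hcl"
		"github.com/hashicorp/hcl2/hcl/hclsyntax"
		"github.com/zclconf/go-cty/cty"
	)

	func main() {
		src := []byte(`Hello, %{ if excited }${name}!!%{ else }${name}.%{ endif }`)
		expr, diags := hclsyntax.ParseTemplate(src, "greeting.tmpl", hcl.Pos{Line: 1, Column: 1})
		if diags.HasErrors() {
			panic(diags)
		}
		ctx := &hcl.EvalContext{
			Variables: map[string]cty.Value{
				"name":    cty.StringVal("world"),
				"excited": cty.True,
			},
		}
		val, valDiags := expr.Value(ctx)
		if valDiags.HasErrors() {
			panic(valDiags)
		}
		fmt.Println(val.AsString()) // Hello, world!!
	}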


@ -0,0 +1,76 @@
package hclsyntax
// Generated by expression_vars_get.go. DO NOT EDIT.
// Run 'go generate' on this package to update the set of functions here.
import (
"github.com/hashicorp/hcl2/hcl"
)
func (e *AnonSymbolExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *BinaryOpExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *ConditionalExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *ForExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *FunctionCallExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *IndexExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *LiteralValueExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *ObjectConsExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *ObjectConsKeyExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *RelativeTraversalExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *ScopeTraversalExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *SplatExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *TemplateExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *TemplateJoinExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *TemplateWrapExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *TupleConsExpr) Variables() []hcl.Traversal {
return Variables(e)
}
func (e *UnaryOpExpr) Variables() []hcl.Traversal {
return Variables(e)
}

20
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go generated vendored Normal file

@ -0,0 +1,20 @@
package hclsyntax
import (
"github.com/hashicorp/hcl2/hcl"
)
// File is the top-level object resulting from parsing a configuration file.
type File struct {
Body *Body
Bytes []byte
}
func (f *File) AsHCLFile() *hcl.File {
return &hcl.File{
Body: f.Body,
Bytes: f.Bytes,
// TODO: The Nav object, once we have an implementation of it
}
}


@ -0,0 +1,9 @@
package hclsyntax
//go:generate go run expression_vars_gen.go
//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt -m UnicodeDerived -p ID_Start,ID_Continue -o unicode_derived.rl
//go:generate ragel -Z scan_tokens.rl
//go:generate gofmt -w scan_tokens.go
//go:generate ragel -Z scan_string_lit.rl
//go:generate gofmt -w scan_string_lit.go
//go:generate stringer -type TokenType -output token_type_string.go


@ -0,0 +1,21 @@
package hclsyntax
import (
"bytes"
)
type Keyword []byte
var forKeyword = Keyword([]byte{'f', 'o', 'r'})
var inKeyword = Keyword([]byte{'i', 'n'})
var ifKeyword = Keyword([]byte{'i', 'f'})
var elseKeyword = Keyword([]byte{'e', 'l', 's', 'e'})
var endifKeyword = Keyword([]byte{'e', 'n', 'd', 'i', 'f'})
var endforKeyword = Keyword([]byte{'e', 'n', 'd', 'f', 'o', 'r'})
func (kw Keyword) TokenMatches(token Token) bool {
if token.Type != TokenIdent {
return false
}
return bytes.Equal([]byte(kw), token.Bytes)
}


@ -0,0 +1,41 @@
package hclsyntax
import (
"bytes"
"fmt"
)
type navigation struct {
root *Body
}
// Implementation of hcled.ContextString
func (n navigation) ContextString(offset int) string {
// We will walk our top-level blocks until we find one that contains
// the given offset, and then construct a representation of the header
// of the block.
var block *Block
for _, candidate := range n.root.Blocks {
if candidate.Range().ContainsOffset(offset) {
block = candidate
break
}
}
if block == nil {
return ""
}
if len(block.Labels) == 0 {
// Easy case!
return block.Type
}
buf := &bytes.Buffer{}
buf.WriteString(block.Type)
for _, label := range block.Labels {
fmt.Fprintf(buf, " %q", label)
}
return buf.String()
}

22
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go generated vendored Normal file

@ -0,0 +1,22 @@
package hclsyntax
import (
"github.com/hashicorp/hcl2/hcl"
)
// Node is the abstract type that every AST node implements.
//
// This is a closed interface, so it cannot be implemented from outside of
// this package.
type Node interface {
// This is the mechanism by which the public-facing walk functions
// are implemented. Implementations should call the given function
// for each child node and then replace that node with its return value.
// The return value might just be the same node, for non-transforming
// walks.
walkChildNodes(w internalWalkFunc)
Range() hcl.Range
}
type internalWalkFunc func(Node) Node

1836
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go generated vendored Normal file

File diff suppressed because it is too large


@ -0,0 +1,728 @@
package hclsyntax
import (
"fmt"
"strings"
"unicode"
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
)
func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) {
return p.parseTemplate(TokenEOF)
}
func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) {
exprs, passthru, rng, diags := p.parseTemplateInner(end)
if passthru {
if len(exprs) != 1 {
panic("passthru set with len(exprs) != 1")
}
return &TemplateWrapExpr{
Wrapped: exprs[0],
SrcRange: rng,
}, diags
}
return &TemplateExpr{
Parts: exprs,
SrcRange: rng,
}, diags
}
func (p *parser) parseTemplateInner(end TokenType) ([]Expression, bool, hcl.Range, hcl.Diagnostics) {
parts, diags := p.parseTemplateParts(end)
tp := templateParser{
Tokens: parts.Tokens,
SrcRange: parts.SrcRange,
}
exprs, exprsDiags := tp.parseRoot()
diags = append(diags, exprsDiags...)
passthru := false
if len(parts.Tokens) == 2 { // one real token and one synthetic "end" token
if _, isInterp := parts.Tokens[0].(*templateInterpToken); isInterp {
passthru = true
}
}
return exprs, passthru, parts.SrcRange, diags
}
type templateParser struct {
Tokens []templateToken
SrcRange hcl.Range
pos int
}
func (p *templateParser) parseRoot() ([]Expression, hcl.Diagnostics) {
var exprs []Expression
var diags hcl.Diagnostics
for {
next := p.Peek()
if _, isEnd := next.(*templateEndToken); isEnd {
break
}
expr, exprDiags := p.parseExpr()
diags = append(diags, exprDiags...)
exprs = append(exprs, expr)
}
return exprs, diags
}
func (p *templateParser) parseExpr() (Expression, hcl.Diagnostics) {
next := p.Peek()
switch tok := next.(type) {
case *templateLiteralToken:
p.Read() // eat literal
return &LiteralValueExpr{
Val: cty.StringVal(tok.Val),
SrcRange: tok.SrcRange,
}, nil
case *templateInterpToken:
p.Read() // eat interp
return tok.Expr, nil
case *templateIfToken:
return p.parseIf()
case *templateForToken:
return p.parseFor()
case *templateEndToken:
p.Read() // eat erroneous token
return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{
{
// This is a particularly unhelpful diagnostic, so callers
// should attempt to pre-empt it and produce a more helpful
// diagnostic that is context-aware.
Severity: hcl.DiagError,
Summary: "Unexpected end of template",
Detail: "The control directives within this template are unbalanced.",
Subject: &tok.SrcRange,
},
}
case *templateEndCtrlToken:
p.Read() // eat erroneous token
return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Unexpected %s directive", tok.Name()),
Detail: "The control directives within this template are unbalanced.",
Subject: &tok.SrcRange,
},
}
default:
// should never happen, because above should be exhaustive
panic(fmt.Sprintf("unhandled template token type %T", next))
}
}
func (p *templateParser) parseIf() (Expression, hcl.Diagnostics) {
open := p.Read()
openIf, isIf := open.(*templateIfToken)
if !isIf {
// should never happen if caller is behaving
panic("parseIf called with peeker not pointing at if token")
}
var ifExprs, elseExprs []Expression
var diags hcl.Diagnostics
var endifRange hcl.Range
currentExprs := &ifExprs
Token:
for {
next := p.Peek()
if end, isEnd := next.(*templateEndToken); isEnd {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unexpected end of template",
Detail: fmt.Sprintf(
"The if directive at %s is missing its corresponding endif directive.",
openIf.SrcRange,
),
Subject: &end.SrcRange,
})
return errPlaceholderExpr(end.SrcRange), diags
}
if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd {
p.Read() // eat end directive
switch end.Type {
case templateElse:
if currentExprs == &ifExprs {
currentExprs = &elseExprs
continue Token
}
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unexpected else directive",
Detail: fmt.Sprintf(
"Already in the else clause for the if started at %s.",
openIf.SrcRange,
),
Subject: &end.SrcRange,
})
case templateEndIf:
endifRange = end.SrcRange
break Token
default:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Unexpected %s directive", end.Name()),
Detail: fmt.Sprintf(
"Expecting an endif directive for the if started at %s.",
openIf.SrcRange,
),
Subject: &end.SrcRange,
})
}
return errPlaceholderExpr(end.SrcRange), diags
}
expr, exprDiags := p.parseExpr()
diags = append(diags, exprDiags...)
*currentExprs = append(*currentExprs, expr)
}
if len(ifExprs) == 0 {
ifExprs = append(ifExprs, &LiteralValueExpr{
Val: cty.StringVal(""),
SrcRange: hcl.Range{
Filename: openIf.SrcRange.Filename,
Start: openIf.SrcRange.End,
End: openIf.SrcRange.End,
},
})
}
if len(elseExprs) == 0 {
elseExprs = append(elseExprs, &LiteralValueExpr{
Val: cty.StringVal(""),
SrcRange: hcl.Range{
Filename: endifRange.Filename,
Start: endifRange.Start,
End: endifRange.Start,
},
})
}
trueExpr := &TemplateExpr{
Parts: ifExprs,
SrcRange: hcl.RangeBetween(ifExprs[0].Range(), ifExprs[len(ifExprs)-1].Range()),
}
falseExpr := &TemplateExpr{
Parts: elseExprs,
SrcRange: hcl.RangeBetween(elseExprs[0].Range(), elseExprs[len(elseExprs)-1].Range()),
}
return &ConditionalExpr{
Condition: openIf.CondExpr,
TrueResult: trueExpr,
FalseResult: falseExpr,
SrcRange: hcl.RangeBetween(openIf.SrcRange, endifRange),
}, diags
}
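As the code above shows, an if directive desugars into a ConditionalExpr whose true and false results are themselves templates, with empty-string literals substituted for any missing branch. A sketch of the observable behavior, reusing the imports from the previous sketch (template text and variable name are illustrative):

```go
func renderIf(greet bool) (string, hcl.Diagnostics) {
	expr, diags := hclsyntax.ParseTemplate(
		[]byte("%{ if greet }Hello%{ else }Goodbye%{ endif }"),
		"if.tmpl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		return "", diags
	}
	val, moreDiags := expr.Value(&hcl.EvalContext{
		Variables: map[string]cty.Value{"greet": cty.BoolVal(greet)},
	})
	if moreDiags.HasErrors() {
		return "", append(diags, moreDiags...)
	}
	return val.AsString(), nil // "Hello" when greet is true
}
```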
func (p *templateParser) parseFor() (Expression, hcl.Diagnostics) {
open := p.Read()
openFor, isFor := open.(*templateForToken)
if !isFor {
// should never happen if caller is behaving
panic("parseFor called with peeker not pointing at for token")
}
var contentExprs []Expression
var diags hcl.Diagnostics
var endforRange hcl.Range
Token:
for {
next := p.Peek()
if end, isEnd := next.(*templateEndToken); isEnd {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unexpected end of template",
Detail: fmt.Sprintf(
"The for directive at %s is missing its corresponding endfor directive.",
openFor.SrcRange,
),
Subject: &end.SrcRange,
})
return errPlaceholderExpr(end.SrcRange), diags
}
if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd {
p.Read() // eat end directive
switch end.Type {
case templateElse:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unexpected else directive",
Detail: "An else clause is not expected for a for directive.",
Subject: &end.SrcRange,
})
case templateEndFor:
endforRange = end.SrcRange
break Token
default:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Unexpected %s directive", end.Name()),
Detail: fmt.Sprintf(
"Expecting an endfor directive corresponding to the for directive at %s.",
openFor.SrcRange,
),
Subject: &end.SrcRange,
})
}
return errPlaceholderExpr(end.SrcRange), diags
}
expr, exprDiags := p.parseExpr()
diags = append(diags, exprDiags...)
contentExprs = append(contentExprs, expr)
}
if len(contentExprs) == 0 {
contentExprs = append(contentExprs, &LiteralValueExpr{
Val: cty.StringVal(""),
SrcRange: hcl.Range{
Filename: openFor.SrcRange.Filename,
Start: openFor.SrcRange.End,
End: openFor.SrcRange.End,
},
})
}
contentExpr := &TemplateExpr{
Parts: contentExprs,
SrcRange: hcl.RangeBetween(contentExprs[0].Range(), contentExprs[len(contentExprs)-1].Range()),
}
forExpr := &ForExpr{
KeyVar: openFor.KeyVar,
ValVar: openFor.ValVar,
CollExpr: openFor.CollExpr,
ValExpr: contentExpr,
SrcRange: hcl.RangeBetween(openFor.SrcRange, endforRange),
OpenRange: openFor.SrcRange,
CloseRange: endforRange,
}
return &TemplateJoinExpr{
Tuple: forExpr,
}, diags
}
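Similarly, a for directive becomes a ForExpr over the collection, with the body template as its value expression, wrapped in a TemplateJoinExpr that concatenates the per-element results. A sketch under the same import assumptions:

```go
func renderFor() (string, hcl.Diagnostics) {
	expr, diags := hclsyntax.ParseTemplate(
		[]byte("%{ for s in names }<${s}>%{ endfor }"),
		"for.tmpl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		return "", diags
	}
	val, moreDiags := expr.Value(&hcl.EvalContext{
		Variables: map[string]cty.Value{
			"names": cty.ListVal([]cty.Value{
				cty.StringVal("a"), cty.StringVal("b"),
			}),
		},
	})
	if moreDiags.HasErrors() {
		return "", append(diags, moreDiags...)
	}
	return val.AsString(), nil // "<a><b>"
}
```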
func (p *templateParser) Peek() templateToken {
return p.Tokens[p.pos]
}
func (p *templateParser) Read() templateToken {
ret := p.Peek()
if _, end := ret.(*templateEndToken); !end {
p.pos++
}
return ret
}
// parseTemplateParts produces a flat sequence of "template tokens", which are
// either literal values (with any "trimming" already applied), interpolation
// sequences, or control flow markers.
//
// A further pass is required on the result to turn it into an AST.
func (p *parser) parseTemplateParts(end TokenType) (*templateParts, hcl.Diagnostics) {
var parts []templateToken
var diags hcl.Diagnostics
startRange := p.NextRange()
ltrimNext := false
nextCanTrimPrev := false
var endRange hcl.Range
Token:
for {
next := p.Read()
if next.Type == end {
// all done!
endRange = next.Range
break
}
ltrim := ltrimNext
ltrimNext = false
canTrimPrev := nextCanTrimPrev
nextCanTrimPrev = false
switch next.Type {
case TokenStringLit, TokenQuotedLit:
str, strDiags := p.decodeStringLit(next)
diags = append(diags, strDiags...)
if ltrim {
str = strings.TrimLeftFunc(str, unicode.IsSpace)
}
parts = append(parts, &templateLiteralToken{
Val: str,
SrcRange: next.Range,
})
nextCanTrimPrev = true
case TokenTemplateInterp:
// if the opener is ${~ then we want to eat any trailing whitespace
// in the preceding literal token, assuming it is indeed a literal
// token.
if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 {
prevExpr := parts[len(parts)-1]
if lexpr, ok := prevExpr.(*templateLiteralToken); ok {
lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace)
}
}
p.PushIncludeNewlines(false)
expr, exprDiags := p.ParseExpression()
diags = append(diags, exprDiags...)
close := p.Peek()
if close.Type != TokenTemplateSeqEnd {
if !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Extra characters after interpolation expression",
Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.",
Subject: &close.Range,
Context: hcl.RangeBetween(startRange, close.Range).Ptr(),
})
}
p.recover(TokenTemplateSeqEnd)
} else {
p.Read() // eat closing brace
// If the closer is ~} then we want to eat any leading
// whitespace on the next token, if it turns out to be a
// literal token.
if len(close.Bytes) == 2 && close.Bytes[0] == '~' {
ltrimNext = true
}
}
p.PopIncludeNewlines()
parts = append(parts, &templateInterpToken{
Expr: expr,
SrcRange: hcl.RangeBetween(next.Range, close.Range),
})
case TokenTemplateControl:
// if the opener is %{~ then we want to eat any trailing whitespace
// in the preceding literal token, assuming it is indeed a literal
// token.
if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 {
prevExpr := parts[len(parts)-1]
if lexpr, ok := prevExpr.(*templateLiteralToken); ok {
lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace)
}
}
p.PushIncludeNewlines(false)
kw := p.Peek()
if kw.Type != TokenIdent {
if !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid template directive",
Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a %{ sequence.",
Subject: &kw.Range,
Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(),
})
}
p.recover(TokenTemplateSeqEnd)
p.PopIncludeNewlines()
continue Token
}
p.Read() // eat keyword token
switch {
case ifKeyword.TokenMatches(kw):
condExpr, exprDiags := p.ParseExpression()
diags = append(diags, exprDiags...)
parts = append(parts, &templateIfToken{
CondExpr: condExpr,
SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
})
case elseKeyword.TokenMatches(kw):
parts = append(parts, &templateEndCtrlToken{
Type: templateElse,
SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
})
case endifKeyword.TokenMatches(kw):
parts = append(parts, &templateEndCtrlToken{
Type: templateEndIf,
SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
})
case forKeyword.TokenMatches(kw):
var keyName, valName string
if p.Peek().Type != TokenIdent {
if !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid 'for' directive",
Detail: "For directive requires variable name after 'for'.",
Subject: p.Peek().Range.Ptr(),
})
}
p.recover(TokenTemplateSeqEnd)
p.PopIncludeNewlines()
continue Token
}
valName = string(p.Read().Bytes)
if p.Peek().Type == TokenComma {
// What we just read was actually the key, then.
keyName = valName
p.Read() // eat comma
if p.Peek().Type != TokenIdent {
if !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid 'for' directive",
Detail: "For directive requires value variable name after comma.",
Subject: p.Peek().Range.Ptr(),
})
}
p.recover(TokenTemplateSeqEnd)
p.PopIncludeNewlines()
continue Token
}
valName = string(p.Read().Bytes)
}
if !inKeyword.TokenMatches(p.Peek()) {
if !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid 'for' directive",
Detail: "For directive requires 'in' keyword after names.",
Subject: p.Peek().Range.Ptr(),
})
}
p.recover(TokenTemplateSeqEnd)
p.PopIncludeNewlines()
continue Token
}
p.Read() // eat 'in' keyword
collExpr, collDiags := p.ParseExpression()
diags = append(diags, collDiags...)
parts = append(parts, &templateForToken{
KeyVar: keyName,
ValVar: valName,
CollExpr: collExpr,
SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
})
case endforKeyword.TokenMatches(kw):
parts = append(parts, &templateEndCtrlToken{
Type: templateEndFor,
SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
})
default:
if !p.recovery {
suggestions := []string{"if", "for", "else", "endif", "endfor"}
given := string(kw.Bytes)
suggestion := nameSuggestion(given, suggestions)
if suggestion != "" {
suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
}
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid template control keyword",
Detail: fmt.Sprintf("%q is not a valid template control keyword.%s", given, suggestion),
Subject: &kw.Range,
Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(),
})
}
p.recover(TokenTemplateSeqEnd)
p.PopIncludeNewlines()
continue Token
}
close := p.Peek()
if close.Type != TokenTemplateSeqEnd {
if !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Extra characters in %s marker", kw.Bytes),
Detail: "Expected a closing brace to end the sequence, but found extra characters.",
Subject: &close.Range,
Context: hcl.RangeBetween(startRange, close.Range).Ptr(),
})
}
p.recover(TokenTemplateSeqEnd)
} else {
p.Read() // eat closing brace
// If the closer is ~} then we want to eat any leading
// whitespace on the next token, if it turns out to be a
// literal token.
if len(close.Bytes) == 2 && close.Bytes[0] == '~' {
ltrimNext = true
}
}
p.PopIncludeNewlines()
default:
if !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unterminated template string",
Detail: "No closing marker was found for the string.",
Subject: &next.Range,
Context: hcl.RangeBetween(startRange, next.Range).Ptr(),
})
}
final := p.recover(end)
endRange = final.Range
break Token
}
}
if len(parts) == 0 {
// If a sequence has no content, we'll treat it as if it had an
// empty string in it because that's what the user probably means
// if they write "" in configuration.
parts = append(parts, &templateLiteralToken{
Val: "",
SrcRange: hcl.Range{
// Range is the zero-character span immediately after the
// opening quote.
Filename: startRange.Filename,
Start: startRange.End,
End: startRange.End,
},
})
}
// Always end with an end token, so the parser can produce diagnostics
// about unclosed items with proper position information.
parts = append(parts, &templateEndToken{
SrcRange: endRange,
})
ret := &templateParts{
Tokens: parts,
SrcRange: hcl.RangeBetween(startRange, endRange),
}
return ret, diags
}
type templateParts struct {
Tokens []templateToken
SrcRange hcl.Range
}
// templateToken is a higher-level token that represents a single atom within
// the template language. Our template parsing first raises the raw token
// stream to a sequence of templateToken, and then transforms the result into
// an expression tree.
type templateToken interface {
templateToken() templateToken
}
type templateLiteralToken struct {
Val string
SrcRange hcl.Range
isTemplateToken
}
type templateInterpToken struct {
Expr Expression
SrcRange hcl.Range
isTemplateToken
}
type templateIfToken struct {
CondExpr Expression
SrcRange hcl.Range
isTemplateToken
}
type templateForToken struct {
KeyVar string // empty if ignoring key
ValVar string
CollExpr Expression
SrcRange hcl.Range
isTemplateToken
}
type templateEndCtrlType int
const (
templateEndIf templateEndCtrlType = iota
templateElse
templateEndFor
)
type templateEndCtrlToken struct {
Type templateEndCtrlType
SrcRange hcl.Range
isTemplateToken
}
func (t *templateEndCtrlToken) Name() string {
switch t.Type {
case templateEndIf:
return "endif"
case templateElse:
return "else"
case templateEndFor:
return "endfor"
default:
// should never happen
panic("invalid templateEndCtrlType")
}
}
type templateEndToken struct {
SrcRange hcl.Range
isTemplateToken
}
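// isTemplateToken is a zero-size marker type; embedding it supplies the
// unexported templateToken method that satisfies the closed templateToken
// interface without adding any storage to the embedding struct.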
type isTemplateToken [0]int
func (t isTemplateToken) templateToken() templateToken {
return t
}

159
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go generated vendored Normal file

@ -0,0 +1,159 @@
package hclsyntax
import (
"github.com/hashicorp/hcl2/hcl"
"github.com/zclconf/go-cty/cty"
)
// ParseTraversalAbs parses an absolute traversal that is assumed to consume
// all of the remaining tokens in the peeker. The usual parser recovery
// behavior is not supported here because traversals are not expected to
// be parsed as part of a larger program.
func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) {
var ret hcl.Traversal
var diags hcl.Diagnostics
// Absolute traversal must always begin with a variable name
varTok := p.Read()
if varTok.Type != TokenIdent {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Variable name required",
Detail: "Must begin with a variable name.",
Subject: &varTok.Range,
})
return ret, diags
}
varName := string(varTok.Bytes)
ret = append(ret, hcl.TraverseRoot{
Name: varName,
SrcRange: varTok.Range,
})
for {
next := p.Peek()
if next.Type == TokenEOF {
return ret, diags
}
switch next.Type {
case TokenDot:
// Attribute access
dot := p.Read() // eat dot
nameTok := p.Read()
if nameTok.Type != TokenIdent {
if nameTok.Type == TokenStar {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Attribute name required",
Detail: "Splat expressions (.*) may not be used here.",
Subject: &nameTok.Range,
Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(),
})
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Attribute name required",
Detail: "Dot must be followed by attribute name.",
Subject: &nameTok.Range,
Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(),
})
}
return ret, diags
}
attrName := string(nameTok.Bytes)
ret = append(ret, hcl.TraverseAttr{
Name: attrName,
SrcRange: hcl.RangeBetween(dot.Range, nameTok.Range),
})
case TokenOBrack:
// Index
open := p.Read() // eat open bracket
next := p.Peek()
switch next.Type {
case TokenNumberLit:
tok := p.Read() // eat number
numVal, numDiags := p.numberLitValue(tok)
diags = append(diags, numDiags...)
close := p.Read()
if close.Type != TokenCBrack {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unclosed index brackets",
Detail: "Index key must be followed by a closing bracket.",
Subject: &close.Range,
Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
})
}
ret = append(ret, hcl.TraverseIndex{
Key: numVal,
SrcRange: hcl.RangeBetween(open.Range, close.Range),
})
if diags.HasErrors() {
return ret, diags
}
case TokenOQuote:
str, _, strDiags := p.parseQuotedStringLiteral()
diags = append(diags, strDiags...)
close := p.Read()
if close.Type != TokenCBrack {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unclosed index brackets",
Detail: "Index key must be followed by a closing bracket.",
Subject: &close.Range,
Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
})
}
ret = append(ret, hcl.TraverseIndex{
Key: cty.StringVal(str),
SrcRange: hcl.RangeBetween(open.Range, close.Range),
})
if diags.HasErrors() {
return ret, diags
}
default:
if next.Type == TokenStar {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Attribute name required",
Detail: "Splat expressions ([*]) may not be used here.",
Subject: &next.Range,
Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
})
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Index value required",
Detail: "Index brackets must contain either a literal number or a literal string.",
Subject: &next.Range,
Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
})
}
return ret, diags
}
default:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid character",
Detail: "Expected an attribute access or an index operator.",
Subject: &next.Range,
Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
})
return ret, diags
}
}
}
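A minimal sketch of this entry point via the public wrapper in public.go, assuming the vendored import paths (the traversal text is illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	trav, diags := hclsyntax.ParseTraversalAbs(
		[]byte(`foo.bar[0]`), "example.hcl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	for _, step := range trav {
		switch s := step.(type) {
		case hcl.TraverseRoot:
			fmt.Println("root:", s.Name) // root: foo
		case hcl.TraverseAttr:
			fmt.Println("attr:", s.Name) // attr: bar
		case hcl.TraverseIndex:
			fmt.Println("index:", s.Key) // the index key, as a cty.Value
		}
	}
}
```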

212
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go generated vendored Normal file

@ -0,0 +1,212 @@
package hclsyntax
import (
"bytes"
"fmt"
"path/filepath"
"runtime"
"strings"
"github.com/hashicorp/hcl2/hcl"
)
// This is set to true at init() time in tests, to enable more useful output
// if a stack discipline error is detected. It should not be enabled in
// normal mode since there is a performance penalty from accessing the
// runtime stack to produce the traces, but could be temporarily set to
// true for debugging if desired.
var tracePeekerNewlinesStack = false
type peeker struct {
Tokens Tokens
NextIndex int
IncludeComments bool
IncludeNewlinesStack []bool
// used only when tracePeekerNewlinesStack is set
newlineStackChanges []peekerNewlineStackChange
}
// for use in debugging the stack usage only
type peekerNewlineStackChange struct {
Pushing bool // if false, then popping
Frame runtime.Frame
Include bool
}
func newPeeker(tokens Tokens, includeComments bool) *peeker {
return &peeker{
Tokens: tokens,
IncludeComments: includeComments,
IncludeNewlinesStack: []bool{true},
}
}
func (p *peeker) Peek() Token {
ret, _ := p.nextToken()
return ret
}
func (p *peeker) Read() Token {
ret, nextIdx := p.nextToken()
p.NextIndex = nextIdx
return ret
}
func (p *peeker) NextRange() hcl.Range {
return p.Peek().Range
}
func (p *peeker) PrevRange() hcl.Range {
if p.NextIndex == 0 {
return p.NextRange()
}
return p.Tokens[p.NextIndex-1].Range
}
func (p *peeker) nextToken() (Token, int) {
for i := p.NextIndex; i < len(p.Tokens); i++ {
tok := p.Tokens[i]
switch tok.Type {
case TokenComment:
if !p.IncludeComments {
// Single-line comment tokens, starting with # or //, absorb
// the trailing newline that terminates them as part of their
// bytes. When we're filtering out comments, we must as a
// special case transform these to newline tokens in order
// to properly parse newline-terminated block items.
if p.includingNewlines() {
if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
fakeNewline := Token{
Type: TokenNewline,
Bytes: tok.Bytes[len(tok.Bytes)-1 : len(tok.Bytes)],
// We use the whole token range as the newline
// range, even though that's a little... weird,
// because otherwise we'd need to go count
// characters again in order to figure out the
// column of the newline, and that complexity
// isn't justified when ranges of newlines are
// so rarely printed anyway.
Range: tok.Range,
}
return fakeNewline, i + 1
}
}
continue
}
case TokenNewline:
if !p.includingNewlines() {
continue
}
}
return tok, i + 1
}
// if we fall out here then we'll return the EOF token, and leave
// our index pointed off the end of the array so we'll keep
// returning EOF in future too.
return p.Tokens[len(p.Tokens)-1], len(p.Tokens)
}
func (p *peeker) includingNewlines() bool {
return p.IncludeNewlinesStack[len(p.IncludeNewlinesStack)-1]
}
func (p *peeker) PushIncludeNewlines(include bool) {
if tracePeekerNewlinesStack {
// Record who called us so that we can more easily track down any
// mismanagement of the stack in the parser.
callers := []uintptr{0}
runtime.Callers(2, callers)
frames := runtime.CallersFrames(callers)
frame, _ := frames.Next()
p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
true, frame, include,
})
}
p.IncludeNewlinesStack = append(p.IncludeNewlinesStack, include)
}
func (p *peeker) PopIncludeNewlines() bool {
stack := p.IncludeNewlinesStack
remain, ret := stack[:len(stack)-1], stack[len(stack)-1]
p.IncludeNewlinesStack = remain
if tracePeekerNewlinesStack {
// Record who called us so that we can more easily track down any
// mismanagement of the stack in the parser.
callers := []uintptr{0}
runtime.Callers(2, callers)
frames := runtime.CallersFrames(callers)
frame, _ := frames.Next()
p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
false, frame, ret,
})
}
return ret
}
// AssertEmptyIncludeNewlinesStack checks that the IncludeNewlinesStack is
// empty, panicking if it is not. This can be used to catch stack mismanagement that
// might otherwise just cause confusing downstream errors.
//
// This function is a no-op if the stack is empty when called.
//
// If newlines stack tracing is enabled by setting the global variable
// tracePeekerNewlinesStack at init time, a full log of all of the push/pop
// calls will be produced to help identify which caller in the parser is
// misbehaving.
func (p *peeker) AssertEmptyIncludeNewlinesStack() {
if len(p.IncludeNewlinesStack) != 1 {
// Should never happen; indicates mismanagement of the stack inside
// the parser.
if p.newlineStackChanges != nil { // only if tracePeekerNewlinesStack is enabled above
panic(fmt.Errorf(
"non-empty IncludeNewlinesStack after parse with %d calls unaccounted for:\n%s",
len(p.IncludeNewlinesStack)-1,
formatPeekerNewlineStackChanges(p.newlineStackChanges),
))
} else {
panic(fmt.Errorf("non-empty IncludeNewlinesStack after parse: %#v", p.IncludeNewlinesStack))
}
}
}
func formatPeekerNewlineStackChanges(changes []peekerNewlineStackChange) string {
indent := 0
var buf bytes.Buffer
for _, change := range changes {
funcName := change.Frame.Function
if idx := strings.LastIndexByte(funcName, '.'); idx != -1 {
funcName = funcName[idx+1:]
}
filename := change.Frame.File
if idx := strings.LastIndexByte(filename, filepath.Separator); idx != -1 {
filename = filename[idx+1:]
}
switch change.Pushing {
case true:
buf.WriteString(strings.Repeat(" ", indent))
fmt.Fprintf(&buf, "PUSH %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
indent++
case false:
indent--
buf.WriteString(strings.Repeat(" ", indent))
fmt.Fprintf(&buf, "POP %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
}
}
return buf.String()
}
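As the comment at the top of this file notes, tracePeekerNewlinesStack is meant to be flipped on from an init function in tests. A hypothetical sketch of such a toggle (not part of this commit):

```go
// In a _test.go file inside the hclsyntax package:
func init() {
	tracePeekerNewlinesStack = true // hypothetical debugging aid only
}
```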

171
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go generated vendored Normal file

@ -0,0 +1,171 @@
package hclsyntax
import (
"github.com/hashicorp/hcl2/hcl"
)
// ParseConfig parses the given buffer as a whole HCL config file, returning
// a *hcl.File representing its contents. If HasErrors on the returned
// diagnostics returns true, the returned body is likely to be incomplete
// and should therefore be used with care.
//
// The body in the returned file has dynamic type *hclsyntax.Body, so callers
// may freely type-assert this to get access to the full hclsyntax API in
// situations where detailed access is required. However, most common use-cases
// should be served using the hcl.Body interface to ensure compatibility with
// other configuration syntaxes, such as JSON.
func ParseConfig(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) {
tokens, diags := LexConfig(src, filename, start)
peeker := newPeeker(tokens, false)
parser := &parser{peeker: peeker}
body, parseDiags := parser.ParseBody(TokenEOF)
diags = append(diags, parseDiags...)
// Panic if the parser uses incorrect stack discipline with the peeker's
// newlines stack, since otherwise it will produce confusing downstream
// errors.
peeker.AssertEmptyIncludeNewlinesStack()
return &hcl.File{
Body: body,
Bytes: src,
Nav: navigation{
root: body,
},
}, diags
}
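A minimal usage sketch for ParseConfig, including the type assertion the doc comment mentions (vendored import paths assumed; the file contents are illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := []byte("io_mode = \"async\"\n")
	file, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// The doc comment above permits asserting to *hclsyntax.Body for
	// detailed access beyond the hcl.Body interface.
	body := file.Body.(*hclsyntax.Body)
	for name := range body.Attributes {
		fmt.Println("attribute:", name) // attribute: io_mode
	}
}
```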
// ParseExpression parses the given buffer as a standalone HCL expression,
// returning it as an instance of Expression.
func ParseExpression(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
tokens, diags := LexExpression(src, filename, start)
peeker := newPeeker(tokens, false)
parser := &parser{peeker: peeker}
// Bare expressions are always parsed in "ignore newlines" mode, as if
// they were wrapped in parentheses.
parser.PushIncludeNewlines(false)
expr, parseDiags := parser.ParseExpression()
diags = append(diags, parseDiags...)
next := parser.Peek()
if next.Type != TokenEOF && !parser.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Extra characters after expression",
Detail: "An expression was successfully parsed, but extra characters were found after it.",
Subject: &next.Range,
})
}
parser.PopIncludeNewlines()
// Panic if the parser uses incorrect stack discipline with the peeker's
// newlines stack, since otherwise it will produce confusing downstream
// errors.
peeker.AssertEmptyIncludeNewlinesStack()
return expr, diags
}
// ParseTemplate parses the given buffer as a standalone HCL template,
// returning it as an instance of Expression.
func ParseTemplate(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
tokens, diags := LexTemplate(src, filename, start)
peeker := newPeeker(tokens, false)
parser := &parser{peeker: peeker}
expr, parseDiags := parser.ParseTemplate()
diags = append(diags, parseDiags...)
// Panic if the parser uses incorrect stack discipline with the peeker's
// newlines stack, since otherwise it will produce confusing downstream
// errors.
peeker.AssertEmptyIncludeNewlinesStack()
return expr, diags
}
// ParseTraversalAbs parses the given buffer as a standalone absolute traversal.
//
// Parsing as a traversal is more limited than parsing as an expression since
// it allows only attribute and indexing operations on variables. Traversals
// are useful as a syntax for referring to objects without necessarily
// evaluating them.
func ParseTraversalAbs(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) {
tokens, diags := LexExpression(src, filename, start)
peeker := newPeeker(tokens, false)
parser := &parser{peeker: peeker}
// Bare traversals are always parsed in "ignore newlines" mode, as if
// they were wrapped in parentheses.
parser.PushIncludeNewlines(false)
expr, parseDiags := parser.ParseTraversalAbs()
diags = append(diags, parseDiags...)
parser.PopIncludeNewlines()
// Panic if the parser uses incorrect stack discipline with the peeker's
// newlines stack, since otherwise it will produce confusing downstream
// errors.
peeker.AssertEmptyIncludeNewlinesStack()
return expr, diags
}
// LexConfig performs lexical analysis on the given buffer, treating it as a
// whole HCL config file, and returns the resulting tokens.
//
// Only minimal validation is done during lexical analysis, so the returned
// diagnostics may include errors about lexical issues such as bad character
// encodings or unrecognized characters, but full parsing is required to
// detect _all_ syntax errors.
func LexConfig(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
tokens := scanTokens(src, filename, start, scanNormal)
diags := checkInvalidTokens(tokens)
return tokens, diags
}
// LexExpression performs lexical analysis on the given buffer, treating it as
// a standalone HCL expression, and returns the resulting tokens.
//
// Only minimal validation is done during lexical analysis, so the returned
// diagnostics may include errors about lexical issues such as bad character
// encodings or unrecognized characters, but full parsing is required to
// detect _all_ syntax errors.
func LexExpression(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
// This is actually just the same thing as LexConfig, since configs
// and expressions lex in the same way.
tokens := scanTokens(src, filename, start, scanNormal)
diags := checkInvalidTokens(tokens)
return tokens, diags
}
// LexTemplate performs lexical analysis on the given buffer, treating it as a
// standalone HCL template, and returns the resulting tokens.
//
// Only minimal validation is done during lexical analysis, so the returned
// diagnostics may include errors about lexical issues such as bad character
// encodings or unrecognized characters, but full parsing is required to
// detect _all_ syntax errors.
func LexTemplate(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
tokens := scanTokens(src, filename, start, scanTemplate)
diags := checkInvalidTokens(tokens)
return tokens, diags
}
// ValidIdentifier tests if the given string could be a valid identifier in
// a native syntax expression.
//
// This is useful when accepting names from the user that will be used as
// variable or attribute names in the scope, to ensure that any name chosen
// will be traversable using the variable or attribute traversal syntax.
func ValidIdentifier(s string) bool {
// This is a kinda-expensive way to do something pretty simple, but it
// is easiest to do with our existing scanner-related infrastructure here
// and nobody should be validating identifiers in a tight loop.
tokens := scanTokens([]byte(s), "", hcl.Pos{}, scanIdentOnly)
return len(tokens) == 2 && tokens[0].Type == TokenIdent && tokens[1].Type == TokenEOF
}
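A quick sketch of ValidIdentifier's behavior, consistent with the Ident pattern in the scanner (the example strings are illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	fmt.Println(hclsyntax.ValidIdentifier("instance-count")) // true: dashes are allowed
	fmt.Println(hclsyntax.ValidIdentifier("0count"))         // false: cannot start with a digit
	fmt.Println(hclsyntax.ValidIdentifier("two words"))      // false: not a single identifier
}
```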

301
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go generated vendored Normal file

@ -0,0 +1,301 @@
// line 1 "scan_string_lit.rl"
package hclsyntax
// This file is generated from scan_string_lit.rl. DO NOT EDIT.
// line 9 "scan_string_lit.go"
var _hclstrtok_actions []byte = []byte{
0, 1, 0, 1, 1, 2, 1, 0,
}
var _hclstrtok_key_offsets []byte = []byte{
0, 0, 2, 4, 6, 10, 14, 18,
22, 27, 31, 36, 41, 46, 51, 57,
62, 74, 85, 96, 107, 118, 129, 140,
151,
}
var _hclstrtok_trans_keys []byte = []byte{
128, 191, 128, 191, 128, 191, 10, 13,
36, 37, 10, 13, 36, 37, 10, 13,
36, 37, 10, 13, 36, 37, 10, 13,
36, 37, 123, 10, 13, 36, 37, 10,
13, 36, 37, 92, 10, 13, 36, 37,
92, 10, 13, 36, 37, 92, 10, 13,
36, 37, 92, 10, 13, 36, 37, 92,
123, 10, 13, 36, 37, 92, 85, 117,
128, 191, 192, 223, 224, 239, 240, 247,
248, 255, 10, 13, 36, 37, 92, 48,
57, 65, 70, 97, 102, 10, 13, 36,
37, 92, 48, 57, 65, 70, 97, 102,
10, 13, 36, 37, 92, 48, 57, 65,
70, 97, 102, 10, 13, 36, 37, 92,
48, 57, 65, 70, 97, 102, 10, 13,
36, 37, 92, 48, 57, 65, 70, 97,
102, 10, 13, 36, 37, 92, 48, 57,
65, 70, 97, 102, 10, 13, 36, 37,
92, 48, 57, 65, 70, 97, 102, 10,
13, 36, 37, 92, 48, 57, 65, 70,
97, 102,
}
var _hclstrtok_single_lengths []byte = []byte{
0, 0, 0, 0, 4, 4, 4, 4,
5, 4, 5, 5, 5, 5, 6, 5,
2, 5, 5, 5, 5, 5, 5, 5,
5,
}
var _hclstrtok_range_lengths []byte = []byte{
0, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
5, 3, 3, 3, 3, 3, 3, 3,
3,
}
var _hclstrtok_index_offsets []byte = []byte{
0, 0, 2, 4, 6, 11, 16, 21,
26, 32, 37, 43, 49, 55, 61, 68,
74, 82, 91, 100, 109, 118, 127, 136,
145,
}
var _hclstrtok_indicies []byte = []byte{
0, 1, 2, 1, 3, 1, 5, 6,
7, 8, 4, 10, 11, 12, 13, 9,
14, 11, 12, 13, 9, 10, 11, 15,
13, 9, 10, 11, 12, 13, 14, 9,
10, 11, 12, 15, 9, 17, 18, 19,
20, 21, 16, 23, 24, 25, 26, 27,
22, 0, 24, 25, 26, 27, 22, 23,
24, 28, 26, 27, 22, 23, 24, 25,
26, 27, 0, 22, 23, 24, 25, 28,
27, 22, 29, 30, 22, 2, 3, 31,
22, 0, 23, 24, 25, 26, 27, 32,
32, 32, 22, 23, 24, 25, 26, 27,
33, 33, 33, 22, 23, 24, 25, 26,
27, 34, 34, 34, 22, 23, 24, 25,
26, 27, 30, 30, 30, 22, 23, 24,
25, 26, 27, 35, 35, 35, 22, 23,
24, 25, 26, 27, 36, 36, 36, 22,
23, 24, 25, 26, 27, 37, 37, 37,
22, 23, 24, 25, 26, 27, 0, 0,
0, 22,
}
var _hclstrtok_trans_targs []byte = []byte{
11, 0, 1, 2, 4, 5, 6, 7,
9, 4, 5, 6, 7, 9, 5, 8,
10, 11, 12, 13, 15, 16, 10, 11,
12, 13, 15, 16, 14, 17, 21, 3,
18, 19, 20, 22, 23, 24,
}
var _hclstrtok_trans_actions []byte = []byte{
0, 0, 0, 0, 0, 1, 1, 1,
1, 3, 5, 5, 5, 5, 0, 0,
0, 1, 1, 1, 1, 1, 3, 5,
5, 5, 5, 5, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
}
var _hclstrtok_eof_actions []byte = []byte{
0, 0, 0, 0, 0, 3, 3, 3,
3, 3, 0, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3,
3,
}
const hclstrtok_start int = 4
const hclstrtok_first_final int = 4
const hclstrtok_error int = 0
const hclstrtok_en_quoted int = 10
const hclstrtok_en_unquoted int = 4
// line 10 "scan_string_lit.rl"
func scanStringLit(data []byte, quoted bool) [][]byte {
var ret [][]byte
// line 61 "scan_string_lit.rl"
// Ragel state
p := 0 // "Pointer" into data
pe := len(data) // End-of-data "pointer"
ts := 0
te := 0
eof := pe
var cs int // current state
switch {
case quoted:
cs = hclstrtok_en_quoted
default:
cs = hclstrtok_en_unquoted
}
// Make Go compiler happy
_ = ts
_ = eof
/*token := func () {
ret = append(ret, data[ts:te])
}*/
// line 154 "scan_string_lit.go"
{
}
// line 158 "scan_string_lit.go"
{
var _klen int
var _trans int
var _acts int
var _nacts uint
var _keys int
if p == pe {
goto _test_eof
}
if cs == 0 {
goto _out
}
_resume:
_keys = int(_hclstrtok_key_offsets[cs])
_trans = int(_hclstrtok_index_offsets[cs])
_klen = int(_hclstrtok_single_lengths[cs])
if _klen > 0 {
_lower := int(_keys)
var _mid int
_upper := int(_keys + _klen - 1)
for {
if _upper < _lower {
break
}
_mid = _lower + ((_upper - _lower) >> 1)
switch {
case data[p] < _hclstrtok_trans_keys[_mid]:
_upper = _mid - 1
case data[p] > _hclstrtok_trans_keys[_mid]:
_lower = _mid + 1
default:
_trans += int(_mid - int(_keys))
goto _match
}
}
_keys += _klen
_trans += _klen
}
_klen = int(_hclstrtok_range_lengths[cs])
if _klen > 0 {
_lower := int(_keys)
var _mid int
_upper := int(_keys + (_klen << 1) - 2)
for {
if _upper < _lower {
break
}
_mid = _lower + (((_upper - _lower) >> 1) & ^1)
switch {
case data[p] < _hclstrtok_trans_keys[_mid]:
_upper = _mid - 2
case data[p] > _hclstrtok_trans_keys[_mid+1]:
_lower = _mid + 2
default:
_trans += int((_mid - int(_keys)) >> 1)
goto _match
}
}
_trans += _klen
}
_match:
_trans = int(_hclstrtok_indicies[_trans])
cs = int(_hclstrtok_trans_targs[_trans])
if _hclstrtok_trans_actions[_trans] == 0 {
goto _again
}
_acts = int(_hclstrtok_trans_actions[_trans])
_nacts = uint(_hclstrtok_actions[_acts])
_acts++
for ; _nacts > 0; _nacts-- {
_acts++
switch _hclstrtok_actions[_acts-1] {
case 0:
// line 40 "scan_string_lit.rl"
// If te is behind p then we've skipped over some literal
// characters which we must now return.
if te < p {
ret = append(ret, data[te:p])
}
ts = p
case 1:
// line 48 "scan_string_lit.rl"
te = p
ret = append(ret, data[ts:te])
// line 255 "scan_string_lit.go"
}
}
_again:
if cs == 0 {
goto _out
}
p++
if p != pe {
goto _resume
}
_test_eof:
{
}
if p == eof {
__acts := _hclstrtok_eof_actions[cs]
__nacts := uint(_hclstrtok_actions[__acts])
__acts++
for ; __nacts > 0; __nacts-- {
__acts++
switch _hclstrtok_actions[__acts-1] {
case 1:
// line 48 "scan_string_lit.rl"
te = p
ret = append(ret, data[ts:te])
// line 281 "scan_string_lit.go"
}
}
}
_out:
{
}
}
// line 89 "scan_string_lit.rl"
if te < p {
// Collect any leftover literal characters at the end of the input
ret = append(ret, data[te:p])
}
// If we fall out here without being in a final state then we've
// encountered something that the scanner can't match, which should
// be impossible (the scanner matches all bytes _somehow_) but we'll
// tolerate it and let the caller deal with it.
if cs < hclstrtok_first_final {
ret = append(ret, data[p:len(data)])
}
return ret
}
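Since scanStringLit is unexported, the following hypothetical in-package test sketches its contract; the exact chunking shown is an inference from the Begin/End actions above, not an assertion from this commit:

```go
// Hypothetical test inside the hclsyntax package (requires the standard
// "testing" import; not part of this commit).
func TestScanStringLitSketch(t *testing.T) {
	chunks := scanStringLit([]byte(`hello\nworld`), true)
	// Inferred from the actions above: the escape sequence comes back as its
	// own chunk, with the literal runs on either side, roughly
	// "hello", "\\n", "world".
	for _, c := range chunks {
		t.Logf("%q", c)
	}
}
```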

105
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl generated vendored Normal file

@ -0,0 +1,105 @@
package hclsyntax
// This file is generated from scan_string_lit.rl. DO NOT EDIT.
%%{
# (except you are actually in scan_string_lit.rl here, so edit away!)
machine hclstrtok;
write data;
}%%
func scanStringLit(data []byte, quoted bool) [][]byte {
var ret [][]byte
%%{
include UnicodeDerived "unicode_derived.rl";
UTF8Cont = 0x80 .. 0xBF;
AnyUTF8 = (
0x00..0x7F |
0xC0..0xDF . UTF8Cont |
0xE0..0xEF . UTF8Cont . UTF8Cont |
0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
);
BadUTF8 = any - AnyUTF8;
Hex = ('0'..'9' | 'a'..'f' | 'A'..'F');
# Our goal with these patterns is to capture user intent as best as
# possible, even if the input is invalid. The caller will then verify
# whether each token is valid and generate suitable error messages
# if not.
UnicodeEscapeShort = "\\u" . Hex{0,4};
UnicodeEscapeLong = "\\U" . Hex{0,8};
UnicodeEscape = (UnicodeEscapeShort | UnicodeEscapeLong);
SimpleEscape = "\\" . (AnyUTF8 - ('U'|'u'))?;
TemplateEscape = ("$" . ("$" . ("{"?))?) | ("%" . ("%" . ("{"?))?);
Newline = ("\r\n" | "\r" | "\n");
action Begin {
// If te is behind p then we've skipped over some literal
// characters which we must now return.
if te < p {
ret = append(ret, data[te:p])
}
ts = p;
}
action End {
te = p;
ret = append(ret, data[ts:te]);
}
QuotedToken = (UnicodeEscape | SimpleEscape | TemplateEscape | Newline) >Begin %End;
UnquotedToken = (TemplateEscape | Newline) >Begin %End;
QuotedLiteral = (any - ("\\" | "$" | "%" | "\r" | "\n"));
UnquotedLiteral = (any - ("$" | "%" | "\r" | "\n"));
quoted := (QuotedToken | QuotedLiteral)**;
unquoted := (UnquotedToken | UnquotedLiteral)**;
}%%
// Ragel state
p := 0 // "Pointer" into data
pe := len(data) // End-of-data "pointer"
ts := 0
te := 0
eof := pe
var cs int // current state
switch {
case quoted:
cs = hclstrtok_en_quoted
default:
cs = hclstrtok_en_unquoted
}
// Make Go compiler happy
_ = ts
_ = eof
/*token := func () {
ret = append(ret, data[ts:te])
}*/
%%{
write init nocs;
write exec;
}%%
if te < p {
// Collect any leftover literal characters at the end of the input
ret = append(ret, data[te:p])
}
// If we fall out here without being in a final state then we've
// encountered something that the scanner can't match, which should
// be impossible (the scanner matches all bytes _somehow_) but we'll
// tolerate it and let the caller deal with it.
if cs < hclstrtok_first_final {
ret = append(ret, data[p:len(data)])
}
return ret
}

vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go generated vendored Normal file
File diff suppressed because it is too large

376
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl generated vendored Normal file

@ -0,0 +1,376 @@
package hclsyntax
import (
"bytes"
"github.com/hashicorp/hcl2/hcl"
)
// This file is generated from scan_tokens.rl. DO NOT EDIT.
%%{
# (except you are actually in scan_tokens.rl here, so edit away!)
machine hcltok;
write data;
}%%
func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
f := &tokenAccum{
Filename: filename,
Bytes: data,
Pos: start,
}
%%{
include UnicodeDerived "unicode_derived.rl";
UTF8Cont = 0x80 .. 0xBF;
AnyUTF8 = (
0x00..0x7F |
0xC0..0xDF . UTF8Cont |
0xE0..0xEF . UTF8Cont . UTF8Cont |
0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
);
BrokenUTF8 = any - AnyUTF8;
NumberLitContinue = (digit|'.'|('e'|'E') ('+'|'-')? digit);
NumberLit = digit ("" | (NumberLitContinue - '.') | (NumberLitContinue* (NumberLitContinue - '.')));
Ident = (ID_Start | '_') (ID_Continue | '-')*;
# Symbols that just represent themselves are handled as a single rule.
SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`";
EqualOp = "==";
NotEqual = "!=";
GreaterThanEqual = ">=";
LessThanEqual = "<=";
LogicalAnd = "&&";
LogicalOr = "||";
Ellipsis = "...";
FatArrow = "=>";
Newline = '\r' ? '\n';
EndOfLine = Newline;
BeginStringTmpl = '"';
BeginHeredocTmpl = '<<' ('-')? Ident Newline;
Comment = (
("#" (any - EndOfLine)* EndOfLine) |
("//" (any - EndOfLine)* EndOfLine) |
("/*" any* "*/")
);
# Note: hclwrite assumes that only ASCII spaces appear between tokens,
# and uses this assumption to recreate the spaces between tokens by
# looking at byte offset differences. This means it will produce
# incorrect results in the presence of tabs, but that's acceptable
# because the canonical style (which hclwrite itself can impose
# automatically) is to never use tabs.
Spaces = (' ' | 0x09)+;
action beginStringTemplate {
token(TokenOQuote);
fcall stringTemplate;
}
action endStringTemplate {
token(TokenCQuote);
fret;
}
action beginHeredocTemplate {
token(TokenOHeredoc);
// the token is currently the whole heredoc introducer, like
// <<EOT or <<-EOT, followed by a newline. We want to extract
// just the "EOT" portion that we'll use as the closing marker.
marker := data[ts+2:te-1]
if marker[0] == '-' {
marker = marker[1:]
}
if marker[len(marker)-1] == '\r' {
marker = marker[:len(marker)-1]
}
heredocs = append(heredocs, heredocInProgress{
Marker: marker,
StartOfLine: true,
})
fcall heredocTemplate;
}
action heredocLiteralEOL {
// This action is called specifically when a heredoc literal
// ends with a newline character.
// This might actually be our end marker.
topdoc := &heredocs[len(heredocs)-1]
if topdoc.StartOfLine {
maybeMarker := bytes.TrimSpace(data[ts:te])
if bytes.Equal(maybeMarker, topdoc.Marker) {
// We actually emit two tokens here: the end-of-heredoc
// marker first, and then separately the newline that
// follows it. This then avoids issues with the closing
// marker consuming a newline that would normally be used
// to mark the end of an attribute definition.
// We might have either a \n sequence or an \r\n sequence
// here, so we must handle both.
nls := te-1
nle := te
te--
if data[te-1] == '\r' {
// back up one more byte
nls--
te--
}
token(TokenCHeredoc);
ts = nls
te = nle
token(TokenNewline);
heredocs = heredocs[:len(heredocs)-1]
fret;
}
}
topdoc.StartOfLine = true;
token(TokenStringLit);
}
action heredocLiteralMidline {
// This action is called when a heredoc literal _doesn't_ end
// with a newline character, e.g. because we're about to enter
// an interpolation sequence.
heredocs[len(heredocs)-1].StartOfLine = false;
token(TokenStringLit);
}
action bareTemplateLiteral {
token(TokenStringLit);
}
action beginTemplateInterp {
token(TokenTemplateInterp);
braces++;
retBraces = append(retBraces, braces);
if len(heredocs) > 0 {
heredocs[len(heredocs)-1].StartOfLine = false;
}
fcall main;
}
action beginTemplateControl {
token(TokenTemplateControl);
braces++;
retBraces = append(retBraces, braces);
if len(heredocs) > 0 {
heredocs[len(heredocs)-1].StartOfLine = false;
}
fcall main;
}
action openBrace {
token(TokenOBrace);
braces++;
}
action closeBrace {
if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
token(TokenTemplateSeqEnd);
braces--;
retBraces = retBraces[0:len(retBraces)-1]
fret;
} else {
token(TokenCBrace);
braces--;
}
}
action closeTemplateSeqEatWhitespace {
// Only consume from the retBraces stack and return if we are at
// a suitable brace nesting level, otherwise things will get
// confused. (Not entering this branch indicates a syntax error,
// which we will catch in the parser.)
if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
token(TokenTemplateSeqEnd);
braces--;
retBraces = retBraces[0:len(retBraces)-1]
fret;
} else {
// We intentionally generate a TokenTemplateSeqEnd here,
// even though the user apparently wanted a brace, because
// we want to allow the parser to catch the incorrect use
// of a ~} to balance a generic opening brace, rather than
// a template sequence.
token(TokenTemplateSeqEnd);
braces--;
}
}
TemplateInterp = "${" ("~")?;
TemplateControl = "%{" ("~")?;
EndStringTmpl = '"';
StringLiteralChars = (AnyUTF8 - ("\r"|"\n"));
TemplateStringLiteral = (
('$' ^'{' %{ fhold; }) |
('%' ^'{' %{ fhold; }) |
('\\' StringLiteralChars) |
(StringLiteralChars - ("$" | '%' | '"'))
)+;
HeredocStringLiteral = (
('$' ^'{' %{ fhold; }) |
('%' ^'{' %{ fhold; }) |
(StringLiteralChars - ("$" | '%'))
)*;
BareStringLiteral = (
('$' ^'{') |
('%' ^'{') |
(StringLiteralChars - ("$" | '%'))
)* Newline?;
stringTemplate := |*
TemplateInterp => beginTemplateInterp;
TemplateControl => beginTemplateControl;
EndStringTmpl => endStringTemplate;
TemplateStringLiteral => { token(TokenQuotedLit); };
AnyUTF8 => { token(TokenInvalid); };
BrokenUTF8 => { token(TokenBadUTF8); };
*|;
heredocTemplate := |*
TemplateInterp => beginTemplateInterp;
TemplateControl => beginTemplateControl;
HeredocStringLiteral EndOfLine => heredocLiteralEOL;
HeredocStringLiteral => heredocLiteralMidline;
BrokenUTF8 => { token(TokenBadUTF8); };
*|;
bareTemplate := |*
TemplateInterp => beginTemplateInterp;
TemplateControl => beginTemplateControl;
BareStringLiteral => bareTemplateLiteral;
BrokenUTF8 => { token(TokenBadUTF8); };
*|;
identOnly := |*
Ident => { token(TokenIdent) };
BrokenUTF8 => { token(TokenBadUTF8) };
AnyUTF8 => { token(TokenInvalid) };
*|;
main := |*
Spaces => {};
NumberLit => { token(TokenNumberLit) };
Ident => { token(TokenIdent) };
Comment => { token(TokenComment) };
Newline => { token(TokenNewline) };
EqualOp => { token(TokenEqualOp); };
NotEqual => { token(TokenNotEqual); };
GreaterThanEqual => { token(TokenGreaterThanEq); };
LessThanEqual => { token(TokenLessThanEq); };
LogicalAnd => { token(TokenAnd); };
LogicalOr => { token(TokenOr); };
Ellipsis => { token(TokenEllipsis); };
FatArrow => { token(TokenFatArrow); };
SelfToken => { selfToken() };
"{" => openBrace;
"}" => closeBrace;
"~}" => closeTemplateSeqEatWhitespace;
BeginStringTmpl => beginStringTemplate;
BeginHeredocTmpl => beginHeredocTemplate;
BrokenUTF8 => { token(TokenBadUTF8) };
AnyUTF8 => { token(TokenInvalid) };
*|;
}%%
// Ragel state
p := 0 // "Pointer" into data
pe := len(data) // End-of-data "pointer"
ts := 0
te := 0
act := 0
eof := pe
var stack []int
var top int
var cs int // current state
switch mode {
case scanNormal:
cs = hcltok_en_main
case scanTemplate:
cs = hcltok_en_bareTemplate
case scanIdentOnly:
cs = hcltok_en_identOnly
default:
panic("invalid scanMode")
}
braces := 0
var retBraces []int // stack of brace levels that cause us to use fret
var heredocs []heredocInProgress // stack of heredocs we're currently processing
%%{
prepush {
stack = append(stack, 0);
}
postpop {
stack = stack[:len(stack)-1];
}
}%%
// Make Go compiler happy
_ = ts
_ = te
_ = act
_ = eof
token := func (ty TokenType) {
f.emitToken(ty, ts, te)
}
selfToken := func () {
b := data[ts:te]
if len(b) != 1 {
// should never happen
panic("selfToken only works for single-character tokens")
}
f.emitToken(TokenType(b[0]), ts, te)
}
%%{
write init nocs;
write exec;
}%%
// If we fall out here without being in a final state then we've
// encountered something that the scanner can't match, which we'll
// deal with as an invalid.
if cs < hcltok_first_final {
if mode == scanTemplate && len(stack) == 0 {
// If we're scanning a bare template then any straggling
// top-level stuff is actually literal string, rather than
// invalid. This handles the case where the template ends
// with a single "$" or "%", which trips us up because we
// want to see another character to decide if it's a sequence
// or an escape.
f.emitToken(TokenStringLit, ts, len(data))
} else {
f.emitToken(TokenInvalid, ts, len(data))
}
}
// We always emit a synthetic EOF token at the end, since it gives the
// parser position information for an "unexpected EOF" diagnostic.
f.emitToken(TokenEOF, len(data), len(data))
return f.Tokens
}
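To see the scanner's output shape end to end, here is a small sketch using the public LexConfig wrapper (vendored import paths assumed; the expected token list in the comment is an approximation):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	tokens, diags := hclsyntax.LexConfig(
		[]byte("a = 1\n"), "example.hcl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	for _, tok := range tokens {
		fmt.Printf("%-16v %q\n", tok.Type, tok.Bytes)
	}
	// Roughly: TokenIdent "a", TokenEqual "=", TokenNumberLit "1",
	// TokenNewline "\n", TokenEOF "" (spaces produce no tokens).
}
```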

923
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md generated vendored Normal file

@ -0,0 +1,923 @@
# HCL Native Syntax Specification
This is the specification of the syntax and semantics of the native syntax
for HCL. HCL is a system for defining configuration languages for applications.
The HCL information model is designed to support multiple concrete syntaxes
for configuration, but this native syntax is considered the primary format
and is optimized for human authoring and maintenance, as opposed to machine
generation of configuration.
The language consists of three integrated sub-languages:
* The _structural_ language defines the overall hierarchical configuration
structure, and is a serialization of HCL bodies, blocks and attributes.
* The _expression_ language is used to express attribute values, either as
literals or as derivations of other values.
* The _template_ language is used to compose values together into strings,
as one of several types of expression in the expression language.
In normal use these three sub-languages are used together within configuration
files to describe an overall configuration, with the structural language
being used at the top level. The expression and template languages can also
be used in isolation, to implement features such as REPLs, debuggers, and
integration into more limited HCL syntaxes such as the JSON profile.
## Syntax Notation
Within this specification a semi-formal notation is used to illustrate the
details of syntax. This notation is intended for human consumption rather
than machine consumption, with the following conventions:
* A naked name starting with an uppercase letter is a global production,
common to all of the syntax specifications in this document.
* A naked name starting with a lowercase letter is a local production,
meaningful only within the specification where it is defined.
* Double and single quotes (`"` and `'`) are used to mark literal character
sequences, which may be either punctuation markers or keywords.
* The default operator for combining items, which has no punctuation,
is concatenation.
* The symbol `|` indicates that any one of its left and right operands may
be present.
* The `*` symbol indicates zero or more repetitions of the item to its left.
* The `?` symbol indicates zero or one of the item to its left.
* Parentheses (`(` and `)`) are used to group items together to apply
the `|`, `*` and `?` operators to them collectively.
The grammar notation does not fully describe the language. The prose may
augment or conflict with the illustrated grammar. In case of conflict, prose
has priority.
## Source Code Representation
Source code is unicode text expressed in the UTF-8 encoding. The language
itself does not perform unicode normalization, so syntax features such as
identifiers are sequences of unicode code points and so e.g. a precombined
accented character is distinct from a letter associated with a combining
accent. (String literals have some special handling with regard to Unicode
normalization which will be covered later in the relevant section.)
UTF-8 encoded Unicode byte order marks are not permitted. Invalid or
non-normalized UTF-8 encoding is always a parse error.
## Lexical Elements
### Comments and Whitespace
Comments and Whitespace are recognized as lexical elements but are ignored
except as described below.
Whitespace is defined as a sequence of zero or more space characters
(U+0020). Newline sequences (either U+000A or U+000D followed by U+000A)
are _not_ considered whitespace but are ignored as such in certain contexts.
Horizontal tab characters (U+0009) are not considered to be whitespace and
are not valid within HCL native syntax.
Comments serve as program documentation and come in two forms:
* _Line comments_ start with either the `//` or `#` sequences and end with
the next newline sequence. A line comment is considered equivalent to a
newline sequence.
* _Inline comments_ start with the `/*` sequence and end with the `*/`
sequence, and may have any characters within except the ending sequence.
An inline comment is considered equivalent to a whitespace sequence.
Comments and whitespace cannot begin within other comments, or within
template literals except inside an interpolation sequence or template directive.
### Identifiers
Identifiers name entities such as blocks, attributes and expression variables.
Identifiers are interpreted as per [UAX #31][UAX31] Section 2. Specifically,
their syntax is defined in terms of the `ID_Start` and `ID_Continue`
character properties as follows:
```ebnf
Identifier = ID_Start (ID_Continue | '-')*;
```
The Unicode specification provides the normative requirements for identifier
parsing. Non-normatively, the spirit of this specification is that `ID_Start`
consists of Unicode letters and certain unambiguous punctuation tokens, while
`ID_Continue` augments that set with Unicode digits, combining marks, etc.
The dash character `-` is additionally allowed in identifiers, even though
that is not part of the unicode `ID_Continue` definition. This is to allow
attribute names and block type names to contain dashes, although underscores
as word separators are considered the idiomatic usage.
[UAX31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"
### Keywords
There are no globally-reserved words, but in some contexts certain identifiers
are reserved to function as keywords. These are discussed further in the
relevant documentation sections that follow. In such situations, the
identifier's role as a keyword supersedes any other valid interpretation that
may be possible. Outside of these specific situations, the keywords have no
special meaning and are interpreted as regular identifiers.
### Operators and Delimiters
The following character sequences represent operators, delimiters, and other
special tokens:
```
+    &&   ==    <     :    {    [    (    ${
-    ||   !=    >     ?    }    ]    )    %{
*    !    <=    =           .
/         >=    =>          ,
%               ...
```
### Numeric Literals
A numeric literal is a decimal representation of a
real number. It has an integer part, a fractional part,
and an exponent part.
```ebnf
NumericLit = decimal+ ("." decimal+)? (expmark decimal+)?;
decimal = '0' .. '9';
expmark = ('e' | 'E') ("+" | "-")?;
```
## Structural Elements
The structural language consists of syntax representing the following
constructs:
* _Attributes_, which assign a value to a specified name.
* _Blocks_, which create a child body annotated by a type and optional labels.
* _Body Content_, which consists of a collection of attributes and blocks.
These constructs correspond to the similarly-named concepts in the
language-agnostic HCL information model.
```ebnf
ConfigFile = Body;
Body = (Attribute | Block)*;
Attribute = Identifier "=" Expression Newline;
Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
```
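As a non-normative illustration of these productions, the following body contains one attribute definition and one block carrying a single quoted label:

```hcl
io_mode = "async"

service "web" {
  listen_addr = "127.0.0.1:8080"
}
```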
### Configuration Files
A _configuration file_ is a sequence of characters whose top-level is
interpreted as a Body.
### Bodies
A _body_ is a collection of associated attributes and blocks. The meaning of
this association is defined by the calling application.
### Attribute Definitions
An _attribute definition_ assigns a value to a particular attribute name within
a body. Each distinct attribute name may be defined no more than once within a
single body.
The attribute value is given as an expression, which is retained literally
for later evaluation by the calling application.
### Blocks
A _block_ creates a child body that is annotated with a block _type_ and
zero or more block _labels_. Blocks create a structural hierarchy which can be
interpreted by the calling application.
Block labels can either be quoted literal strings or naked identifiers.
## Expressions
The expression sub-language is used within attribute definitions to specify
values.
```ebnf
Expression = (
ExprTerm |
Operation |
Conditional
);
```
### Types
The value types used within the expression language are those defined by the
syntax-agnostic HCL information model. An expression may return any valid
type, but only a subset of the available types have first-class syntax.
A calling application may make other types available via _variables_ and
_functions_.
### Expression Terms
Expression _terms_ are the operands for unary and binary expressions, as well
as acting as expressions in their own right.
```ebnf
ExprTerm = (
LiteralValue |
CollectionValue |
TemplateExpr |
VariableExpr |
FunctionCall |
ForExpr |
ExprTerm Index |
ExprTerm GetAttr |
ExprTerm Splat |
"(" Expression ")"
);
```
The productions for these different term types are given in their corresponding
sections.
Between the `(` and `)` characters denoting a sub-expression, newline
characters are ignored as whitespace.
### Literal Values
A _literal value_ immediately represents a particular value of a primitive
type.
```ebnf
LiteralValue = (
NumericLit |
"true" |
"false" |
"null"
);
```
* Numeric literals represent values of type _number_.
* The `true` and `false` keywords represent values of type _bool_.
* The `null` keyword represents a null value of the dynamic pseudo-type.
String literals are not directly available in the expression sub-language, but
are available via the template sub-language, which can in turn be incorporated
via _template expressions_.
### Collection Values
A _collection value_ combines zero or more other expressions to produce a
collection value.
```ebnf
CollectionValue = tuple | object;
tuple = "[" (
(Expression ("," Expression)* ","?)?
) "]";
object = "{" (
(objectelem ("," objectelem)* ","?)?
) "}";
objectelem = (Identifier | Expression) "=" Expression;
```
Only tuple and object values can be directly constructed via native syntax.
Tuple and object values can in turn be converted to list, set and map values
with other operations, which behave as defined by the syntax-agnostic HCL
information model.
When specifying an object element, an identifier is interpreted as a literal
attribute name as opposed to a variable reference. To populate an item key
from a variable, use parentheses to disambiguate:
* `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
* `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
from the variable named `foo`.
Between the open and closing delimiters of these sequences, newline sequences
are ignored as whitespace.
There is a syntax ambiguity between _for expressions_ and collection values
whose first element is a reference to a variable named `for`. The
_for expression_ interpretation has priority, so to produce a tuple whose
first element is the value of a variable named `for`, or an object with a
key named `for`, use parentheses to disambiguate:
* `[for, foo, baz]` is a syntax error.
* `[(for), foo, baz]` is a tuple whose first element is the value of variable
`for`.
* `{for: 1, baz: 2}` is a syntax error.
* `{(for): 1, baz: 2}` is an object with an attribute literally named `for`.
* `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the
ambiguity by reordering.
### Template Expressions
A _template expression_ embeds a program written in the template sub-language
as an expression. Template expressions come in two forms:
* A _quoted_ template expression is delimited by quote characters (`"`) and
defines a template as a single-line expression with escape characters.
* A _heredoc_ template expression is introduced by a `<<` sequence and
defines a template via a multi-line sequence terminated by a user-chosen
delimiter.
In both cases the template interpolation and directive syntax is available for
use within the delimiters, and any text outside of these special sequences is
interpreted as a literal string.
In _quoted_ template expressions any literal string sequences within the
template behave in a special way: literal newline sequences are not permitted
and instead _escape sequences_ can be included, starting with the
backslash `\`:
```
\n Unicode newline control character
\r Unicode carriage return control character
\t Unicode tab control character
\" Literal quote mark, used to prevent interpretation as end of string
\\ Literal backslash, used to prevent interpretation as escape sequence
\uNNNN Unicode character from Basic Multilingual Plane (NNNN is four hexadecimal digits)
\UNNNNNNNN Unicode character from supplementary planes (NNNNNNNN is eight hexadecimal digits)
```
The _heredoc_ template expression type is introduced by either `<<` or `<<-`,
followed by an identifier. The template expression ends when the given
identifier subsequently appears again on a line of its own.
If a heredoc template is introduced with the `<<-` symbol, any literal string
at the start of each line is analyzed to find the minimum number of leading
spaces, and then that number of prefix spaces is removed from all line-leading
literal strings. The final closing marker may also have an arbitrary number
of spaces preceding it on its line.
```ebnf
TemplateExpr = quotedTemplate | heredocTemplate;
quotedTemplate = (as defined in prose above);
heredocTemplate = (
("<<" | "<<-") Identifier Newline
(content as defined in prose above)
Identifier Newline
);
```
A quoted template expression containing only a single literal string serves
as a syntax for defining literal string _expressions_. In certain contexts
the template syntax is restricted in this manner:
```ebnf
StringLit = '"' (quoted literals as defined in prose above) '"';
```
The `StringLit` production permits the escape sequences discussed for quoted
template expressions as above, but does _not_ permit template interpolation
or directive sequences.
### Variables and Variable Expressions
A _variable_ is a value that has been assigned a symbolic name. Variables are
made available for use in expressions by the calling application, by populating
the _global scope_ used for expression evaluation.
Variables can also be created by expressions themselves, which always creates
a _child scope_ that incorporates the variables from its parent scope but
(re-)defines zero or more names with new values.
The value of a variable is accessed using a _variable expression_, which is
a standalone `Identifier` whose name corresponds to a defined variable:
```ebnf
VariableExpr = Identifier;
```
Variables in a particular scope are immutable, but child scopes may _hide_
a variable from an ancestor scope by defining a new variable of the same name.
When looking up variables, the most locally-defined variable of the given name
is used, and ancestor-scoped variables of the same name cannot be accessed.
No direct syntax is provided for declaring or assigning variables, but other
expression constructs implicitly create child scopes and define variables as
part of their evaluation.
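A minimal sketch of how a calling application populates the global scope before evaluation, assuming `hclsyntax.ParseExpression` and the `cty` value types from hcl2's go-cty dependency (neither appears in this diff; the variable name is an example):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte(`greeting`), "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		fmt.Println(diags.Error())
		return
	}

	// The calling application defines the global scope for evaluation.
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"greeting": cty.StringVal("hello"),
		},
	}

	val, moreDiags := expr.Value(ctx)
	if moreDiags.HasErrors() {
		fmt.Println(moreDiags.Error())
		return
	}
	fmt.Println(val.AsString()) // hello
}
```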
### Functions and Function Calls
A _function_ is an operation that has been assigned a symbolic name. Functions
are made available for use in expressions by the calling application, by
populating the _function table_ used for expression evaluation.
The namespace of functions is distinct from the namespace of variables. A
function and a variable may share the same name with no implication that they
are in any way related.
A function can be executed via a _function call_ expression:
```ebnf
FunctionCall = Identifier "(" Arguments ")";
Arguments = (
() |
(Expression ("," Expression)* ("," | "...")?)
);
```
The definition of functions and the semantics of calling them are defined by
the language-agnostic HCL information model. The given arguments are mapped
onto the function's _parameters_ and the result of a function call expression
is the return value of the named function when given those arguments.
If the final argument expression is followed by the ellipsis symbol (`...`),
the final argument expression must evaluate to either a list or tuple value.
The elements of the value are each mapped to a single parameter of the
named function, beginning at the first parameter remaining after all other
argument expressions have been mapped.
Within the parentheses that delimit the function arguments, newline sequences
are ignored as whitespace.
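A sketch of populating the function table, assuming the `cty/function` package from hcl2's go-cty dependency (not shown in this diff); the `upper` function here is hypothetical, defined purely for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

func main() {
	// A hypothetical "upper" function, registered in the function table.
	upper := function.New(&function.Spec{
		Params: []function.Parameter{{Name: "str", Type: cty.String}},
		Type:   function.StaticReturnType(cty.String),
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			return cty.StringVal(strings.ToUpper(args[0].AsString())), nil
		},
	})

	expr, diags := hclsyntax.ParseExpression([]byte(`upper("hello")`), "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		fmt.Println(diags.Error())
		return
	}

	ctx := &hcl.EvalContext{
		Functions: map[string]function.Function{"upper": upper},
	}
	val, moreDiags := expr.Value(ctx)
	if moreDiags.HasErrors() {
		fmt.Println(moreDiags.Error())
		return
	}
	fmt.Println(val.AsString()) // HELLO
}
```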
### For Expressions
A _for expression_ is a construct for constructing a collection by projecting
the items from another collection.
```ebnf
ForExpr = forTupleExpr | forObjectExpr;
forTupleExpr = "[" forIntro Expression forCond? "]";
forObjectExpr = "{" forIntro Expression "=>" Expression "..."? forCond? "}";
forIntro = "for" Identifier ("," Identifier)? "in" Expression ":";
forCond = "if" Expression;
```
The punctuation used to delimit a for expression decides whether it will produce
a tuple value (`[` and `]`) or an object value (`{` and `}`).
The "introduction" is equivalent in both cases: the keyword `for` followed by
either one or two identifiers separated by a comma which define the temporary
variable names used for iteration, followed by the keyword `in` and then
an expression that must evaluate to a value that can be iterated. The
introduction is then terminated by the colon (`:`) symbol.
If only one identifier is provided, it is the name of a variable that will
be temporarily assigned the value of each element during iteration. If both
are provided, the first is the key and the second is the value.
Tuple, object, list, map, and set types are iterable. The type of collection
used defines how the key and value variables are populated:
* For tuple and list types, the _key_ is the zero-based index into the
sequence for each element, and the _value_ is the element value. The
elements are visited in index order.
* For object and map types, the _key_ is the string attribute name or element
key, and the _value_ is the attribute or element value. The elements are
visited in the order defined by a lexicographic sort of the attribute names
or keys.
* For set types, the _key_ and _value_ are both the element value. The elements
are visited in an undefined but consistent order.
The expression after the colon and (in the case of object `for`) the expression
after the `=>` are both evaluated once for each element of the source
collection, in a local scope that defines the key and value variable names
specified.
The results of evaluating these expressions for each input element are used
to populate an element in the new collection. In the case of tuple `for`, the
single expression becomes an element, appending values to the tuple in visit
order. In the case of object `for`, the pair of expressions is used as an
attribute name and value respectively, creating an element in the resulting
object.
In the case of object `for`, it is an error if two input elements produce
the same result from the attribute name expression, since duplicate
attributes are not possible. If the ellipsis symbol (`...`) appears
immediately after the value expression, this activates the grouping mode in
which each value in the resulting object is a _tuple_ of all of the values
that were produced against each distinct key.
* `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
* `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
* `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
* `{for i, v in ["a", "a", "b"]: k => v}` produces an error, because attribute
`a` is defined twice.
* `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
If the `if` keyword is used after the element expression(s), it applies an
additional predicate that can be used to conditionally filter elements from
the source collection from consideration. The expression following `if` is
evaluated once for each source element, in the same scope used for the
element expression(s). It must evaluate to a boolean value; if `true`, the
element will be evaluated as normal, while if `false` the element will be
skipped.
* `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`.
If the collection value, element expression(s) or condition expression return
unknown values that are otherwise type-valid, the result is a value of the
dynamic pseudo-type.
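A sketch evaluating the grouping-mode example from the list above via the Go API, again assuming `hclsyntax.ParseExpression` from the package's public API:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// Grouping mode: values produced against the same key are collected
	// into a tuple rather than raising a duplicate-attribute error.
	src := []byte(`{for i, v in ["a", "a", "b"]: v => i...}`)
	expr, diags := hclsyntax.ParseExpression(src, "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		fmt.Println(diags.Error())
		return
	}

	// No variables or functions are needed for this constant expression.
	val, moreDiags := expr.Value(&hcl.EvalContext{})
	if moreDiags.HasErrors() {
		fmt.Println(moreDiags.Error())
		return
	}
	fmt.Printf("%#v\n", val) // an object value: {a = [0, 1], b = [2]}
}
```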
### Index Operator
The _index_ operator returns the value of a single element of a collection
value. It is a postfix operator and can be applied to any value that has
a tuple, object, map, or list type.
```ebnf
Index = "[" Expression "]";
```
The expression delimited by the brackets is the _key_ by which an element
will be looked up.
If the index operator is applied to a value of tuple or list type, the
key expression must be an non-negative integer number representing the
zero-based element index to access. If applied to a value of object or map
type, the key expression must be a string representing the attribute name
or element key. If the given key value is not of the appropriate type, a
conversion is attempted using the conversion rules from the HCL
syntax-agnostic information model.
An error is produced if the given key expression does not correspond to
an element in the collection, either because it is of an unconvertable type,
because it is outside the range of elements for a tuple or list, or because
the given attribute or key does not exist.
If either the collection or the key are an unknown value of an
otherwise-suitable type, the return value is an unknown value whose type
matches what type would be returned given known values, or a value of the
dynamic pseudo-type if type information alone cannot determine a suitable
return type.
Within the brackets that delimit the index key, newline sequences are ignored
as whitespace.
### Attribute Access Operator
The _attribute access_ operator returns the value of a single attribute in
an object value. It is a postfix operator and can be applied to any value
that has an object type.
```ebnf
GetAttr = "." Identifier;
```
The given identifier is interpreted as the name of the attribute to access.
An error is produced if the object to which the operator is applied does not
have an attribute with the given name.
If the object is an unknown value of a type that has the attribute named, the
result is an unknown value of the attribute's type.
### Splat Operators
The _splat operators_ allow convenient access to attributes or elements of
elements in a tuple, list, or set value.
There are two kinds of "splat" operator:
* The _attribute-only_ splat operator supports only attribute lookups into
the elements from a list, but supports an arbitrary number of them.
* The _full_ splat operator additionally supports indexing into the elements
from a list, and allows any combination of attribute access and index
operations.
```ebnf
Splat = attrSplat | fullSplat;
attrSplat = "." "*" GetAttr*;
fullSplat = "[" "*" "]" (GetAttr | Index)*;
```
The splat operators can be thought of as shorthands for common operations that
could otherwise be performed using _for expressions_:
* `tuple.*.foo.bar[0]` is approximately equivalent to
`[for v in tuple: v.foo.bar][0]`.
* `tuple[*].foo.bar[0]` is approximately equivalent to
`[for v in tuple: v.foo.bar[0]]`.
Note the difference in how the trailing index operator is interpreted in
each case. This different interpretation is the key difference between the
_attribute-only_ and _full_ splat operators.
Splat operators have one additional behavior compared to the equivalent
_for expressions_ shown above: if a splat operator is applied to a value that
is _not_ of tuple, list, or set type, the value is coerced automatically into
a single-value list of the value type:
* `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object`
is a single object.
* `any_number.*` is equivalent to `[any_number]`, assuming that `any_number`
is a single number.
If the left operand of a splat operator is an unknown value of any type, the
result is a value of the dynamic pseudo-type.
### Operations
Operations apply a particular operator to either one or two expression terms.
```ebnf
Operation = unaryOp | binaryOp;
unaryOp = ("-" | "!") ExprTerm;
binaryOp = ExprTerm binaryOperator ExprTerm;
binaryOperator = compareOperator | arithmeticOperator | logicOperator;
compareOperator = "==" | "!=" | "<" | ">" | "<=" | ">=";
arithmeticOperator = "+" | "-" | "*" | "/" | "%";
logicOperator = "&&" | "||" | "!";
```
The unary operators have the highest precedence.
The binary operators are grouped into the following precedence levels:
```
Level Operators
6 * / %
5 + -
4 > >= < <=
3 == !=
2 &&
1 ||
```
Higher values of "level" bind tighter. Operators within the same precedence
level have left-to-right associativity. For example, `x / y * z` is equivalent
to `(x / y) * z`.
### Comparison Operators
Comparison operators always produce boolean values, as a result of testing
the relationship between two values.
The two equality operators apply to values of any type:
```
a == b equal
a != b not equal
```
Two values are equal if they are of identical types and their values are
equal as defined in the HCL syntax-agnostic information model. The equality
operators are commutative and opposite, such that `(a == b) == !(a != b)`
and `(a == b) == (b == a)` for all values `a` and `b`.
The four numeric comparison operators apply only to numbers:
```
a < b less than
a <= b less than or equal to
a > b greater than
a >= b greater than or equal to
```
If either operand of a comparison operator is a correctly-typed unknown value
or a value of the dynamic pseudo-type, the result is an unknown boolean.
### Arithmetic Operators
Arithmetic operators apply only to number values and always produce number
values as results.
```
a + b sum (addition)
a - b difference (subtraction)
a * b product (multiplication)
a / b quotient (division)
a % b remainder (modulo)
-a negation
```
Arithmetic operations are considered to be performed in an arbitrary-precision
number space.
If either operand of an arithmetic operator is an unknown number or a value
of the dynamic pseudo-type, the result is an unknown number.
### Logic Operators
Logic operators apply only to boolean values and always produce boolean values
as results.
```
a && b logical AND
a || b logical OR
!a logical NOT
```
If either operand of a logic operator is an unknown bool value or a value
of the dynamic pseudo-type, the result is an unknown bool value.
### Conditional Operator
The conditional operator allows selecting from one of two expressions based on
the outcome of a boolean expression.
```ebnf
Conditional = Expression "?" Expression ":" Expression;
```
The first expression is the _predicate_, which is evaluated and must produce
a boolean result. If the predicate value is `true`, the result of the second
expression is the result of the conditional. If the predicate value is
`false`, the result of the third expression is the result of the conditional.
The second and third expressions must be of the same type or must be able to
unify into a common type using the type unification rules defined in the
HCL syntax-agnostic information model. This unified type is the result type
of the conditional, with both expressions converted as necessary to the
unified type.
If the predicate is an unknown boolean value or a value of the dynamic
pseudo-type then the result is an unknown value of the unified type of the
other two expressions.
If either the second or third expressions produce errors when evaluated,
these errors are passed through only if the erroneous expression is selected.
This allows for expressions such as
`length(some_list) > 0 ? some_list[0] : default` (given some suitable `length`
function) without producing an error when the predicate is `false`.
## Templates
The template sub-language is used within template expressions to concisely
combine strings and other values to produce other strings. It can also be
used in isolation as a standalone template language.
```ebnf
Template = (
TemplateLiteral |
TemplateInterpolation |
TemplateDirective
)*;
TemplateDirective = TemplateIf | TemplateFor;
```
A template behaves like an expression that always returns a string value.
The different elements of the template are evaluated and combined into a
single string to return. If any of the elements produce an unknown string
or a value of the dynamic pseudo-type, the result is an unknown string.
An important use-case for standalone templates is to enable the use of
expressions in alternative HCL syntaxes where a native expression grammar is
not available. For example, the HCL JSON profile treats the values of JSON
strings as standalone templates when attributes are evaluated in expression
mode.
### Template Literals
A template literal is a literal sequence of characters to include in the
resulting string. When the template sub-language is used standalone, a
template literal can contain any unicode character, with the exception
of the sequences that introduce interpolations and directives, and for the
sequences that escape those introductions.
The interpolation and directive introductions are escaped by doubling their
leading characters. The `${` sequence is escaped as `$${` and the `%{`
sequence is escaped as `%%{`.
When the template sub-language is embedded in the expression language via
_template expressions_, additional constraints and transforms are applied to
template literals as described in the definition of template expressions.
The value of a template literal can be modified by _strip markers_ in any
interpolations or directives that are adjacent to it. A strip marker is
a tilde (`~`) placed immediately after the opening `{` or before the closing
`}` of a template sequence:
* `hello ${~ "world" }` produces `"helloworld"`.
* `%{ if true ~} hello %{~ endif }` produces `"hello"`.
When a strip marker is present, any spaces adjacent to it in the corresponding
string literal (if any) are removed before producing the final value. Space
characters are interpreted as per Unicode's definition.
Stripping is done at syntax level rather than value level. Values returned
by interpolations or directives are not subject to stripping:
* `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`,
because the space is not in a template literal directly adjacent to the
strip marker.
### Template Interpolations
An _interpolation sequence_ evaluates an expression (written in the
expression sub-language), converts the result to a string value, and
replaces itself with the resulting string.
```ebnf
TemplateInterpolation = ("${" | "${~") Expression ("}" | "~}");
```
If the expression result cannot be converted to a string, an error is
produced.
### Template If Directive
The template `if` directive is the template equivalent of the
_conditional expression_, allowing selection of one of two sub-templates based
on the value of a predicate expression.
```ebnf
TemplateIf = (
("%{" | "%{~") "if" Expression ("}" | "~}")
Template
(
("%{" | "%{~") "else" ("}" | "~}")
Template
)?
("%{" | "%{~") "endif" ("}" | "~}")
);
```
The evaluation of the `if` directive is equivalent to the conditional
expression, with the following exceptions:
* The two sub-templates always produce strings, and thus the result value is
also always a string.
* The `else` clause may be omitted, in which case the conditional's third
expression result is implied to be the empty string.
### Template For Directive
The template `for` directive is the template equivalent of the _for expression_,
producing zero or more copies of its sub-template based on the elements of
a collection.
```ebnf
TemplateFor = (
("%{" | "%{~") "for" Identifier ("," Identifier) "in" Expression ("}" | "~}")
Template
("%{" | "%{~") "endfor" ("}" | "~}")
);
```
The evaluation of the `for` directive is equivalent to the _for expression_
when producing a tuple, with the following exceptions:
* The sub-template always produces a string.
* There is no equivalent of the "if" clause on the for expression.
* The elements of the resulting tuple are all converted to strings and
concatenated to produce a flat string result.
### Template Interpolation Unwrapping
As a special case, a template that consists only of a single interpolation,
with no surrounding literals, directives or other interpolations, is
"unwrapped". In this case, the result of the interpolation expression is
returned verbatim, without conversion to string.
This special case exists primarily to enable the native template language
to be used inside strings in alternative HCL syntaxes that lack a first-class
template or expression syntax. Unwrapping allows arbitrary expressions to be
used to populate attributes when strings in such languages are interpreted
as templates.
* `${true}` produces the boolean value `true`
* `${"${true}"}` produces the boolean value `true`, because both the inner
and outer interpolations are subject to unwrapping.
* `hello ${true}` produces the string `"hello true"`
* `${""}${true}` produces the string `"true"` because there are two
interpolation sequences, even though one produces an empty result.
* `%{ for v in [true] }${v}%{ endfor }` produces the string `true` because
the presence of the `for` directive circumvents the unwrapping even though
the final result is a single value.
In some contexts this unwrapping behavior may be circumvented by the calling
application, by converting the final template result to string. This is
necessary, for example, if a standalone template is being used to produce
the direct contents of a file, since the result in that case must always be a
string.
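A minimal sketch of the unwrapping behavior, assuming `hclsyntax.ParseTemplate` from the package's public API (not shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// A template consisting only of a single interpolation is unwrapped,
	// so the result is the underlying value, not its string conversion.
	expr, diags := hclsyntax.ParseTemplate([]byte(`${true}`), "example.tmpl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		fmt.Println(diags.Error())
		return
	}
	val, _ := expr.Value(&hcl.EvalContext{})
	fmt.Println(val.Type().FriendlyName()) // "bool", not "string"
}
```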
## Static Analysis
The HCL static analysis operations are implemented for some expression types
in the native syntax, as described in the following sections.
A goal for static analysis of the native syntax is for the interpretation to
be as consistent as possible with the dynamic evaluation interpretation of
the given expression, though some deviations are intentionally made in order
to maximize the potential for analysis.
### Static List
The tuple construction syntax can be interpreted as a static list. All of
the expression elements given are returned as the static list elements,
with no further interpretation.
### Static Map
The object construction syntax can be interpreted as a static map. All of the
key/value pairs given are returned as the static pairs, with no further
interpretation.
The usual requirement that an attribute name be interpretable as a string
does not apply to this static analysis, allowing callers to provide map-like
constructs with different key types by building on the map syntax.
### Static Call
The function call syntax can be interpreted as a static call. The called
function name is returned verbatim and the given argument expressions are
returned as the static arguments, with no further interpretation.
### Static Traversal
A variable expression and any attached attribute access operations and
constant index operations can be interpreted as a static traversal.
The keywords `true`, `false` and `null` can also be interpreted as
static traversals, behaving as if they were references to variables of those
names, to allow callers to redefine the meaning of those keywords in certain
contexts.
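A sketch of the static traversal analysis from the calling side, assuming the `hcl.AbsTraversalForExpr` helper from the hcl package (not shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte(`foo.bar[0]`), "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		fmt.Println(diags.Error())
		return
	}

	// Interpret the expression as a static traversal rather than
	// evaluating it against a scope.
	trav, moreDiags := hcl.AbsTraversalForExpr(expr)
	if moreDiags.HasErrors() {
		fmt.Println(moreDiags.Error())
		return
	}
	fmt.Println(trav.RootName()) // foo
}
```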

View file

@ -0,0 +1,386 @@
package hclsyntax
import (
"fmt"
"strings"
"github.com/hashicorp/hcl2/hcl"
)
// AsHCLBlock returns the block data expressed as a *hcl.Block.
func (b *Block) AsHCLBlock() *hcl.Block {
if b == nil {
return nil
}
lastHeaderRange := b.TypeRange
if len(b.LabelRanges) > 0 {
lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1]
}
return &hcl.Block{
Type: b.Type,
Labels: b.Labels,
Body: b.Body,
DefRange: hcl.RangeBetween(b.TypeRange, lastHeaderRange),
TypeRange: b.TypeRange,
LabelRanges: b.LabelRanges,
}
}
// Body is the implementation of hcl.Body for the HCL native syntax.
type Body struct {
Attributes Attributes
Blocks Blocks
// These are used with PartialContent to produce a "remaining items"
// body to return. They are nil on all bodies fresh out of the parser.
hiddenAttrs map[string]struct{}
hiddenBlocks map[string]struct{}
SrcRange hcl.Range
EndRange hcl.Range // Final token of the body, for reporting missing items
}
// Assert that *Body implements hcl.Body
var assertBodyImplBody hcl.Body = &Body{}
func (b *Body) walkChildNodes(w internalWalkFunc) {
b.Attributes = w(b.Attributes).(Attributes)
b.Blocks = w(b.Blocks).(Blocks)
}
func (b *Body) Range() hcl.Range {
return b.SrcRange
}
func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
content, remainHCL, diags := b.PartialContent(schema)
// Now we'll see if anything actually remains, to produce errors about
// extraneous items.
remain := remainHCL.(*Body)
for name, attr := range b.Attributes {
if _, hidden := remain.hiddenAttrs[name]; !hidden {
var suggestions []string
for _, attrS := range schema.Attributes {
if _, defined := content.Attributes[attrS.Name]; defined {
continue
}
suggestions = append(suggestions, attrS.Name)
}
suggestion := nameSuggestion(name, suggestions)
if suggestion != "" {
suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
} else {
// Is there a block of the same name?
for _, blockS := range schema.Blocks {
if blockS.Type == name {
suggestion = fmt.Sprintf(" Did you mean to define a block of type %q?", name)
break
}
}
}
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported attribute",
Detail: fmt.Sprintf("An attribute named %q is not expected here.%s", name, suggestion),
Subject: &attr.NameRange,
})
}
}
for _, block := range b.Blocks {
blockTy := block.Type
if _, hidden := remain.hiddenBlocks[blockTy]; !hidden {
var suggestions []string
for _, blockS := range schema.Blocks {
suggestions = append(suggestions, blockS.Type)
}
suggestion := nameSuggestion(blockTy, suggestions)
if suggestion != "" {
suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
} else {
// Is there an attribute of the same name?
for _, attrS := range schema.Attributes {
if attrS.Name == blockTy {
suggestion = fmt.Sprintf(" Did you mean to define attribute %q?", blockTy)
break
}
}
}
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported block type",
Detail: fmt.Sprintf("Blocks of type %q are not expected here.%s", blockTy, suggestion),
Subject: &block.TypeRange,
})
}
}
return content, diags
}
func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
attrs := make(hcl.Attributes)
var blocks hcl.Blocks
var diags hcl.Diagnostics
hiddenAttrs := make(map[string]struct{})
hiddenBlocks := make(map[string]struct{})
if b.hiddenAttrs != nil {
for k, v := range b.hiddenAttrs {
hiddenAttrs[k] = v
}
}
if b.hiddenBlocks != nil {
for k, v := range b.hiddenBlocks {
hiddenBlocks[k] = v
}
}
for _, attrS := range schema.Attributes {
name := attrS.Name
attr, exists := b.Attributes[name]
_, hidden := hiddenAttrs[name]
if hidden || !exists {
if attrS.Required {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing required attribute",
Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name),
Subject: b.MissingItemRange().Ptr(),
})
}
continue
}
hiddenAttrs[name] = struct{}{}
attrs[name] = attr.AsHCLAttribute()
}
blocksWanted := make(map[string]hcl.BlockHeaderSchema)
for _, blockS := range schema.Blocks {
blocksWanted[blockS.Type] = blockS
}
for _, block := range b.Blocks {
if _, hidden := hiddenBlocks[block.Type]; hidden {
continue
}
blockS, wanted := blocksWanted[block.Type]
if !wanted {
continue
}
if len(block.Labels) > len(blockS.LabelNames) {
name := block.Type
if len(blockS.LabelNames) == 0 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Extraneous label for %s", name),
Detail: fmt.Sprintf(
"No labels are expected for %s blocks.", name,
),
Subject: block.LabelRanges[0].Ptr(),
Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
})
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Extraneous label for %s", name),
Detail: fmt.Sprintf(
"Only %d labels (%s) are expected for %s blocks.",
len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), name,
),
Subject: block.LabelRanges[len(blockS.LabelNames)].Ptr(),
Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
})
}
continue
}
if len(block.Labels) < len(blockS.LabelNames) {
name := block.Type
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Missing %s for %s", blockS.LabelNames[len(block.Labels)], name),
Detail: fmt.Sprintf(
"All %s blocks must have %d labels (%s).",
name, len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "),
),
Subject: &block.OpenBraceRange,
Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
})
continue
}
blocks = append(blocks, block.AsHCLBlock())
}
// We hide blocks only after we've processed all of them, since otherwise
// we can't process more than one of the same type.
for _, blockS := range schema.Blocks {
hiddenBlocks[blockS.Type] = struct{}{}
}
remain := &Body{
Attributes: b.Attributes,
Blocks: b.Blocks,
hiddenAttrs: hiddenAttrs,
hiddenBlocks: hiddenBlocks,
SrcRange: b.SrcRange,
EndRange: b.EndRange,
}
return &hcl.BodyContent{
Attributes: attrs,
Blocks: blocks,
MissingItemRange: b.MissingItemRange(),
}, remain, diags
}
func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
attrs := make(hcl.Attributes)
var diags hcl.Diagnostics
if len(b.Blocks) > 0 {
example := b.Blocks[0]
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: fmt.Sprintf("Unexpected %s block", example.Type),
Detail: "Blocks are not allowed here.",
Context: &example.TypeRange,
})
// we will continue processing anyway, and return the attributes
// we are able to find so that certain analyses can still be done
// in the face of errors.
}
if b.Attributes == nil {
return attrs, diags
}
for name, attr := range b.Attributes {
if _, hidden := b.hiddenAttrs[name]; hidden {
continue
}
attrs[name] = attr.AsHCLAttribute()
}
return attrs, diags
}
func (b *Body) MissingItemRange() hcl.Range {
return b.EndRange
}
// Attributes is the collection of attribute definitions within a body.
type Attributes map[string]*Attribute
func (a Attributes) walkChildNodes(w internalWalkFunc) {
for k, attr := range a {
a[k] = w(attr).(*Attribute)
}
}
// Range returns the range of some arbitrary point within the set of
// attributes, or an invalid range if there are no attributes.
//
// This is provided only to complete the Node interface, but has no practical
// use.
func (a Attributes) Range() hcl.Range {
// An Attributes map doesn't really have a useful range to report, since
// it's just a grouping construct. So we'll arbitrarily take the
// range of one of the attributes, or produce an invalid range if we have
// none. In practice, there's little reason to ask for the range of
// an Attributes.
for _, attr := range a {
return attr.Range()
}
return hcl.Range{
Filename: "<unknown>",
}
}
// Attribute represents a single attribute definition within a body.
type Attribute struct {
Name string
Expr Expression
SrcRange hcl.Range
NameRange hcl.Range
EqualsRange hcl.Range
}
func (a *Attribute) walkChildNodes(w internalWalkFunc) {
a.Expr = w(a.Expr).(Expression)
}
func (a *Attribute) Range() hcl.Range {
return a.SrcRange
}
// AsHCLAttribute returns the block data expressed as a *hcl.Attribute.
func (a *Attribute) AsHCLAttribute() *hcl.Attribute {
if a == nil {
return nil
}
return &hcl.Attribute{
Name: a.Name,
Expr: a.Expr,
Range: a.SrcRange,
NameRange: a.NameRange,
}
}
// Blocks is the list of nested blocks within a body.
type Blocks []*Block
func (bs Blocks) walkChildNodes(w internalWalkFunc) {
for i, block := range bs {
bs[i] = w(block).(*Block)
}
}
// Range returns the range of some arbitrary point within the list of
// blocks, or an invalid range if there are no blocks.
//
// This is provided only to complete the Node interface, but has no practical
// use.
func (bs Blocks) Range() hcl.Range {
if len(bs) > 0 {
return bs[0].Range()
}
return hcl.Range{
Filename: "<unknown>",
}
}
// Block represents a nested block structure
type Block struct {
Type string
Labels []string
Body *Body
TypeRange hcl.Range
LabelRanges []hcl.Range
OpenBraceRange hcl.Range
CloseBraceRange hcl.Range
}
func (b *Block) walkChildNodes(w internalWalkFunc) {
b.Body = w(b.Body).(*Body)
}
func (b *Block) Range() hcl.Range {
return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange)
}
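As a usage sketch for the schema-driven `Content` method above (an editorial illustration, not part of this changeset; `hclsyntax.ParseConfig` and the example source are assumptions):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := []byte(`
io_mode = "async"

service "http" "web_proxy" {
  listen_addr = "127.0.0.1:8080"
}
`)
	file, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		fmt.Println(diags.Error())
		return
	}

	// The schema names every attribute and block type we expect; anything
	// else yields "Unsupported attribute"/"Unsupported block type"
	// diagnostics from Content.
	schema := &hcl.BodySchema{
		Attributes: []hcl.AttributeSchema{{Name: "io_mode"}},
		Blocks: []hcl.BlockHeaderSchema{
			{Type: "service", LabelNames: []string{"proto", "name"}},
		},
	}
	content, moreDiags := file.Body.Content(schema)
	if moreDiags.HasErrors() {
		fmt.Println(moreDiags.Error())
		return
	}
	for _, block := range content.Blocks {
		fmt.Println(block.Type, block.Labels) // service [http web_proxy]
	}
}
```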

View file

@ -0,0 +1,118 @@
package hclsyntax
import (
"github.com/hashicorp/hcl2/hcl"
)
// -----------------------------------------------------------------------------
// The methods in this file are all optional extension methods that serve to
// implement the methods of the same name on *hcl.File when its root body
// is provided by this package.
// -----------------------------------------------------------------------------
// BlocksAtPos implements the method of the same name for an *hcl.File that
// is backed by a *Body.
func (b *Body) BlocksAtPos(pos hcl.Pos) []*hcl.Block {
list, _ := b.blocksAtPos(pos, true)
return list
}
// InnermostBlockAtPos implements the method of the same name for an *hcl.File
// that is backed by a *Body.
func (b *Body) InnermostBlockAtPos(pos hcl.Pos) *hcl.Block {
_, innermost := b.blocksAtPos(pos, false)
return innermost.AsHCLBlock()
}
// OutermostBlockAtPos implements the method of the same name for an *hcl.File
// that is backed by a *Body.
func (b *Body) OutermostBlockAtPos(pos hcl.Pos) *hcl.Block {
return b.outermostBlockAtPos(pos).AsHCLBlock()
}
// blocksAtPos is the internal engine of both BlocksAtPos and
// InnermostBlockAtPos, which both need to do the same logic but return a
// differently-shaped result.
//
// list is nil if makeList is false, avoiding an allocation. Innermost is
// always set, and if the returned list is non-nil it will always match the
// final element from that list.
func (b *Body) blocksAtPos(pos hcl.Pos, makeList bool) (list []*hcl.Block, innermost *Block) {
current := b
Blocks:
for current != nil {
for _, block := range current.Blocks {
wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange)
if wholeRange.ContainsPos(pos) {
innermost = block
if makeList {
list = append(list, innermost.AsHCLBlock())
}
current = block.Body
continue Blocks
}
}
// If we fall out here then none of the current body's nested blocks
// contain the position we are looking for, and so we're done.
break
}
return
}
// outermostBlockAtPos is the internal version of OutermostBlockAtPos that
// returns a hclsyntax.Block rather than an hcl.Block, allowing for further
// analysis if necessary.
func (b *Body) outermostBlockAtPos(pos hcl.Pos) *Block {
// This is similar to blocksAtPos, but simpler because we know it only
// ever needs to search the first level of nested blocks.
for _, block := range b.Blocks {
wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange)
if wholeRange.ContainsPos(pos) {
return block
}
}
return nil
}
// AttributeAtPos implements the method of the same name for an *hcl.File
// that is backed by a *Body.
func (b *Body) AttributeAtPos(pos hcl.Pos) *hcl.Attribute {
return b.attributeAtPos(pos).AsHCLAttribute()
}
// attributeAtPos is the internal version of AttributeAtPos that returns a
// hclsyntax.Block rather than an hcl.Block, allowing for further analysis if
// necessary.
func (b *Body) attributeAtPos(pos hcl.Pos) *Attribute {
searchBody := b
_, block := b.blocksAtPos(pos, false)
if block != nil {
searchBody = block.Body
}
for _, attr := range searchBody.Attributes {
if attr.SrcRange.ContainsPos(pos) {
return attr
}
}
return nil
}
// OutermostExprAtPos implements the method of the same name for an *hcl.File
// that is backed by a *Body.
func (b *Body) OutermostExprAtPos(pos hcl.Pos) hcl.Expression {
attr := b.attributeAtPos(pos)
if attr == nil {
return nil
}
if !attr.Expr.Range().ContainsPos(pos) {
return nil
}
return attr.Expr
}
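A usage sketch for the position-lookup methods above (editorial; the range checks are byte-offset based, so the `Byte` field in the position below is hand-computed for this exact source and would change with any edit):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := []byte("service \"web\" {\n  listen = 8080\n}\n")
	file, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		fmt.Println(diags.Error())
		return
	}
	body := file.Body.(*hclsyntax.Body)

	// A position on the "listen" attribute: line 2, column 3, byte 18.
	pos := hcl.Pos{Line: 2, Column: 3, Byte: 18}
	if block := body.InnermostBlockAtPos(pos); block != nil {
		fmt.Println("block:", block.Type) // block: service
	}
	if attr := body.AttributeAtPos(pos); attr != nil {
		fmt.Println("attr:", attr.Name) // attr: listen
	}
}
```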

272
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go generated vendored Normal file
View file

@ -0,0 +1,272 @@
package hclsyntax
import (
"fmt"
"github.com/apparentlymart/go-textseg/textseg"
"github.com/hashicorp/hcl2/hcl"
)
// Token represents a sequence of bytes from some HCL code that has been
// tagged with a type and its range within the source file.
type Token struct {
Type TokenType
Bytes []byte
Range hcl.Range
}
// Tokens is a slice of Token.
type Tokens []Token
// TokenType is an enumeration used for the Type field on Token.
type TokenType rune
const (
// Single-character tokens are represented by their own character, for
// convenience in producing these within the scanner. However, the values
// are otherwise arbitrary and just intended to be mnemonic for humans
// who might see them in debug output.
TokenOBrace TokenType = '{'
TokenCBrace TokenType = '}'
TokenOBrack TokenType = '['
TokenCBrack TokenType = ']'
TokenOParen TokenType = '('
TokenCParen TokenType = ')'
TokenOQuote TokenType = '«'
TokenCQuote TokenType = '»'
TokenOHeredoc TokenType = 'H'
TokenCHeredoc TokenType = 'h'
TokenStar TokenType = '*'
TokenSlash TokenType = '/'
TokenPlus TokenType = '+'
TokenMinus TokenType = '-'
TokenPercent TokenType = '%'
TokenEqual TokenType = '='
TokenEqualOp TokenType = '≔'
TokenNotEqual TokenType = '≠'
TokenLessThan TokenType = '<'
TokenLessThanEq TokenType = '≤'
TokenGreaterThan TokenType = '>'
TokenGreaterThanEq TokenType = '≥'
TokenAnd TokenType = '∧'
TokenOr TokenType = '∨'
TokenBang TokenType = '!'
TokenDot TokenType = '.'
TokenComma TokenType = ','
TokenEllipsis TokenType = '…'
TokenFatArrow TokenType = '⇒'
TokenQuestion TokenType = '?'
TokenColon TokenType = ':'
TokenTemplateInterp TokenType = '∫'
TokenTemplateControl TokenType = 'λ'
TokenTemplateSeqEnd TokenType = '∎'
TokenQuotedLit TokenType = 'Q' // might contain backslash escapes
TokenStringLit TokenType = 'S' // cannot contain backslash escapes
TokenNumberLit TokenType = 'N'
TokenIdent TokenType = 'I'
TokenComment TokenType = 'C'
TokenNewline TokenType = '\n'
TokenEOF TokenType = '␄'
// The rest are not used in the language but recognized by the scanner so
// we can generate good diagnostics in the parser when users try to write
// things that might work in other languages they are familiar with, or
// simply make incorrect assumptions about the HCL language.
TokenBitwiseAnd TokenType = '&'
TokenBitwiseOr TokenType = '|'
TokenBitwiseNot TokenType = '~'
TokenBitwiseXor TokenType = '^'
TokenStarStar TokenType = '➚'
TokenBacktick TokenType = '`'
TokenSemicolon TokenType = ';'
TokenTabs TokenType = '␉'
TokenInvalid TokenType = '�'
TokenBadUTF8 TokenType = '💩'
// TokenNil is a placeholder for when a token is required but none is
// available, e.g. when reporting errors. The scanner will never produce
// this as part of a token stream.
TokenNil TokenType = '\x00'
)
func (t TokenType) GoString() string {
return fmt.Sprintf("hclsyntax.%s", t.String())
}
type scanMode int
const (
scanNormal scanMode = iota
scanTemplate
scanIdentOnly
)
type tokenAccum struct {
Filename string
Bytes []byte
Pos hcl.Pos
Tokens []Token
}
func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
// Walk through our buffer to figure out how much we need to adjust
// the start pos to get our end pos.
start := f.Pos
start.Column += startOfs - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
start.Byte = startOfs
end := start
end.Byte = endOfs
b := f.Bytes[startOfs:endOfs]
for len(b) > 0 {
advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
if (len(seq) == 1 && seq[0] == '\n') || (len(seq) == 2 && seq[0] == '\r' && seq[1] == '\n') {
end.Line++
end.Column = 1
} else {
end.Column++
}
b = b[advance:]
}
f.Pos = end
f.Tokens = append(f.Tokens, Token{
Type: ty,
Bytes: f.Bytes[startOfs:endOfs],
Range: hcl.Range{
Filename: f.Filename,
Start: start,
End: end,
},
})
}
type heredocInProgress struct {
Marker []byte
StartOfLine bool
}
// checkInvalidTokens does a simple pass across the given tokens and generates
// diagnostics for tokens that should _never_ appear in HCL source. This
// is intended to avoid the need for the parser to have special support
// for them all over.
//
// Returns a diagnostics with no errors if everything seems acceptable.
// Otherwise, returns zero or more error diagnostics, though tries to limit
// repetition of the same information.
func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
var diags hcl.Diagnostics
toldBitwise := 0
toldExponent := 0
toldBacktick := 0
toldSemicolon := 0
toldTabs := 0
toldBadUTF8 := 0
for _, tok := range tokens {
switch tok.Type {
case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot:
if toldBitwise < 4 {
var suggestion string
switch tok.Type {
case TokenBitwiseAnd:
suggestion = " Did you mean boolean AND (\"&&\")?"
case TokenBitwiseOr:
suggestion = " Did you mean boolean OR (\"&&\")?"
case TokenBitwiseNot:
suggestion = " Did you mean boolean NOT (\"!\")?"
}
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported operator",
Detail: fmt.Sprintf("Bitwise operators are not supported.%s", suggestion),
Subject: &tok.Range,
})
toldBitwise++
}
case TokenStarStar:
if toldExponent < 1 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unsupported operator",
Detail: "\"**\" is not a supported operator. Exponentiation is not supported as an operator.",
Subject: &tok.Range,
})
toldExponent++
}
case TokenBacktick:
// Only report for alternating (even) backticks, so we won't report both start and ends of the same
// backtick-quoted string.
if toldBacktick < 4 && (toldBacktick%2) == 0 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid character",
Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"<<EOT\".",
Subject: &tok.Range,
})
toldBacktick++
}
case TokenSemicolon:
if toldSemicolon < 1 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid character",
Detail: "The \";\" character is not valid. Use newlines to separate arguments and blocks, and commas to separate items in collection values.",
Subject: &tok.Range,
})
toldSemicolon++
}
case TokenTabs:
if toldTabs < 1 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid character",
Detail: "Tab characters may not be used. The recommended indentation style is two spaces per indent.",
Subject: &tok.Range,
})
toldTabs++
}
case TokenBadUTF8:
if toldBadUTF8 < 1 {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid character encoding",
Detail: "All input files must be UTF-8 encoded. Ensure that UTF-8 encoding is selected in your editor.",
Subject: &tok.Range,
})
toldBadUTF8++
}
case TokenInvalid:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid character",
Detail: "This character is not used within the language.",
Subject: &tok.Range,
})
}
}
return diags
}

View file

@ -0,0 +1,69 @@
// Code generated by "stringer -type TokenType -output token_type_string.go"; DO NOT EDIT.
package hclsyntax
import "strconv"
const _TokenType_name = "TokenNilTokenNewlineTokenBangTokenPercentTokenBitwiseAndTokenOParenTokenCParenTokenStarTokenPlusTokenCommaTokenMinusTokenDotTokenSlashTokenColonTokenSemicolonTokenLessThanTokenEqualTokenGreaterThanTokenQuestionTokenCommentTokenOHeredocTokenIdentTokenNumberLitTokenQuotedLitTokenStringLitTokenOBrackTokenCBrackTokenBitwiseXorTokenBacktickTokenCHeredocTokenOBraceTokenBitwiseOrTokenCBraceTokenBitwiseNotTokenOQuoteTokenCQuoteTokenTemplateControlTokenEllipsisTokenFatArrowTokenTemplateSeqEndTokenAndTokenOrTokenTemplateInterpTokenEqualOpTokenNotEqualTokenLessThanEqTokenGreaterThanEqTokenEOFTokenTabsTokenStarStarTokenInvalidTokenBadUTF8"
var _TokenType_map = map[TokenType]string{
0: _TokenType_name[0:8],
10: _TokenType_name[8:20],
33: _TokenType_name[20:29],
37: _TokenType_name[29:41],
38: _TokenType_name[41:56],
40: _TokenType_name[56:67],
41: _TokenType_name[67:78],
42: _TokenType_name[78:87],
43: _TokenType_name[87:96],
44: _TokenType_name[96:106],
45: _TokenType_name[106:116],
46: _TokenType_name[116:124],
47: _TokenType_name[124:134],
58: _TokenType_name[134:144],
59: _TokenType_name[144:158],
60: _TokenType_name[158:171],
61: _TokenType_name[171:181],
62: _TokenType_name[181:197],
63: _TokenType_name[197:210],
67: _TokenType_name[210:222],
72: _TokenType_name[222:235],
73: _TokenType_name[235:245],
78: _TokenType_name[245:259],
81: _TokenType_name[259:273],
83: _TokenType_name[273:287],
91: _TokenType_name[287:298],
93: _TokenType_name[298:309],
94: _TokenType_name[309:324],
96: _TokenType_name[324:337],
104: _TokenType_name[337:350],
123: _TokenType_name[350:361],
124: _TokenType_name[361:375],
125: _TokenType_name[375:386],
126: _TokenType_name[386:401],
171: _TokenType_name[401:412],
187: _TokenType_name[412:423],
955: _TokenType_name[423:443],
8230: _TokenType_name[443:456],
8658: _TokenType_name[456:469],
8718: _TokenType_name[469:488],
8743: _TokenType_name[488:496],
8744: _TokenType_name[496:503],
8747: _TokenType_name[503:522],
8788: _TokenType_name[522:534],
8800: _TokenType_name[534:547],
8804: _TokenType_name[547:562],
8805: _TokenType_name[562:580],
9220: _TokenType_name[580:588],
9225: _TokenType_name[588:597],
10138: _TokenType_name[597:610],
65533: _TokenType_name[610:622],
128169: _TokenType_name[622:634],
}
func (i TokenType) String() string {
if str, ok := _TokenType_map[i]; ok {
return str
}
return "TokenType(" + strconv.FormatInt(int64(i), 10) + ")"
}
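A small usage sketch of the generated stringer together with the `GoString` method from token.go above:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	fmt.Println(hclsyntax.TokenOBrace)        // TokenOBrace
	fmt.Printf("%#v\n", hclsyntax.TokenMinus) // hclsyntax.TokenMinus

	// Values outside the map fall back to the numeric form.
	fmt.Println(hclsyntax.TokenType('@')) // TokenType(64)
}
```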

View file

@ -0,0 +1,335 @@
#!/usr/bin/env ruby
#
# This script has been updated to accept more command-line arguments:
#
# -u, --url URL to process
# -m, --machine Machine name
# -p, --properties Properties to add to the machine
# -o, --output Write output to file
#
# Updated by: Marty Schoch <marty.schoch@gmail.com>
#
# This script uses the unicode spec to generate a Ragel state machine
# that recognizes unicode alphanumeric characters. It generates 5
# character classes: uupper, ulower, ualpha, udigit, and ualnum.
# Currently supported encodings are UTF-8 [default] and UCS-4.
#
# Usage: unicode2ragel.rb [options]
# -e, --encoding [ucs4 | utf8] Data encoding
# -h, --help Show this message
#
# This script was originally written as part of the Ferret search
# engine library.
#
# Author: Rakan El-Khalil <rakan@well.com>
require 'optparse'
require 'open-uri'
ENCODINGS = [ :utf8, :ucs4 ]
ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
DEFAULT_MACHINE_NAME= "WChar"
###
# Display vars & default option
TOTAL_WIDTH = 80
RANGE_WIDTH = 23
@encoding = :utf8
@chart_url = DEFAULT_CHART_URL
machine_name = DEFAULT_MACHINE_NAME
properties = []
@output = $stdout
###
# Option parsing
cli_opts = OptionParser.new do |opts|
opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
@encoding = o.downcase.to_sym
end
opts.on("-h", "--help", "Show this message") do
puts opts
exit
end
opts.on("-u", "--url URL", "URL to process") do |o|
@chart_url = o
end
opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
machine_name = o
end
opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
properties = o
end
opts.on("-o", "--output FILE", "output file") do |o|
@output = File.new(o, "w+")
end
end
cli_opts.parse(ARGV)
unless ENCODINGS.member? @encoding
puts "Invalid encoding: #{@encoding}"
puts cli_opts
exit
end
##
# Downloads the document at url and yields every alpha line's hex
# range and description.
def each_alpha( url, property )
open( url ) do |file|
file.each_line do |line|
next if line =~ /^#/;
next if line !~ /; #{property} #/;
range, description = line.split(/;/)
range.strip!
description.gsub!(/.*#/, '').strip!
if range =~ /\.\./
start, stop = range.split '..'
else start = stop = range
end
yield start.hex .. stop.hex, description
end
end
end
###
# Formats to hex at minimum width
def to_hex( n )
r = "%0X" % n
r = "0#{r}" unless (r.length % 2).zero?
r
end
###
# UCS4 is just a straight hex conversion of the unicode codepoint.
def to_ucs4( range )
rangestr = "0x" + to_hex(range.begin)
rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
[ rangestr ]
end
##
# 0x00 - 0x7f -> 0zzzzzzz[7]
# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
def to_utf8_enc( n )
r = 0
if n <= 0x7f
r = n
elsif n <= 0x7ff
y = 0xc0 | (n >> 6)
z = 0x80 | (n & 0x3f)
r = y << 8 | z
elsif n <= 0xffff
x = 0xe0 | (n >> 12)
y = 0x80 | (n >> 6) & 0x3f
z = 0x80 | n & 0x3f
r = x << 16 | y << 8 | z
elsif n <= 0x10ffff
w = 0xf0 | (n >> 18)
x = 0x80 | (n >> 12) & 0x3f
y = 0x80 | (n >> 6) & 0x3f
z = 0x80 | n & 0x3f
r = w << 24 | x << 16 | y << 8 | z
end
to_hex(r)
end
def from_utf8_enc( n )
n = n.hex
r = 0
if n <= 0x7f
r = n
elsif n <= 0xdfff
y = (n >> 8) & 0x1f
z = n & 0x3f
r = y << 6 | z
elsif n <= 0xefffff
x = (n >> 16) & 0x0f
y = (n >> 8) & 0x3f
z = n & 0x3f
r = x << 10 | y << 6 | z
elsif n <= 0xf7ffffff
w = (n >> 24) & 0x07
x = (n >> 16) & 0x3f
y = (n >> 8) & 0x3f
z = n & 0x3f
r = w << 18 | x << 12 | y << 6 | z
end
r
end
###
# Given a range, splits it up into ranges that can be continuously
# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
# This is not strictly needed since the current [5.1] unicode standard
# doesn't have ranges that straddle utf8 boundaries. This is included
# for completeness as there is no telling if that will ever change.
def utf8_ranges( range )
ranges = []
UTF8_BOUNDARIES.each do |max|
if range.begin <= max
if range.end <= max
ranges << range
return ranges
end
ranges << (range.begin .. max)
range = (max + 1) .. range.end
end
end
ranges
end
def build_range( start, stop )
size = start.size/2
left = size - 1
return [""] if size < 1
a = start[0..1]
b = stop[0..1]
###
# Shared prefix
if a == b
return build_range(start[2..-1], stop[2..-1]).map do |elt|
"0x#{a} " + elt
end
end
###
# Unshared prefix, end of run
return ["0x#{a}..0x#{b} "] if left.zero?
###
# Unshared prefix, not end of run
# Range can be 0x123456..0x56789A
# Which is equivalent to:
# 0x123456 .. 0x12FFFF
# 0x130000 .. 0x55FFFF
# 0x560000 .. 0x56789A
ret = []
ret << build_range(start, a + "FF" * left)
###
# Only generate middle range if need be.
if a.hex+1 != b.hex
max = to_hex(b.hex - 1)
max = "FF" if b == "FF"
ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
end
###
# Don't generate last range if it is covered by first range
ret << build_range(b + "00" * left, stop) unless b == "FF"
ret.flatten!
end
def to_utf8( range )
utf8_ranges( range ).map do |r|
begin_enc = to_utf8_enc(r.begin)
end_enc = to_utf8_enc(r.end)
build_range begin_enc, end_enc
end.flatten!
end
##
# Perform a 3-way comparison of the number of codepoints advertised by
# the unicode spec for the given range, the originally parsed range,
# and the resulting utf8 encoded range.
def count_codepoints( code )
code.split(' ').inject(1) do |acc, elt|
if elt =~ /0x(.+)\.\.0x(.+)/
if @encoding == :utf8
acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
else
acc * ($2.hex - $1.hex + 1)
end
else
acc
end
end
end
def is_valid?( range, desc, codes )
spec_count = 1
spec_count = $1.to_i if desc =~ /\[(\d+)\]/
range_count = range.end - range.begin + 1
sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
sum == spec_count and sum == range_count
end
##
# Generate the state machine to the configured output (stdout by default)
def generate_machine( name, property )
pipe = " "
@output.puts " #{name} = "
each_alpha( @chart_url, property ) do |range, desc|
codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
#raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
# is_valid? range, desc, codes
range_width = codes.map { |a| a.size }.max
range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11
desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
if desc.size > desc_width
desc = desc[0..desc_width - 4] + "..."
end
codes.each_with_index do |r, idx|
desc = "" unless idx.zero?
code = "%-#{range_width}s" % r
@output.puts " #{pipe} #{code} ##{desc}"
pipe = "|"
end
end
@output.puts " ;"
@output.puts ""
end
@output.puts <<EOF
# The following Ragel file was autogenerated with #{$0}
# from: #{@chart_url}
#
# It defines #{properties}.
#
# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
# and that your input is in #{@encoding}.
%%{
machine #{machine_name};
EOF
properties.each { |x| generate_machine( x, x ) }
@output.puts <<EOF
}%%
EOF
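As a usage note (editorial): with the options documented in the script's header, an invocation along the lines of `ruby unicode2ragel.rb -e utf8 -u <chart URL> -m UAlpha -p Alphabetic -o ualpha.rl` would download the given property chart and write a Ragel file declaring a `UAlpha` machine containing an `Alphabetic` character class. The machine name, property, and output filename here are illustrative choices, not values prescribed by the script.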

File diff suppressed because it is too large

View file

@ -0,0 +1,86 @@
package hclsyntax
import (
"github.com/hashicorp/hcl2/hcl"
)
// Variables returns all of the variables referenced within a given expression.
//
// This is the implementation of the "Variables" method on every native
// expression.
func Variables(expr Expression) []hcl.Traversal {
var vars []hcl.Traversal
walker := &variablesWalker{
Callback: func(t hcl.Traversal) {
vars = append(vars, t)
},
}
Walk(expr, walker)
return vars
}
// variablesWalker is a Walker implementation that calls its callback for any
// root scope traversal found while walking.
type variablesWalker struct {
Callback func(hcl.Traversal)
localScopes []map[string]struct{}
}
func (w *variablesWalker) Enter(n Node) hcl.Diagnostics {
switch tn := n.(type) {
case *ScopeTraversalExpr:
t := tn.Traversal
// Check if the given root name appears in any of the active
// local scopes. We don't want to return local variables here, since
// the goal of walking variables is to tell the calling application
// which names it needs to populate in the _root_ scope.
name := t.RootName()
for _, names := range w.localScopes {
if _, localized := names[name]; localized {
return nil
}
}
w.Callback(t)
case ChildScope:
w.localScopes = append(w.localScopes, tn.LocalNames)
}
return nil
}
func (w *variablesWalker) Exit(n Node) hcl.Diagnostics {
switch n.(type) {
case ChildScope:
// pop the latest local scope, assuming that the walker will
// behave symmetrically as promised.
w.localScopes = w.localScopes[:len(w.localScopes)-1]
}
return nil
}
// ChildScope is a synthetic AST node that is visited during a walk to
// indicate that its descendant will be evaluated in a child scope, which
// may mask certain variables from the parent scope as locals.
//
// ChildScope nodes don't really exist in the AST, but are rather synthesized
// on the fly during walk. Therefore it doesn't do any good to transform them;
// instead, transform either the parent node that created the scope or the expression
// that the child scope struct wraps.
type ChildScope struct {
LocalNames map[string]struct{}
Expr *Expression // pointer because it can be replaced on walk
}
func (e ChildScope) walkChildNodes(w internalWalkFunc) {
*(e.Expr) = w(*(e.Expr)).(Expression)
}
// Range returns the range of the expression that the ChildScope is
// encapsulating. It isn't really very useful to call Range on a ChildScope.
func (e ChildScope) Range() hcl.Range {
return (*e.Expr).Range()
}

77
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go generated vendored Normal file

@ -0,0 +1,77 @@
package hclsyntax
import (
"github.com/hashicorp/hcl2/hcl"
)
// VisitFunc is the callback signature for VisitAll.
type VisitFunc func(node Node) hcl.Diagnostics
// VisitAll is a basic way to traverse the AST beginning with a particular
// node. The given function will be called once for each AST node in
// depth-first order, but no context is provided about the shape of the tree.
//
// The VisitFunc may return diagnostics, in which case they will be accumulated
// and returned as a single set.
func VisitAll(node Node, f VisitFunc) hcl.Diagnostics {
diags := f(node)
node.walkChildNodes(func(node Node) Node {
diags = append(diags, VisitAll(node, f)...)
return node
})
return diags
}
// Walker is an interface used with Walk.
type Walker interface {
Enter(node Node) hcl.Diagnostics
Exit(node Node) hcl.Diagnostics
}
// Walk is a more complex way to traverse the AST starting with a particular
// node, which provides information about the tree structure via separate
// Enter and Exit functions.
func Walk(node Node, w Walker) hcl.Diagnostics {
diags := w.Enter(node)
node.walkChildNodes(func(node Node) Node {
diags = append(diags, Walk(node, w)...)
return node
})
// Call Exit after visiting children, so that implementations such as
// variablesWalker can rely on symmetric Enter/Exit pairs to pop scopes.
diags = append(diags, w.Exit(node)...)
return diags
}
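// Editorial sketch: a minimal Walker that counts the nodes visited,
// illustrating the Enter/Exit pairing that Walk performs for every node.
//
// type nodeCounter struct{ n int }
//
// func (c *nodeCounter) Enter(node Node) hcl.Diagnostics { c.n++; return nil }
// func (c *nodeCounter) Exit(node Node) hcl.Diagnostics  { return nil }
//
// counter := &nodeCounter{}
// diags := Walk(expr, counter) // counter.n now holds the number of nodes visited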
// Transformer is an interface used with Transform
type Transformer interface {
// Transform accepts a node and returns a replacement node along with
// a flag for whether to also visit child nodes. If the flag is false,
// none of the child nodes will be visited and the TransformExit method
// will not be called for the node.
//
// It is acceptable and appropriate for Transform to return the same node
// it was given, for situations where no transform is needed.
Transform(node Node) (Node, bool, hcl.Diagnostics)
// TransformExit signals the end of transformations of child nodes of the
// given node. If Transform returned a new node, the given node is the
// node that was returned, rather than the node that was originally
// encountered.
TransformExit(node Node) hcl.Diagnostics
}
// Transform allows for in-place transformations of an AST starting with a
// particular node. The provided Transformer implementation drives the
// transformation process. The return value is the node that replaced the
// given top-level node.
func Transform(node Node, t Transformer) (Node, hcl.Diagnostics) {
newNode, descend, diags := t.Transform(node)
if !descend {
return newNode, diags
}
node.walkChildNodes(func(node Node) Node {
newNode, newDiags := Transform(node, t)
diags = append(diags, newDiags...)
return newNode
})
diags = append(diags, t.TransformExit(newNode)...)
return newNode, diags
}

121
vendor/github.com/hashicorp/hcl2/hcl/json/ast.go generated vendored Normal file

@ -0,0 +1,121 @@
package json
import (
"math/big"
"github.com/hashicorp/hcl2/hcl"
)
type node interface {
Range() hcl.Range
StartRange() hcl.Range
}
type objectVal struct {
Attrs []*objectAttr
SrcRange hcl.Range // range of the entire object, brace-to-brace
OpenRange hcl.Range // range of the opening brace
CloseRange hcl.Range // range of the closing brace
}
func (n *objectVal) Range() hcl.Range {
return n.SrcRange
}
func (n *objectVal) StartRange() hcl.Range {
return n.OpenRange
}
type objectAttr struct {
Name string
Value node
NameRange hcl.Range // range of the name string
}
func (n *objectAttr) Range() hcl.Range {
return n.NameRange
}
func (n *objectAttr) StartRange() hcl.Range {
return n.NameRange
}
type arrayVal struct {
Values []node
SrcRange hcl.Range // range of the entire object, bracket-to-bracket
OpenRange hcl.Range // range of the opening bracket
}
func (n *arrayVal) Range() hcl.Range {
return n.SrcRange
}
func (n *arrayVal) StartRange() hcl.Range {
return n.OpenRange
}
type booleanVal struct {
Value bool
SrcRange hcl.Range
}
func (n *booleanVal) Range() hcl.Range {
return n.SrcRange
}
func (n *booleanVal) StartRange() hcl.Range {
return n.SrcRange
}
type numberVal struct {
Value *big.Float
SrcRange hcl.Range
}
func (n *numberVal) Range() hcl.Range {
return n.SrcRange
}
func (n *numberVal) StartRange() hcl.Range {
return n.SrcRange
}
type stringVal struct {
Value string
SrcRange hcl.Range
}
func (n *stringVal) Range() hcl.Range {
return n.SrcRange
}
func (n *stringVal) StartRange() hcl.Range {
return n.SrcRange
}
type nullVal struct {
SrcRange hcl.Range
}
func (n *nullVal) Range() hcl.Range {
return n.SrcRange
}
func (n *nullVal) StartRange() hcl.Range {
return n.SrcRange
}
// invalidVal is used as a placeholder where a value is needed for a valid
// parse tree but the input was invalid enough to prevent one from being
// created.
type invalidVal struct {
SrcRange hcl.Range
}
func (n invalidVal) Range() hcl.Range {
return n.SrcRange
}
func (n invalidVal) StartRange() hcl.Range {
return n.SrcRange
}
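// Editorial note: each node type above distinguishes Range, which covers
// the whole construct, from StartRange, which covers only its opening
// token; diagnostics elsewhere in this package point at StartRange when
// highlighting an entire object or array would be unhelpfully large.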

33
vendor/github.com/hashicorp/hcl2/hcl/json/didyoumean.go generated vendored Normal file

@ -0,0 +1,33 @@
package json
import (
"github.com/agext/levenshtein"
)
var keywords = []string{"false", "true", "null"}
// keywordSuggestion tries to find a valid JSON keyword that is close to the
// given string and returns it if found. If no keyword is close enough, returns
// the empty string.
func keywordSuggestion(given string) string {
return nameSuggestion(given, keywords)
}
// nameSuggestion tries to find a name from the given slice of suggested names
// that is close to the given name and returns it if found. If no suggestion
// is close enough, returns the empty string.
//
// The suggestions are tried in order, so earlier suggestions take precedence
// if the given string is similar to two or more suggestions.
//
// This function is intended to be used with a relatively-small number of
// suggestions. It's not optimized for hundreds or thousands of them.
func nameSuggestion(given string, suggestions []string) string {
for _, suggestion := range suggestions {
dist := levenshtein.Distance(given, suggestion, nil)
if dist < 3 { // threshold determined experimentally
return suggestion
}
}
return ""
}
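// Editorial example: nameSuggestion("flase", keywords) returns "false",
// since the Levenshtein distance is 2, under the threshold of 3, while
// nameSuggestion("object", keywords) returns "" because no keyword is
// close enough.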

8
vendor/github.com/hashicorp/hcl2/hcl/json/doc.go generated vendored Normal file

@ -0,0 +1,8 @@
// Package json is the JSON parser for HCL. It parses JSON files and returns
// implementations of the core HCL structural interfaces in terms of the
// JSON data inside.
//
// This is not a generic JSON parser. Instead, it deals with the mapping from
// the JSON information model to the HCL information model, using a number
// of hard-coded structural conventions.
package json

70
vendor/github.com/hashicorp/hcl2/hcl/json/navigation.go generated vendored Normal file

@ -0,0 +1,70 @@
package json
import (
"fmt"
"strings"
)
type navigation struct {
root node
}
// Implementation of hcled.ContextString
func (n navigation) ContextString(offset int) string {
steps := navigationStepsRev(n.root, offset)
if steps == nil {
return ""
}
// We built our slice backwards, so we'll reverse it in-place now.
half := len(steps) / 2 // integer division
for i := 0; i < half; i++ {
steps[i], steps[len(steps)-1-i] = steps[len(steps)-1-i], steps[i]
}
ret := strings.Join(steps, "")
if len(ret) > 0 && ret[0] == '.' {
ret = ret[1:]
}
return ret
}
func navigationStepsRev(v node, offset int) []string {
switch tv := v.(type) {
case *objectVal:
// Do any of our properties have an object that contains the target
// offset?
for _, attr := range tv.Attrs {
k := attr.Name
av := attr.Value
switch av.(type) {
case *objectVal, *arrayVal:
// okay
default:
continue
}
if av.Range().ContainsOffset(offset) {
return append(navigationStepsRev(av, offset), "."+k)
}
}
case *arrayVal:
// Do any of our elements contain the target offset?
for i, elem := range tv.Values {
switch elem.(type) {
case *objectVal, *arrayVal:
// okay
default:
continue
}
if elem.Range().ContainsOffset(offset) {
return append(navigationStepsRev(elem, offset), fmt.Sprintf("[%d]", i))
}
}
}
return nil
}
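// Editorial example: for the document {"a": {"b": [{"c": true}]}}, an
// offset inside the innermost object yields the reversed steps
// "[0]", ".b", ".a", which ContextString reverses and joins into "a.b[0]".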

491
vendor/github.com/hashicorp/hcl2/hcl/json/parser.go generated vendored Normal file

@ -0,0 +1,491 @@
package json
import (
"encoding/json"
"fmt"
"math/big"
"github.com/hashicorp/hcl2/hcl"
)
func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) {
tokens := scan(buf, pos{
Filename: filename,
Pos: hcl.Pos{
Byte: 0,
Line: 1,
Column: 1,
},
})
p := newPeeker(tokens)
node, diags := parseValue(p)
if len(diags) == 0 && p.Peek().Type != tokenEOF {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Extraneous data after value",
Detail: "Extra characters appear after the JSON value.",
Subject: p.Peek().Range.Ptr(),
})
}
return node, diags
}
func parseValue(p *peeker) (node, hcl.Diagnostics) {
tok := p.Peek()
wrapInvalid := func(n node, diags hcl.Diagnostics) (node, hcl.Diagnostics) {
if n != nil {
return n, diags
}
return invalidVal{tok.Range}, diags
}
switch tok.Type {
case tokenBraceO:
return wrapInvalid(parseObject(p))
case tokenBrackO:
return wrapInvalid(parseArray(p))
case tokenNumber:
return wrapInvalid(parseNumber(p))
case tokenString:
return wrapInvalid(parseString(p))
case tokenKeyword:
return wrapInvalid(parseKeyword(p))
case tokenBraceC:
return wrapInvalid(nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Missing JSON value",
Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.",
Subject: &tok.Range,
},
})
case tokenBrackC:
return wrapInvalid(nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Missing array element value",
Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.",
Subject: &tok.Range,
},
})
case tokenEOF:
return wrapInvalid(nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Missing value",
Detail: "The JSON data ends prematurely.",
Subject: &tok.Range,
},
})
default:
return wrapInvalid(nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Invalid start of value",
Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.",
Subject: &tok.Range,
},
})
}
}
func tokenCanStartValue(tok token) bool {
switch tok.Type {
case tokenBraceO, tokenBrackO, tokenNumber, tokenString, tokenKeyword:
return true
default:
return false
}
}
func parseObject(p *peeker) (node, hcl.Diagnostics) {
var diags hcl.Diagnostics
open := p.Read()
attrs := []*objectAttr{}
// recover is used to shift the peeker to what seems to be the end of
// our object, so that when we encounter an error we leave the peeker
// at a reasonable point in the token stream to continue parsing.
recover := func(tok token) {
open := 1
for {
switch tok.Type {
case tokenBraceO:
open++
case tokenBraceC:
open--
if open <= 1 {
return
}
case tokenEOF:
// Ran out of source before we were able to recover,
// so we'll bail here and let the caller deal with it.
return
}
tok = p.Read()
}
}
Token:
for {
if p.Peek().Type == tokenBraceC {
break Token
}
keyNode, keyDiags := parseValue(p)
diags = diags.Extend(keyDiags)
if keyNode == nil {
return nil, diags
}
keyStrNode, ok := keyNode.(*stringVal)
if !ok {
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid object property name",
Detail: "A JSON object property name must be a string",
Subject: keyNode.StartRange().Ptr(),
})
}
key := keyStrNode.Value
colon := p.Read()
if colon.Type != tokenColon {
recover(colon)
if colon.Type == tokenBraceC || colon.Type == tokenComma {
// Catch common mistake of using braces instead of brackets
// for an object.
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing object value",
Detail: "A JSON object attribute must have a value, introduced by a colon.",
Subject: &colon.Range,
})
}
if colon.Type == tokenEquals {
// Possible confusion with native HCL syntax.
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing property value colon",
Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.",
Subject: &colon.Range,
})
}
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing property value colon",
Detail: "A colon must appear between an object property's name and its value.",
Subject: &colon.Range,
})
}
valNode, valDiags := parseValue(p)
diags = diags.Extend(valDiags)
if valNode == nil {
return nil, diags
}
attrs = append(attrs, &objectAttr{
Name: key,
Value: valNode,
NameRange: keyStrNode.SrcRange,
})
switch p.Peek().Type {
case tokenComma:
comma := p.Read()
if p.Peek().Type == tokenBraceC {
// Special error message for this common mistake
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Trailing comma in object",
Detail: "JSON does not permit a trailing comma after the final property in an object.",
Subject: &comma.Range,
})
}
continue Token
case tokenEOF:
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unclosed object",
Detail: "No closing brace was found for this JSON object.",
Subject: &open.Range,
})
case tokenBrackC:
// Consume the bracket anyway, so that we don't return with the peeker
// at a strange place.
p.Read()
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Mismatched braces",
Detail: "A JSON object must be closed with a brace, not a bracket.",
Subject: p.Peek().Range.Ptr(),
})
case tokenBraceC:
break Token
default:
recover(p.Read())
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing attribute seperator comma",
Detail: "A comma must appear between each property definition in an object.",
Subject: p.Peek().Range.Ptr(),
})
}
}
close := p.Read()
return &objectVal{
Attrs: attrs,
SrcRange: hcl.RangeBetween(open.Range, close.Range),
OpenRange: open.Range,
CloseRange: close.Range,
}, diags
}
func parseArray(p *peeker) (node, hcl.Diagnostics) {
var diags hcl.Diagnostics
open := p.Read()
vals := []node{}
// recover is used to shift the peeker to what seems to be the end of
// our array, so that when we encounter an error we leave the peeker
// at a reasonable point in the token stream to continue parsing.
recover := func(tok token) {
open := 1
for {
switch tok.Type {
case tokenBrackO:
open++
case tokenBrackC:
open--
if open <= 1 {
return
}
case tokenEOF:
// Ran out of source before we were able to recover,
// so we'll bail here and let the caller deal with it.
return
}
tok = p.Read()
}
}
Token:
for {
if p.Peek().Type == tokenBrackC {
break Token
}
valNode, valDiags := parseValue(p)
diags = diags.Extend(valDiags)
if valNode == nil {
return nil, diags
}
vals = append(vals, valNode)
switch p.Peek().Type {
case tokenComma:
comma := p.Read()
if p.Peek().Type == tokenBrackC {
// Special error message for this common mistake
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Trailing comma in array",
Detail: "JSON does not permit a trailing comma after the final value in an array.",
Subject: &comma.Range,
})
}
continue Token
case tokenColon:
recover(p.Read())
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid array value",
Detail: "A colon is not used to introduce values in a JSON array.",
Subject: p.Peek().Range.Ptr(),
})
case tokenEOF:
recover(p.Read())
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Unclosed object",
Detail: "No closing bracket was found for this JSON array.",
Subject: &open.Range,
})
case tokenBraceC:
recover(p.Read())
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Mismatched brackets",
Detail: "A JSON array must be closed with a bracket, not a brace.",
Subject: p.Peek().Range.Ptr(),
})
case tokenBrackC:
break Token
default:
recover(p.Read())
return nil, diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing attribute seperator comma",
Detail: "A comma must appear between each value in an array.",
Subject: p.Peek().Range.Ptr(),
})
}
}
close := p.Read()
return &arrayVal{
Values: vals,
SrcRange: hcl.RangeBetween(open.Range, close.Range),
OpenRange: open.Range,
}, diags
}
func parseNumber(p *peeker) (node, hcl.Diagnostics) {
tok := p.Read()
// Use encoding/json to validate the number syntax.
// TODO: Do this more directly to produce better diagnostics.
var num json.Number
err := json.Unmarshal(tok.Bytes, &num)
if err != nil {
return nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Invalid JSON number",
Detail: fmt.Sprintf("There is a syntax error in the given JSON number."),
Subject: &tok.Range,
},
}
}
f, _, err := big.ParseFloat(string(num), 10, 512, big.ToNearestEven)
if err != nil {
// Should never happen if above passed, since JSON numbers are a subset
// of what big.Float can parse...
return nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Invalid JSON number",
Detail: fmt.Sprintf("There is a syntax error in the given JSON number."),
Subject: &tok.Range,
},
}
}
return &numberVal{
Value: f,
SrcRange: tok.Range,
}, nil
}
func parseString(p *peeker) (node, hcl.Diagnostics) {
tok := p.Read()
var str string
err := json.Unmarshal(tok.Bytes, &str)
if err != nil {
var errRange hcl.Range
if serr, ok := err.(*json.SyntaxError); ok {
errOfs := serr.Offset
errPos := tok.Range.Start
errPos.Byte += int(errOfs)
// TODO: Use the byte offset to properly count unicode
// characters for the column, and mark the whole of the
// character that was wrong as part of our range.
errPos.Column += int(errOfs)
errEndPos := errPos
errEndPos.Byte++
errEndPos.Column++
errRange = hcl.Range{
Filename: tok.Range.Filename,
Start: errPos,
End: errEndPos,
}
} else {
errRange = tok.Range
}
var contextRange *hcl.Range
if errRange != tok.Range {
contextRange = &tok.Range
}
// FIXME: Eventually we should parse strings directly here so
// we can produce a more useful error message in the face of things
// such as invalid escapes, etc.
return nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Invalid JSON string",
Detail: fmt.Sprintf("There is a syntax error in the given JSON string."),
Subject: &errRange,
Context: contextRange,
},
}
}
return &stringVal{
Value: str,
SrcRange: tok.Range,
}, nil
}
func parseKeyword(p *peeker) (node, hcl.Diagnostics) {
tok := p.Read()
s := string(tok.Bytes)
switch s {
case "true":
return &booleanVal{
Value: true,
SrcRange: tok.Range,
}, nil
case "false":
return &booleanVal{
Value: false,
SrcRange: tok.Range,
}, nil
case "null":
return &nullVal{
SrcRange: tok.Range,
}, nil
case "undefined", "NaN", "Infinity":
return nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Invalid JSON keyword",
Detail: fmt.Sprintf("The JavaScript identifier %q cannot be used in JSON.", s),
Subject: &tok.Range,
},
}
default:
var dym string
if suggest := keywordSuggestion(s); suggest != "" {
dym = fmt.Sprintf(" Did you mean %q?", suggest)
}
return nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Invalid JSON keyword",
Detail: fmt.Sprintf("%q is not a valid JSON keyword.%s", s, dym),
Subject: &tok.Range,
},
}
}
}

25
vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go generated vendored Normal file

@ -0,0 +1,25 @@
package json
type peeker struct {
tokens []token
pos int
}
func newPeeker(tokens []token) *peeker {
return &peeker{
tokens: tokens,
pos: 0,
}
}
func (p *peeker) Peek() token {
return p.tokens[p.pos]
}
func (p *peeker) Read() token {
ret := p.tokens[p.pos]
if ret.Type != tokenEOF {
p.pos++
}
return ret
}
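// Editorial note: Read never advances past the EOF token, so the parser's
// error-recovery loops can call Read unconditionally; once tokenEOF is
// reached, every later Peek or Read keeps returning it.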

94
vendor/github.com/hashicorp/hcl2/hcl/json/public.go generated vendored Normal file

@ -0,0 +1,94 @@
package json
import (
"fmt"
"io/ioutil"
"os"
"github.com/hashicorp/hcl2/hcl"
)
// Parse attempts to parse the given buffer as JSON and, if successful, returns
// a hcl.File for the HCL configuration represented by it.
//
// This is not a generic JSON parser. Instead, it deals only with the profile
// of JSON used to express HCL configuration.
//
// The returned file is valid only if the returned diagnostics returns false
// from its HasErrors method. If HasErrors returns true, the file represents
// the subset of data that was able to be parsed, which may be none.
func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
rootNode, diags := parseFileContent(src, filename)
switch rootNode.(type) {
case *objectVal, *arrayVal:
// okay
default:
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Root value must be object",
Detail: "The root value in a JSON-based configuration must be either a JSON object or a JSON array of objects.",
Subject: rootNode.StartRange().Ptr(),
})
// Since we've already produced an error message for this being
// invalid, we'll return an empty placeholder here so that trying to
// extract content from our root body won't produce a redundant
// error saying the same thing again in more general terms.
fakePos := hcl.Pos{
Byte: 0,
Line: 1,
Column: 1,
}
fakeRange := hcl.Range{
Filename: filename,
Start: fakePos,
End: fakePos,
}
rootNode = &objectVal{
Attrs: []*objectAttr{},
SrcRange: fakeRange,
OpenRange: fakeRange,
}
}
file := &hcl.File{
Body: &body{
val: rootNode,
},
Bytes: src,
Nav: navigation{rootNode},
}
return file, diags
}
// ParseFile is a convenience wrapper around Parse that first attempts to load
// data from the given filename, passing the result to Parse if successful.
//
// If the file cannot be read, an error diagnostic with nil context is returned.
func ParseFile(filename string) (*hcl.File, hcl.Diagnostics) {
f, err := os.Open(filename)
if err != nil {
return nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Failed to open file",
Detail: fmt.Sprintf("The file %q could not be opened.", filename),
},
}
}
defer f.Close()
src, err := ioutil.ReadAll(f)
if err != nil {
return nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Failed to read file",
Detail: fmt.Sprintf("The file %q was opened, but an error occured while reading it.", filename),
},
}
}
return Parse(src, filename)
}
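// Editorial usage sketch from a calling package, assuming this package is
// imported as hcljson and that "io_mode" is an attribute the caller cares
// about:
//
// f, diags := hcljson.Parse([]byte(`{"io_mode": "async"}`), "config.hcl.json")
// if !diags.HasErrors() {
//     attrs, _ := f.Body.JustAttributes()
//     _ = attrs["io_mode"] // an *hcl.Attribute whose Expr yields cty.StringVal("async")
// }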

293
vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go generated vendored Normal file

@ -0,0 +1,293 @@
package json
import (
"fmt"
"github.com/apparentlymart/go-textseg/textseg"
"github.com/hashicorp/hcl2/hcl"
)
//go:generate stringer -type tokenType scanner.go
type tokenType rune
const (
tokenBraceO tokenType = '{'
tokenBraceC tokenType = '}'
tokenBrackO tokenType = '['
tokenBrackC tokenType = ']'
tokenComma tokenType = ','
tokenColon tokenType = ':'
tokenKeyword tokenType = 'K'
tokenString tokenType = 'S'
tokenNumber tokenType = 'N'
tokenEOF tokenType = '␄'
tokenInvalid tokenType = 0
tokenEquals tokenType = '=' // used only for reminding the user of JSON syntax
)
type token struct {
Type tokenType
Bytes []byte
Range hcl.Range
}
// scan returns the primary tokens for the given JSON buffer in sequence.
//
// The responsibility of this pass is to just mark the slices of the buffer
// as being of various types. It is lax in how it interprets the multi-byte
// token types keyword, string and number, preferring to capture erroneous
// extra bytes that we presume the user intended to be part of the token
// so that we can generate more helpful diagnostics in the parser.
func scan(buf []byte, start pos) []token {
var tokens []token
p := start
for {
if len(buf) == 0 {
tokens = append(tokens, token{
Type: tokenEOF,
Bytes: nil,
Range: posRange(p, p),
})
return tokens
}
buf, p = skipWhitespace(buf, p)
if len(buf) == 0 {
tokens = append(tokens, token{
Type: tokenEOF,
Bytes: nil,
Range: posRange(p, p),
})
return tokens
}
start = p
first := buf[0]
switch {
case first == '{' || first == '}' || first == '[' || first == ']' || first == ',' || first == ':' || first == '=':
p.Pos.Column++
p.Pos.Byte++
tokens = append(tokens, token{
Type: tokenType(first),
Bytes: buf[0:1],
Range: posRange(start, p),
})
buf = buf[1:]
case first == '"':
var tokBuf []byte
tokBuf, buf, p = scanString(buf, p)
tokens = append(tokens, token{
Type: tokenString,
Bytes: tokBuf,
Range: posRange(start, p),
})
case byteCanStartNumber(first):
var tokBuf []byte
tokBuf, buf, p = scanNumber(buf, p)
tokens = append(tokens, token{
Type: tokenNumber,
Bytes: tokBuf,
Range: posRange(start, p),
})
case byteCanStartKeyword(first):
var tokBuf []byte
tokBuf, buf, p = scanKeyword(buf, p)
tokens = append(tokens, token{
Type: tokenKeyword,
Bytes: tokBuf,
Range: posRange(start, p),
})
default:
tokens = append(tokens, token{
Type: tokenInvalid,
Bytes: buf[:1],
Range: start.Range(1, 1),
})
// If we've encountered an invalid then we might as well stop
// scanning since the parser won't proceed beyond this point.
return tokens
}
}
}
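// Editorial example: scanning the input {"a": 1} yields tokens of types
// tokenBraceO, tokenString, tokenColon, tokenNumber, tokenBraceC and
// finally tokenEOF, with the whitespace between them skipped.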
func byteCanStartNumber(b byte) bool {
switch b {
// We are slightly more tolerant than JSON requires here since we
// expect the parser will make a stricter interpretation of the
// number bytes, but we specifically don't allow 'e' or 'E' here
// since we want the scanner to treat that as the start of an
// invalid keyword instead, to produce more intelligible error messages.
case '-', '+', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
return true
default:
return false
}
}
func scanNumber(buf []byte, start pos) ([]byte, []byte, pos) {
// The scanner doesn't check that the sequence of digit-ish bytes is
// in a valid order. The parser must do this when decoding a number
// token.
var i int
p := start
Byte:
for i = 0; i < len(buf); i++ {
switch buf[i] {
case '-', '+', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
p.Pos.Byte++
p.Pos.Column++
default:
break Byte
}
}
return buf[:i], buf[i:], p
}
func byteCanStartKeyword(b byte) bool {
switch {
// We allow any sequence of alphabetical characters here, even though
// JSON is more constrained, so that we can collect what we presume
// the user intended to be a single keyword and then check its validity
// in the parser, where we can generate better diagnostics.
// So e.g. we want to be able to say:
// unrecognized keyword "True". Did you mean "true"?
case (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z'):
return true
default:
return false
}
}
func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) {
var i int
p := start
Byte:
for i = 0; i < len(buf); i++ {
b := buf[i]
switch {
case (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_':
p.Pos.Byte++
p.Pos.Column++
default:
break Byte
}
}
return buf[:i], buf[i:], p
}
func scanString(buf []byte, start pos) ([]byte, []byte, pos) {
// The scanner doesn't validate correct use of escapes, etc. It pays
// attention to escapes only for the purpose of identifying the closing
// quote character. It's the parser's responsibility to do proper
// validation.
//
// The scanner also doesn't specifically detect unterminated string
// literals, though they can be identified in the parser by checking if
// the final byte in a string token is the double-quote character.
// Skip the opening quote symbol
i := 1
p := start
p.Pos.Byte++
p.Pos.Column++
escaping := false
Byte:
for i < len(buf) {
b := buf[i]
switch {
case b == '\\':
escaping = !escaping
p.Pos.Byte++
p.Pos.Column++
i++
case b == '"':
p.Pos.Byte++
p.Pos.Column++
i++
if !escaping {
break Byte
}
escaping = false
case b < 32:
break Byte
default:
// Advance by one grapheme cluster, so that we consider each
// grapheme to be a "column".
// Ignoring error because this scanner cannot produce errors.
advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true)
p.Pos.Byte += advance
p.Pos.Column++
i += advance
escaping = false
}
}
return buf[:i], buf[i:], p
}
func skipWhitespace(buf []byte, start pos) ([]byte, pos) {
var i int
p := start
Byte:
for i = 0; i < len(buf); i++ {
switch buf[i] {
case ' ':
p.Pos.Byte++
p.Pos.Column++
case '\n':
p.Pos.Byte++
p.Pos.Column = 1
p.Pos.Line++
case '\r':
// For the purpose of line/column counting we consider a
// carriage return to take up no space, assuming that it will
// be paired up with a newline (on Windows, for example) that
// will account for both of them.
p.Pos.Byte++
case '\t':
// We arbitrarily count a tab as if it were two spaces, because
// we need to choose _some_ number here. This means any system
// that renders code on-screen with markers must itself treat
// tabs as a pair of spaces for rendering purposes, or instead
// use the byte offset and back into its own column position.
p.Pos.Byte++
p.Pos.Column += 2
default:
break Byte
}
}
return buf[i:], p
}
type pos struct {
Filename string
Pos hcl.Pos
}
func (p *pos) Range(byteLen, charLen int) hcl.Range {
start := p.Pos
end := p.Pos
end.Byte += byteLen
end.Column += charLen
return hcl.Range{
Filename: p.Filename,
Start: start,
End: end,
}
}
func posRange(start, end pos) hcl.Range {
return hcl.Range{
Filename: start.Filename,
Start: start.Pos,
End: end.Pos,
}
}
func (t token) GoString() string {
return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range)
}

405
vendor/github.com/hashicorp/hcl2/hcl/json/spec.md generated vendored Normal file

@ -0,0 +1,405 @@
# HCL JSON Syntax Specification
This is the specification for the JSON serialization for HCL. HCL is a system
for defining configuration languages for applications. The HCL information
model is designed to support multiple concrete syntaxes for configuration,
and this JSON-based format complements [the native syntax](../hclsyntax/spec.md)
by being easy to machine-generate, whereas the native syntax is oriented
towards human authoring and maintenance.
This syntax is defined in terms of JSON as defined in
[RFC7159](https://tools.ietf.org/html/rfc7159). As such it inherits the JSON
grammar as-is, and merely defines a specific methodology for interpreting
JSON constructs into HCL structural elements and expressions.
This mapping is defined such that valid JSON-serialized HCL input can be
_produced_ using standard JSON implementations in various programming languages.
_Parsing_ such JSON has some additional constraints beyond what is normally
supported by JSON parsers, so a specialized parser may be required that
is able to:
* Preserve the relative ordering of properties defined in an object.
* Preserve multiple definitions of the same property name.
* Preserve numeric values to the precision required by the number type
in [the HCL syntax-agnostic information model](../spec.md).
* Retain source location information for parsed tokens/constructs in order
to produce good error messages.
## Structural Elements
[The HCL syntax-agnostic information model](../spec.md) defines a _body_ as an
abstract container for attribute definitions and child blocks. A body is
represented in JSON as either a single JSON object or a JSON array of objects.
Body processing is in terms of JSON object properties, visited in the order
they appear in the input. Where a body is represented by a single JSON object,
the properties of that object are visited in order. Where a body is
represented by a JSON array, each of its elements are visited in order and
each element has its properties visited in order. If any element of the array
is not a JSON object then the input is erroneous.
When a body is being processed in the _dynamic attributes_ mode, the allowance
of a JSON array in the previous paragraph does not apply and instead a single
JSON object is always required.
As defined in the language-agnostic model, body processing is in terms
of a schema which provides context for interpreting the body's content. For
JSON bodies, the schema is crucial to allow differentiation of attribute
definitions and block definitions, both of which are represented via object
properties.
The special property name `"//"`, when used in an object representing a HCL
body, is parsed and ignored. A property with this name can be used to
include human-readable comments. (This special property name is _not_
processed in this way for any _other_ HCL constructs that are represented as
JSON objects.)
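For example, the following body (with an illustrative attribute name) carries
a human-readable comment that the parser will discard:
```json
{
"//": "This property is ignored, so it can hold a comment.",
"foo": "bar baz"
}
```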
### Attributes
Where the given schema describes an attribute with a given name, the object
property with the matching name — if present — serves as the attribute's
definition.
When a body is being processed in the _dynamic attributes_ mode, each object
property serves as an attribute definition for the attribute whose name
matches the property name.
The value of an attribute definition property is interpreted as an _expression_,
as described in a later section.
Given a schema that calls for an attribute named "foo", a JSON object like
the following provides a definition for that attribute:
```json
{
"foo": "bar baz"
}
```
### Blocks
Where the given schema describes a block with a given type name, each object
property with the matching name serves as a definition of zero or more blocks
of that type.
Processing of child blocks is in terms of nested JSON objects and arrays.
If the schema defines one or more _labels_ for the block type, a nested JSON
object or JSON array of objects is required for each labelling level. These
are flattened to a single ordered sequence of object properties using the
same algorithm as for body content as defined above. Each object property
serves as a label value at the corresponding level.
After any labelling levels, the next nested value is either a JSON object
representing a single block body, or a JSON array of JSON objects that each
represent a single block body. Use of an array accommodates the definition
of multiple blocks that have identical type and labels.
Given a schema that calls for a block type named "foo" with no labels, the
following JSON objects are all valid definitions of zero or more blocks of this
type:
```json
{
"foo": {
"child_attr": "baz"
}
}
```
```json
{
"foo": [
{
"child_attr": "baz"
},
{
"child_attr": "boz"
}
]
}
```
```json
{
"foo": []
}
```
The first of these defines a single child block of type "foo". The second
defines _two_ such blocks. The final example shows a degenerate definition
of zero blocks, though generators should prefer to omit the property entirely
in this scenario.
Given a schema that calls for a block type named "foo" with _two_ labels, the
extra label levels must be represented as objects or arrays of objects as in
the following examples:
```json
{
"foo": {
"bar": {
"baz": {
"child_attr": "baz"
},
"boz": {
"child_attr": "baz"
}
},
"boz": {
"baz": {
"child_attr": "baz"
}
}
}
}
```
```json
{
"foo": {
"bar": {
"baz": {
"child_attr": "baz"
},
"boz": {
"child_attr": "baz"
}
},
"boz": {
"baz": [
{
"child_attr": "baz"
},
{
"child_attr": "boz"
}
]
}
}
}
```
```json
{
"foo": [
{
"bar": {
"baz": {
"child_attr": "baz"
},
"boz": {
"child_attr": "baz"
}
}
},
{
"bar": {
"baz": [
{
"child_attr": "baz"
},
{
"child_attr": "boz"
}
]
}
}
]
}
```
```json
{
"foo": {
"bar": {
"baz": {
"child_attr": "baz"
},
"boz": {
"child_attr": "baz"
}
},
"bar": {
"baz": [
{
"child_attr": "baz"
},
{
"child_attr": "boz"
}
]
}
}
}
```
Arrays can be introduced at either the label definition or block body
definition levels to define multiple definitions of the same block type
or labels while preserving order.
A JSON HCL parser _must_ support duplicate definitions of the same property
name within a single object, preserving all of them and the relative ordering
between them. The array-based forms are also required so that JSON HCL
configurations can be produced with JSON producing libraries that are not
able to preserve property definition order and multiple definitions of
the same property.
## Expressions
JSON lacks a native expression syntax, so the HCL JSON syntax instead defines
a mapping for each of the JSON value types, including a special mapping for
strings that allows optional use of arbitrary expressions.
### Objects
When interpreted as an expression, a JSON object represents a value of a HCL
object type.
Each property of the JSON object represents an attribute of the HCL object type.
The property name string given in the JSON input is interpreted as a string
expression as described below, and its result is converted to string as defined
by the syntax-agnostic information model. If such a conversion is not possible,
an error is produced and evaluation fails.
An instance of the constructed object type is then created, whose values
are interpreted by again recursively applying the mapping rules defined in
this section to each of the property values.
If any evaluated property name strings produce null values, an error is
produced and evaluation fails. If any produce _unknown_ values, the _entire
object's_ result is an unknown value of the dynamic pseudo-type, signalling
that the type of the object cannot be determined.
It is an error to define the same property name multiple times within a single
JSON object interpreted as an expression. In full expression mode, this
constraint applies to the name expression results after conversion to string,
rather than the raw string that may contain interpolation expressions.
### Arrays
When interpreted as an expression, a JSON array represents a value of a HCL
tuple type.
Each element of the JSON array represents an element of the HCL tuple type.
The tuple type is constructed by enumerating the JSON array elements, creating
for each an element whose type is the result of recursively applying the
expression mapping rules. Correspondence is preserved between the array element
indices and the tuple element indices.
An instance of the constructed tuple type is then created, whose values are
interpreted by again recursively applying the mapping rules defined in this
section.
### Numbers
When interpreted as an expression, a JSON number represents a HCL number value.
HCL numbers are arbitrary-precision decimal values, so a JSON HCL parser must
be able to translate exactly the value given to a number of corresponding
precision, within the constraints set by the HCL syntax-agnostic information
model.
In practice, off-the-shelf JSON serializers often do not support customizing the
processing of numbers, and instead force processing as 32-bit or 64-bit
floating point values.
A _producer_ of JSON HCL that uses such a serializer can provide numeric values
as JSON strings where they have precision too great for representation in the
serializer's chosen numeric type in situations where the result will be
converted to number (using the standard conversion rules) by a calling
application.
Alternatively, for expressions that are evaluated in full expression mode an
embedded template interpolation can be used to faithfully represent a number,
such as `"${1e150}"`, which will then be evaluated by the underlying HCL native
syntax expression evaluator.
### Boolean Values
The JSON boolean values `true` and `false`, when interpreted as expressions,
represent the corresponding HCL boolean values.
### The Null Value
The JSON value `null`, when interpreted as an expression, represents a
HCL null value of the dynamic pseudo-type.
### Strings
When interpreted as an expression, a JSON string may be interpreted in one of
two ways depending on the evaluation mode.
If evaluating in literal-only mode (as defined by the syntax-agnostic
information model) the literal string is interpreted directly as a HCL string
value, using the exact sequence of unicode characters represented.
Template interpolations and directives MUST NOT be processed in this mode,
allowing any characters that appear as introduction sequences to pass through
literally:
```json
"Hello world! Template sequences like ${ are not intepreted here."
```
When evaluating in full expression mode (again, as defined by the syntax-
agnostic information model) the literal string is instead interpreted as a
_standalone template_ in the HCL Native Syntax. The expression evaluation
result is then the direct result of evaluating that template with the current
variable scope and function table.
```json
"Hello, ${name}! Template sequences are interpreted in full expression mode."
```
In particular the _Template Interpolation Unwrapping_ requirement from the
HCL native syntax specification must be implemented, allowing the use of
single-interpolation templates to represent expressions that would not
otherwise be representable in JSON, such as the following example where
the result must be a number, rather than a string representation of a number:
```json
"${ a + b }"
```
## Static Analysis
The HCL static analysis operations are implemented for JSON values that
represent expressions, as described in the following sections.
Due to the limited expressive power of the JSON syntax alone, the use of these
static analysis functions rather than normal expression evaluation provides
additional context for how a JSON value is to be interpreted, which means
that a static analysis can result in a different interpretation of a given
expression than normal evaluation would.
### Static List
An expression interpreted as a static list must be a JSON array. Each of the
values in the array is interpreted as an expression and returned.
### Static Map
An expression interpreted as a static map must be a JSON object. Each of the
key/value pairs in the object is presented as a pair of expressions. Since
object property names are always strings, evaluating the key expression with
a non-`nil` evaluation context will evaluate any template sequences given
in the property name.
### Static Call
An expression interpreted as a static call must be a string. The content of
the string is interpreted as a native syntax expression (not a _template_,
unlike normal evaluation) and then the static call analysis is delegated to
that expression.
If the original expression is not a string or its contents cannot be parsed
as a native syntax expression then static call analysis is not supported.
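For example, the following string (with an illustrative function name), when
interpreted as a static call, delegates to the native syntax expression it
contains and reports a call to the function `format`:
```json
"format(\"Hello, %s!\", name)"
```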
### Static Traversal
An expression interpreted as a static traversal must be a string. The content
of the string is interpreted as a native syntax expression (not a _template_,
unlike normal evaluation) and then static traversal analysis is delegated
to that expression.
If the original expression is not a string or its contents cannot be parsed
as a native syntax expression then static traversal analysis is not supported.

623
vendor/github.com/hashicorp/hcl2/hcl/json/structure.go generated vendored Normal file

@ -0,0 +1,623 @@
package json
import (
"fmt"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
)
// body is the implementation of "Body" used for files processed with the JSON
// parser.
type body struct {
val node
// If non-nil, the keys of this map cause the corresponding attributes to
// be treated as non-existing. This is used when Body.PartialContent is
// called, to produce the "remaining content" Body.
hiddenAttrs map[string]struct{}
}
// expression is the implementation of "Expression" used for files processed
// with the JSON parser.
type expression struct {
src node
}
func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
content, newBody, diags := b.PartialContent(schema)
hiddenAttrs := newBody.(*body).hiddenAttrs
var nameSuggestions []string
for _, attrS := range schema.Attributes {
if _, ok := hiddenAttrs[attrS.Name]; !ok {
// Only suggest an attribute name if we didn't use it already.
nameSuggestions = append(nameSuggestions, attrS.Name)
}
}
for _, blockS := range schema.Blocks {
// Blocks can appear multiple times, so we'll suggest their type
// names regardless of whether they've already been used.
nameSuggestions = append(nameSuggestions, blockS.Type)
}
jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil)
diags = append(diags, attrDiags...)
for _, attr := range jsonAttrs {
k := attr.Name
if k == "//" {
// Ignore "//" keys in objects representing bodies, to allow
// their use as comments.
continue
}
if _, ok := hiddenAttrs[k]; !ok {
suggestion := nameSuggestion(k, nameSuggestions)
if suggestion != "" {
suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
}
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Extraneous JSON object property",
Detail: fmt.Sprintf("No argument or block type is named %q.%s", k, suggestion),
Subject: &attr.NameRange,
Context: attr.Range().Ptr(),
})
}
}
return content, diags
}
func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
var diags hcl.Diagnostics
jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil)
diags = append(diags, attrDiags...)
usedNames := map[string]struct{}{}
if b.hiddenAttrs != nil {
for k := range b.hiddenAttrs {
usedNames[k] = struct{}{}
}
}
content := &hcl.BodyContent{
Attributes: map[string]*hcl.Attribute{},
Blocks: nil,
MissingItemRange: b.MissingItemRange(),
}
// Create some more convenient data structures for our work below.
attrSchemas := map[string]hcl.AttributeSchema{}
blockSchemas := map[string]hcl.BlockHeaderSchema{}
for _, attrS := range schema.Attributes {
attrSchemas[attrS.Name] = attrS
}
for _, blockS := range schema.Blocks {
blockSchemas[blockS.Type] = blockS
}
for _, jsonAttr := range jsonAttrs {
attrName := jsonAttr.Name
if _, used := b.hiddenAttrs[attrName]; used {
continue
}
if attrS, defined := attrSchemas[attrName]; defined {
if existing, exists := content.Attributes[attrName]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate argument",
Detail: fmt.Sprintf("The argument %q was already set at %s.", attrName, existing.Range),
Subject: &jsonAttr.NameRange,
Context: jsonAttr.Range().Ptr(),
})
continue
}
content.Attributes[attrS.Name] = &hcl.Attribute{
Name: attrS.Name,
Expr: &expression{src: jsonAttr.Value},
Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()),
NameRange: jsonAttr.NameRange,
}
usedNames[attrName] = struct{}{}
} else if blockS, defined := blockSchemas[attrName]; defined {
bv := jsonAttr.Value
blockDiags := b.unpackBlock(bv, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks)
diags = append(diags, blockDiags...)
usedNames[attrName] = struct{}{}
}
// We ignore anything that isn't defined because that's the
// PartialContent contract. The Content method will catch leftovers.
}
// Make sure we got all the required attributes.
for _, attrS := range schema.Attributes {
if !attrS.Required {
continue
}
if _, defined := content.Attributes[attrS.Name]; !defined {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing required argument",
Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name),
Subject: b.MissingItemRange().Ptr(),
})
}
}
unusedBody := &body{
val: b.val,
hiddenAttrs: usedNames,
}
return content, unusedBody, diags
}
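// Editorial usage sketch with an illustrative schema and file: the names
// given in the schema are consumed, and everything else is returned in the
// remaining body for later callers to process.
//
// f, _ := Parse(src, "config.hcl.json")
// schema := &hcl.BodySchema{
//     Attributes: []hcl.AttributeSchema{{Name: "io_mode"}},
//     Blocks:     []hcl.BlockHeaderSchema{{Type: "service", LabelNames: []string{"name"}}},
// }
// content, remain, diags := f.Body.PartialContent(schema)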
// JustAttributes for JSON bodies interprets all properties of the wrapped
// JSON object as attributes and returns them.
func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
var diags hcl.Diagnostics
attrs := make(map[string]*hcl.Attribute)
obj, ok := b.val.(*objectVal)
if !ok {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Incorrect JSON value type",
Detail: "A JSON object is required here, setting the arguments for this block.",
Subject: b.val.StartRange().Ptr(),
})
return attrs, diags
}
for _, jsonAttr := range obj.Attrs {
name := jsonAttr.Name
if name == "//" {
// Ignore "//" keys in objects representing bodies, to allow
// their use as comments.
continue
}
if _, hidden := b.hiddenAttrs[name]; hidden {
continue
}
if existing, exists := attrs[name]; exists {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate attribute definition",
Detail: fmt.Sprintf("The argument %q was already set at %s.", name, existing.Range),
Subject: &jsonAttr.NameRange,
})
continue
}
attrs[name] = &hcl.Attribute{
Name: name,
Expr: &expression{src: jsonAttr.Value},
Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()),
NameRange: jsonAttr.NameRange,
}
}
// No diagnostics possible here, since the parser already took care of
// finding duplicates and every JSON value can be a valid attribute value.
return attrs, diags
}
func (b *body) MissingItemRange() hcl.Range {
switch tv := b.val.(type) {
case *objectVal:
return tv.CloseRange
case *arrayVal:
return tv.OpenRange
default:
// Should not happen in correct operation, but might show up if the
// input is invalid and we are producing partial results.
return tv.StartRange()
}
}
func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) {
if len(labelsLeft) > 0 {
labelName := labelsLeft[0]
jsonAttrs, attrDiags := b.collectDeepAttrs(v, &labelName)
diags = append(diags, attrDiags...)
if len(jsonAttrs) == 0 {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing block label",
Detail: fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName),
Subject: v.StartRange().Ptr(),
})
return
}
labelsUsed := append(labelsUsed, "")
labelRanges := append(labelRanges, hcl.Range{})
for _, p := range jsonAttrs {
pk := p.Name
labelsUsed[len(labelsUsed)-1] = pk
labelRanges[len(labelRanges)-1] = p.NameRange
diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...)
}
return
}
// By the time we get here, we've peeled off all the labels and we're ready
// to deal with the block's actual content.
// need to copy the label slices because their underlying arrays will
// continue to be mutated after we return.
labels := make([]string, len(labelsUsed))
copy(labels, labelsUsed)
labelR := make([]hcl.Range, len(labelRanges))
copy(labelR, labelRanges)
switch tv := v.(type) {
case *objectVal:
// Single instance of the block
*blocks = append(*blocks, &hcl.Block{
Type: typeName,
Labels: labels,
Body: &body{
val: tv,
},
DefRange: tv.OpenRange,
TypeRange: *typeRange,
LabelRanges: labelR,
})
case *arrayVal:
// Multiple instances of the block
for _, av := range tv.Values {
*blocks = append(*blocks, &hcl.Block{
Type: typeName,
Labels: labels,
Body: &body{
val: av, // might be mistyped; we'll find out when content is requested for this body
},
DefRange: tv.OpenRange,
TypeRange: *typeRange,
LabelRanges: labelR,
})
}
default:
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Incorrect JSON value type",
Detail: fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName),
Subject: v.StartRange().Ptr(),
})
}
return
}
// collectDeepAttrs takes either a single object or an array of objects and
// flattens it into a list of object attributes, collecting attributes from
// all of the objects in a given array.
//
// Ordering is preserved, so a list of objects that each have one property
// will result in those properties being returned in the same order as the
// objects appeared in the array.
//
// This is appropriate for use only for objects representing bodies or labels
// within a block.
//
// The labelName argument, if non-null, is used to tailor returned error
// messages to refer to block labels rather than attributes and child blocks.
// It has no other effect.
func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.Diagnostics) {
var diags hcl.Diagnostics
var attrs []*objectAttr
switch tv := v.(type) {
case *objectVal:
attrs = append(attrs, tv.Attrs...)
case *arrayVal:
for _, ev := range tv.Values {
switch tev := ev.(type) {
case *objectVal:
attrs = append(attrs, tev.Attrs...)
default:
if labelName != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Incorrect JSON value type",
Detail: fmt.Sprintf("A JSON object is required here, to specify %s labels for this block.", *labelName),
Subject: ev.StartRange().Ptr(),
})
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Incorrect JSON value type",
Detail: "A JSON object is required here, to define arguments and child blocks.",
Subject: ev.StartRange().Ptr(),
})
}
}
}
default:
if labelName != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Incorrect JSON value type",
Detail: fmt.Sprintf("Either a JSON object or JSON array of objects is required here, to specify %s labels for this block.", *labelName),
Subject: v.StartRange().Ptr(),
})
} else {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Incorrect JSON value type",
Detail: "Either a JSON object or JSON array of objects is required here, to define arguments and child blocks.",
Subject: v.StartRange().Ptr(),
})
}
}
return attrs, diags
}
func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
switch v := e.src.(type) {
case *stringVal:
if ctx != nil {
// Parse string contents as a HCL native language expression.
// We only do this if we have a context, so passing a nil context
// is how the caller specifies that interpolations are not allowed
// and that the string should just be returned verbatim.
templateSrc := v.Value
expr, diags := hclsyntax.ParseTemplate(
[]byte(templateSrc),
v.SrcRange.Filename,
// This won't produce _exactly_ the right result, since
// the hclsyntax parser can't "see" any escapes we removed
// while parsing JSON, but it's better than nothing.
hcl.Pos{
Line: v.SrcRange.Start.Line,
// skip over the opening quote mark
Byte: v.SrcRange.Start.Byte + 1,
Column: v.SrcRange.Start.Column + 1,
},
)
if diags.HasErrors() {
return cty.DynamicVal, diags
}
val, evalDiags := expr.Value(ctx)
diags = append(diags, evalDiags...)
return val, diags
}
return cty.StringVal(v.Value), nil
case *numberVal:
return cty.NumberVal(v.Value), nil
case *booleanVal:
return cty.BoolVal(v.Value), nil
case *arrayVal:
vals := []cty.Value{}
for _, jsonVal := range v.Values {
val, _ := (&expression{src: jsonVal}).Value(ctx)
vals = append(vals, val)
}
return cty.TupleVal(vals), nil
case *objectVal:
var diags hcl.Diagnostics
attrs := map[string]cty.Value{}
attrRanges := map[string]hcl.Range{}
known := true
for _, jsonAttr := range v.Attrs {
// In this one context we allow keys to contain interpolation
// expressions too, assuming we're evaluating in interpolation
// mode. This achieves parity with the native syntax where
// object expressions can have dynamic keys, while block contents
// may not.
name, nameDiags := (&expression{src: &stringVal{
Value: jsonAttr.Name,
SrcRange: jsonAttr.NameRange,
}}).Value(ctx)
valExpr := &expression{src: jsonAttr.Value}
val, valDiags := valExpr.Value(ctx)
diags = append(diags, nameDiags...)
diags = append(diags, valDiags...)
var err error
name, err = convert.Convert(name, cty.String)
if err != nil {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid object key expression",
Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err),
Subject: &jsonAttr.NameRange,
Expression: valExpr,
EvalContext: ctx,
})
continue
}
if name.IsNull() {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid object key expression",
Detail: "Cannot use null value as an object key.",
Subject: &jsonAttr.NameRange,
Expression: valExpr,
EvalContext: ctx,
})
continue
}
if !name.IsKnown() {
// This is a bit of a weird case, since our usual rules require
// us to tolerate unknowns and just represent the result as
// best we can but if we don't know the key then we can't
// know the type of our object at all, and thus we must turn
// the whole thing into cty.DynamicVal. This is consistent with
// how this situation is handled in the native syntax.
// We'll keep iterating so we can collect other errors in
// subsequent attributes.
known = false
continue
}
nameStr := name.AsString()
if _, defined := attrs[nameStr]; defined {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Duplicate object attribute",
Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]),
Subject: &jsonAttr.NameRange,
Expression: e,
EvalContext: ctx,
})
continue
}
attrs[nameStr] = val
attrRanges[nameStr] = jsonAttr.NameRange
}
if !known {
// We encountered an unknown key somewhere along the way, so
// we can't know what our type will eventually be.
return cty.DynamicVal, diags
}
return cty.ObjectVal(attrs), diags
default:
// Default to DynamicVal so that ASTs containing invalid nodes can
// still be partially-evaluated.
return cty.DynamicVal, nil
}
}
func (e *expression) Variables() []hcl.Traversal {
var vars []hcl.Traversal
switch v := e.src.(type) {
case *stringVal:
templateSrc := v.Value
expr, diags := hclsyntax.ParseTemplate(
[]byte(templateSrc),
v.SrcRange.Filename,
// This won't produce _exactly_ the right result, since
// the hclsyntax parser can't "see" any escapes we removed
// while parsing JSON, but it's better than nothing.
hcl.Pos{
Line: v.SrcRange.Start.Line,
// skip over the opening quote mark
Byte: v.SrcRange.Start.Byte + 1,
Column: v.SrcRange.Start.Column + 1,
},
)
if diags.HasErrors() {
return vars
}
return expr.Variables()
case *arrayVal:
for _, jsonVal := range v.Values {
vars = append(vars, (&expression{src: jsonVal}).Variables()...)
}
case *objectVal:
for _, jsonAttr := range v.Attrs {
vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...)
}
}
return vars
}
func (e *expression) Range() hcl.Range {
return e.src.Range()
}
func (e *expression) StartRange() hcl.Range {
return e.src.StartRange()
}
// Implementation for hcl.AbsTraversalForExpr.
func (e *expression) AsTraversal() hcl.Traversal {
// In JSON-based syntax a traversal is given as a string containing
// traversal syntax as defined by hclsyntax.ParseTraversalAbs.
switch v := e.src.(type) {
case *stringVal:
traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start)
if diags.HasErrors() {
return nil
}
return traversal
default:
return nil
}
}
// Implementation for hcl.ExprCall.
func (e *expression) ExprCall() *hcl.StaticCall {
// In JSON-based syntax a static call is given as a string containing
// an expression in the native syntax that also supports ExprCall.
switch v := e.src.(type) {
case *stringVal:
expr, diags := hclsyntax.ParseExpression([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start)
if diags.HasErrors() {
return nil
}
call, diags := hcl.ExprCall(expr)
if diags.HasErrors() {
return nil
}
return call
default:
return nil
}
}
// Implementation for hcl.ExprList.
func (e *expression) ExprList() []hcl.Expression {
switch v := e.src.(type) {
case *arrayVal:
ret := make([]hcl.Expression, len(v.Values))
for i, node := range v.Values {
ret[i] = &expression{src: node}
}
return ret
default:
return nil
}
}
// Implementation for hcl.ExprMap.
func (e *expression) ExprMap() []hcl.KeyValuePair {
switch v := e.src.(type) {
case *objectVal:
ret := make([]hcl.KeyValuePair, len(v.Attrs))
for i, jsonAttr := range v.Attrs {
ret[i] = hcl.KeyValuePair{
Key: &expression{src: &stringVal{
Value: jsonAttr.Name,
SrcRange: jsonAttr.NameRange,
}},
Value: &expression{src: jsonAttr.Value},
}
}
return ret
default:
return nil
}
}
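For orientation, a minimal sketch (not part of the vendored file) of the evaluation path implemented above, driven through this package's public Parse entry point; the attribute name and variable are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/json"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// "greeting" holds template syntax; with a non-nil EvalContext the
	// string is parsed as a template, as in expression.Value above.
	f, diags := json.Parse([]byte(`{"greeting": "Hello, ${name}!"}`), "config.json")
	if diags.HasErrors() {
		panic(diags.Error())
	}
	attrs, _ := f.Body.JustAttributes()
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{"name": cty.StringVal("Nomad")},
	}
	val, _ := attrs["greeting"].Expr.Value(ctx)
	fmt.Println(val.AsString()) // Hello, Nomad!

	// A nil context disables interpolation and yields the string verbatim.
	raw, _ := attrs["greeting"].Expr.Value(nil)
	fmt.Println(raw.AsString()) // Hello, ${name}!
}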

29
vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go generated vendored Normal file
View file

@@ -0,0 +1,29 @@
// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT.
package json
import "strconv"
const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF"
var _tokenType_map = map[tokenType]string{
0: _tokenType_name[0:12],
44: _tokenType_name[12:22],
58: _tokenType_name[22:32],
61: _tokenType_name[32:43],
75: _tokenType_name[43:55],
78: _tokenType_name[55:66],
83: _tokenType_name[66:77],
91: _tokenType_name[77:88],
93: _tokenType_name[88:99],
123: _tokenType_name[99:110],
125: _tokenType_name[110:121],
9220: _tokenType_name[121:129],
}
func (i tokenType) String() string {
if str, ok := _tokenType_map[i]; ok {
return str
}
return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")"
}
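An aside on the sparse keys above: they are the byte values of the tokens themselves (44 is ',', 123 is '{'), so the lookup doubles as a character table. A self-contained sketch of the same generated pattern, using a hypothetical kindType in place of the unexported tokenType:

package main

import (
	"fmt"
	"strconv"
)

type kindType rune

// Names are packed into one string and sliced by offset, exactly as in
// the generated file above.
const _kindType_name = "kindCommakindBraceO"

var _kindType_map = map[kindType]string{
	',': _kindType_name[0:9],
	'{': _kindType_name[9:19],
}

func (i kindType) String() string {
	if str, ok := _kindType_map[i]; ok {
		return str
	}
	return "kindType(" + strconv.FormatInt(int64(i), 10) + ")"
}

func main() {
	fmt.Println(kindType(','), kindType('{'), kindType('x')) // kindComma kindBraceO kindType(120)
}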

226
vendor/github.com/hashicorp/hcl2/hcl/merged.go generated vendored Normal file
View file

@@ -0,0 +1,226 @@
package hcl
import (
"fmt"
)
// MergeFiles combines the given files to produce a single body that contains
// configuration from all of the given files.
//
// The ordering of the given files decides the order in which contained
// elements will be returned. If any top-level attributes are defined with
// the same name across multiple files, a diagnostic will be produced from
// the Content and PartialContent methods describing this error in a
// user-friendly way.
func MergeFiles(files []*File) Body {
var bodies []Body
for _, file := range files {
bodies = append(bodies, file.Body)
}
return MergeBodies(bodies)
}
// MergeBodies is like MergeFiles except it deals directly with bodies, rather
// than with entire files.
func MergeBodies(bodies []Body) Body {
if len(bodies) == 0 {
// Swap out for our singleton empty body, to reduce the number of
// empty slices we have hanging around.
return emptyBody
}
// If any of the given bodies are already merged bodies, we'll unpack
// to flatten to a single mergedBodies, since that's conceptually simpler.
// This also, as a side-effect, eliminates any empty bodies, since
// empties are merged bodies with no inner bodies.
var newLen int
var flatten bool
for _, body := range bodies {
if children, merged := body.(mergedBodies); merged {
newLen += len(children)
flatten = true
} else {
newLen++
}
}
if !flatten { // not just newLen == len, because we might have mergedBodies with single bodies inside
return mergedBodies(bodies)
}
if newLen == 0 {
// Don't allocate a new empty when we already have one
return emptyBody
}
new := make([]Body, 0, newLen)
for _, body := range bodies {
if children, merged := body.(mergedBodies); merged {
new = append(new, children...)
} else {
new = append(new, body)
}
}
return mergedBodies(new)
}
var emptyBody = mergedBodies([]Body{})
// EmptyBody returns a body with no content. This body can be used as a
// placeholder when a body is required but no body content is available.
func EmptyBody() Body {
return emptyBody
}
type mergedBodies []Body
// Content returns the content produced by applying the given schema to all
// of the merged bodies and merging the result.
//
// Although required attributes _are_ supported, they should be used sparingly
// with merged bodies since in this case there is no contextual information
// with which to return good diagnostics. Applications working with merged
// bodies may wish to mark all attributes as optional and then check for
// required attributes afterwards, to produce better diagnostics.
func (mb mergedBodies) Content(schema *BodySchema) (*BodyContent, Diagnostics) {
// the returned body will always be empty in this case, because mergedContent
// will only ever call Content on the child bodies.
content, _, diags := mb.mergedContent(schema, false)
return content, diags
}
func (mb mergedBodies) PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics) {
return mb.mergedContent(schema, true)
}
func (mb mergedBodies) JustAttributes() (Attributes, Diagnostics) {
attrs := make(map[string]*Attribute)
var diags Diagnostics
for _, body := range mb {
thisAttrs, thisDiags := body.JustAttributes()
if len(thisDiags) != 0 {
diags = append(diags, thisDiags...)
}
if thisAttrs != nil {
for name, attr := range thisAttrs {
if existing := attrs[name]; existing != nil {
diags = diags.Append(&Diagnostic{
Severity: DiagError,
Summary: "Duplicate argument",
Detail: fmt.Sprintf(
"Argument %q was already set at %s",
name, existing.NameRange.String(),
),
Subject: &attr.NameRange,
})
continue
}
attrs[name] = attr
}
}
}
return attrs, diags
}
func (mb mergedBodies) MissingItemRange() Range {
if len(mb) == 0 {
// Nothing useful to return here, so we'll return some garbage.
return Range{
Filename: "<empty>",
}
}
// arbitrarily use the first body's missing item range
return mb[0].MissingItemRange()
}
func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyContent, Body, Diagnostics) {
// We need to produce a new schema with none of the attributes marked as
// required, since _any one_ of our bodies can contribute an attribute value.
// We'll separately check that all required attributes are present at
// the end.
mergedSchema := &BodySchema{
Blocks: schema.Blocks,
}
for _, attrS := range schema.Attributes {
mergedAttrS := attrS
mergedAttrS.Required = false
mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS)
}
var mergedLeftovers []Body
content := &BodyContent{
Attributes: map[string]*Attribute{},
}
var diags Diagnostics
for _, body := range mb {
var thisContent *BodyContent
var thisLeftovers Body
var thisDiags Diagnostics
if partial {
thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema)
} else {
thisContent, thisDiags = body.Content(mergedSchema)
}
if thisLeftovers != nil {
mergedLeftovers = append(mergedLeftovers, thisLeftovers)
}
if len(thisDiags) != 0 {
diags = append(diags, thisDiags...)
}
if thisContent.Attributes != nil {
for name, attr := range thisContent.Attributes {
if existing := content.Attributes[name]; existing != nil {
diags = diags.Append(&Diagnostic{
Severity: DiagError,
Summary: "Duplicate argument",
Detail: fmt.Sprintf(
"Argument %q was already set at %s",
name, existing.NameRange.String(),
),
Subject: &attr.NameRange,
})
continue
}
content.Attributes[name] = attr
}
}
if len(thisContent.Blocks) != 0 {
content.Blocks = append(content.Blocks, thisContent.Blocks...)
}
}
// Finally, we check for required attributes.
for _, attrS := range schema.Attributes {
if !attrS.Required {
continue
}
if content.Attributes[attrS.Name] == nil {
// We don't have any context here to produce a good diagnostic,
// which is why we warn in the Content docstring to minimize the
// use of required attributes on merged bodies.
diags = diags.Append(&Diagnostic{
Severity: DiagError,
Summary: "Missing required argument",
Detail: fmt.Sprintf(
"The argument %q is required, but was not set.",
attrS.Name,
),
})
}
}
leftoverBody := MergeBodies(mergedLeftovers)
return content, leftoverBody, diags
}
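A short usage sketch for the merge behavior described above, assuming the hclparse package vendored alongside these files; file names and attributes are illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclparse"
)

func main() {
	p := hclparse.NewParser()
	// Two files contributing distinct top-level attributes.
	f1, _ := p.ParseHCL([]byte("region = \"us-east-1\"\n"), "a.hcl")
	f2, _ := p.ParseHCL([]byte("datacenter = \"dc1\"\n"), "b.hcl")

	merged := hcl.MergeFiles([]*hcl.File{f1, f2})
	attrs, diags := merged.JustAttributes()
	fmt.Println(len(attrs), diags.HasErrors()) // 2 false

	// Defining "region" again in a third file would instead produce the
	// "Duplicate argument" diagnostic constructed above.
}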

147
vendor/github.com/hashicorp/hcl2/hcl/ops.go generated vendored Normal file
View file

@@ -0,0 +1,147 @@
package hcl
import (
"fmt"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/convert"
)
// Index is a helper function that performs the same operation as the index
// operator in the HCL expression language. That is, the result is the
// same as it would be for collection[key] in a configuration expression.
//
// This is exported so that applications can perform indexing in a manner
// consistent with how the language does it, including handling of null and
// unknown values, etc.
//
// Diagnostics are produced if the given combination of values is not valid.
// Therefore a pointer to a source range must be provided to use in diagnostics,
// though nil can be provided if the calling application is going to
// ignore the subject of the returned diagnostics anyway.
func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) {
if collection.IsNull() {
return cty.DynamicVal, Diagnostics{
{
Severity: DiagError,
Summary: "Attempt to index null value",
Detail: "This value is null, so it does not have any indices.",
Subject: srcRange,
},
}
}
if key.IsNull() {
return cty.DynamicVal, Diagnostics{
{
Severity: DiagError,
Summary: "Invalid index",
Detail: "Can't use a null value as an indexing key.",
Subject: srcRange,
},
}
}
ty := collection.Type()
kty := key.Type()
if kty == cty.DynamicPseudoType || ty == cty.DynamicPseudoType {
return cty.DynamicVal, nil
}
switch {
case ty.IsListType() || ty.IsTupleType() || ty.IsMapType():
var wantType cty.Type
switch {
case ty.IsListType() || ty.IsTupleType():
wantType = cty.Number
case ty.IsMapType():
wantType = cty.String
default:
// should never happen
panic("don't know what key type we want")
}
key, keyErr := convert.Convert(key, wantType)
if keyErr != nil {
return cty.DynamicVal, Diagnostics{
{
Severity: DiagError,
Summary: "Invalid index",
Detail: fmt.Sprintf(
"The given key does not identify an element in this collection value: %s.",
keyErr.Error(),
),
Subject: srcRange,
},
}
}
has := collection.HasIndex(key)
if !has.IsKnown() {
if ty.IsTupleType() {
return cty.DynamicVal, nil
} else {
return cty.UnknownVal(ty.ElementType()), nil
}
}
if has.False() {
return cty.DynamicVal, Diagnostics{
{
Severity: DiagError,
Summary: "Invalid index",
Detail: "The given key does not identify an element in this collection value.",
Subject: srcRange,
},
}
}
return collection.Index(key), nil
case ty.IsObjectType():
key, keyErr := convert.Convert(key, cty.String)
if keyErr != nil {
return cty.DynamicVal, Diagnostics{
{
Severity: DiagError,
Summary: "Invalid index",
Detail: fmt.Sprintf(
"The given key does not identify an element in this collection value: %s.",
keyErr.Error(),
),
Subject: srcRange,
},
}
}
if !collection.IsKnown() {
return cty.DynamicVal, nil
}
if !key.IsKnown() {
return cty.DynamicVal, nil
}
attrName := key.AsString()
if !ty.HasAttribute(attrName) {
return cty.DynamicVal, Diagnostics{
{
Severity: DiagError,
Summary: "Invalid index",
Detail: "The given key does not identify an element in this collection value.",
Subject: srcRange,
},
}
}
return collection.GetAttr(attrName), nil
default:
return cty.DynamicVal, Diagnostics{
{
Severity: DiagError,
Summary: "Invalid index",
Detail: "This value does not have any indices.",
Subject: srcRange,
},
}
}
}
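A brief sketch of Index in use (collection and keys are illustrative): keys are converted toward the collection's expected key type, and failures surface as diagnostics rather than panics:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	coll := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	rng := &hcl.Range{Filename: "example.hcl"}

	v, diags := hcl.Index(coll, cty.NumberIntVal(1), rng)
	fmt.Println(v.AsString(), diags.HasErrors()) // b false

	// String keys are converted to the wanted number type when possible.
	v, _ = hcl.Index(coll, cty.StringVal("0"), rng)
	fmt.Println(v.AsString()) // a

	// An out-of-range key yields an "Invalid index" diagnostic, not a panic.
	_, diags = hcl.Index(coll, cty.NumberIntVal(5), rng)
	fmt.Println(diags.HasErrors()) // true
}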

272
vendor/github.com/hashicorp/hcl2/hcl/pos.go generated vendored Normal file
View file

@@ -0,0 +1,272 @@
package hcl
import "fmt"
// Pos represents a single position in a source file, by addressing the
// start byte of a unicode character encoded in UTF-8.
//
// Pos is generally used only in the context of a Range, which then defines
// which source file the position is within.
type Pos struct {
// Line is the source code line where this position points. Lines are
// counted starting at 1 and incremented for each newline character
// encountered.
Line int
// Column is the source code column where this position points, in
// unicode characters, with counting starting at 1.
//
// Column counts characters as they appear visually, so for example a
// latin letter with a combining diacritic mark counts as one character.
// This is intended for rendering visual markers against source code in
// contexts where these diacritics would be rendered in a single character
// cell. Technically speaking, Column is counting grapheme clusters as
// used in unicode normalization.
Column int
// Byte is the byte offset into the file where the indicated character
// begins. This is a zero-based offset to the first byte of the first
// UTF-8 codepoint sequence in the character, and thus gives a position
// that can be resolved _without_ awareness of Unicode characters.
Byte int
}
// Range represents a span of characters between two positions in a source
// file.
//
// This struct is usually used by value in types that represent AST nodes,
// but by pointer in types that refer to the positions of other objects,
// such as in diagnostics.
type Range struct {
// Filename is the name of the file into which this range's positions
// point.
Filename string
// Start and End represent the bounds of this range. Start is inclusive
// and End is exclusive.
Start, End Pos
}
// RangeBetween returns a new range that spans from the beginning of the
// start range to the end of the end range.
//
// The result is meaningless if the two ranges do not belong to the same
// source file or if the end range appears before the start range.
func RangeBetween(start, end Range) Range {
return Range{
Filename: start.Filename,
Start: start.Start,
End: end.End,
}
}
// RangeOver returns a new range that covers both of the given ranges and
// possibly additional content between them if the two ranges do not overlap.
//
// If either range is empty then it is ignored. The result is empty if both
// given ranges are empty.
//
// The result is meaningless if the two ranges do not belong to the same
// source file.
func RangeOver(a, b Range) Range {
if a.Empty() {
return b
}
if b.Empty() {
return a
}
var start, end Pos
if a.Start.Byte < b.Start.Byte {
start = a.Start
} else {
start = b.Start
}
if a.End.Byte > b.End.Byte {
end = a.End
} else {
end = b.End
}
return Range{
Filename: a.Filename,
Start: start,
End: end,
}
}
// ContainsPos returns true if and only if the given position is contained within
// the receiving range.
//
// In the unlikely case that the line/column information disagree with the byte
// offset information in the given position or receiving range, the byte
// offsets are given priority.
func (r Range) ContainsPos(pos Pos) bool {
return r.ContainsOffset(pos.Byte)
}
// ContainsOffset returns true if and only if the given byte offset is within
// the receiving Range.
func (r Range) ContainsOffset(offset int) bool {
return offset >= r.Start.Byte && offset < r.End.Byte
}
// Ptr returns a pointer to a copy of the receiver. This is a convenience for
// using ranges in places where pointers are required, such as in Diagnostic, but
// the range in question is returned from a method. Go would otherwise not
// allow one to take the address of a function call.
func (r Range) Ptr() *Range {
return &r
}
// String returns a compact string representation of the receiver.
// Callers should generally prefer to present a range more visually,
// e.g. via markers directly on the relevant portion of source code.
func (r Range) String() string {
if r.Start.Line == r.End.Line {
return fmt.Sprintf(
"%s:%d,%d-%d",
r.Filename,
r.Start.Line, r.Start.Column,
r.End.Column,
)
} else {
return fmt.Sprintf(
"%s:%d,%d-%d,%d",
r.Filename,
r.Start.Line, r.Start.Column,
r.End.Line, r.End.Column,
)
}
}
func (r Range) Empty() bool {
return r.Start.Byte == r.End.Byte
}
// CanSliceBytes returns true if SliceBytes could return an accurate
// sub-slice of the given slice.
//
// This effectively tests whether the start and end offsets of the range
// are within the bounds of the slice, and thus whether SliceBytes can be
// trusted to produce an accurate start and end position within that slice.
func (r Range) CanSliceBytes(b []byte) bool {
switch {
case r.Start.Byte < 0 || r.Start.Byte > len(b):
return false
case r.End.Byte < 0 || r.End.Byte > len(b):
return false
case r.End.Byte < r.Start.Byte:
return false
default:
return true
}
}
// SliceBytes returns a sub-slice of the given slice that is covered by the
// receiving range, assuming that the given slice is the source code of the
// file indicated by r.Filename.
//
// If the receiver refers to any byte offsets that are outside of the slice
// then the result is constrained to the overlapping portion only, to avoid
// a panic. Use CanSliceBytes to determine if the result is guaranteed to
// be an accurate span of the requested range.
func (r Range) SliceBytes(b []byte) []byte {
start := r.Start.Byte
end := r.End.Byte
if start < 0 {
start = 0
} else if start > len(b) {
start = len(b)
}
if end < 0 {
end = 0
} else if end > len(b) {
end = len(b)
}
if end < start {
end = start
}
return b[start:end]
}
// Overlaps returns true if the receiver and the other given range share any
// characters in common.
func (r Range) Overlaps(other Range) bool {
switch {
case r.Filename != other.Filename:
// If the ranges are in different files then they can't possibly overlap
return false
case r.Empty() || other.Empty():
// Empty ranges can never overlap
return false
case r.ContainsOffset(other.Start.Byte) || r.ContainsOffset(other.End.Byte):
return true
case other.ContainsOffset(r.Start.Byte) || other.ContainsOffset(r.End.Byte):
return true
default:
return false
}
}
// Overlap finds a range that is either identical to or a sub-range of both
// the receiver and the other given range. It returns an empty range
// within the receiver if there is no overlap between the two ranges.
//
// A non-empty result is either identical to or a subset of the receiver.
func (r Range) Overlap(other Range) Range {
if !r.Overlaps(other) {
// Start == End indicates an empty range
return Range{
Filename: r.Filename,
Start: r.Start,
End: r.Start,
}
}
var start, end Pos
if r.Start.Byte > other.Start.Byte {
start = r.Start
} else {
start = other.Start
}
if r.End.Byte < other.End.Byte {
end = r.End
} else {
end = other.End
}
return Range{
Filename: r.Filename,
Start: start,
End: end,
}
}
// PartitionAround finds the portion of the given range that overlaps with
// the receiver and returns three ranges: the portion of the receiver that
// precedes the overlap, the overlap itself, and then the portion of the
// receiver that comes after the overlap.
//
// If the two ranges do not overlap then all three returned ranges are empty.
//
// If the given range aligns with or extends beyond either extent of the
// receiver then the corresponding outer range will be empty.
func (r Range) PartitionAround(other Range) (before, overlap, after Range) {
overlap = r.Overlap(other)
if overlap.Empty() {
return overlap, overlap, overlap
}
before = Range{
Filename: r.Filename,
Start: r.Start,
End: overlap.Start,
}
after = Range{
Filename: r.Filename,
Start: overlap.End,
End: r.End,
}
return before, overlap, after
}
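To make the byte-offset conventions concrete, a small sketch (source text and offsets are illustrative): Start is inclusive, End is exclusive, and containment checks consult only byte offsets:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
)

func main() {
	src := []byte(`name = "nomad"`)
	// Covers the quoted string, bytes 7 through 13 inclusive.
	r := hcl.Range{
		Filename: "example.hcl",
		Start:    hcl.Pos{Line: 1, Column: 8, Byte: 7},
		End:      hcl.Pos{Line: 1, Column: 15, Byte: 14},
	}

	fmt.Println(r.String())                       // example.hcl:1,8-15
	fmt.Println(string(r.SliceBytes(src)))        // "nomad"
	fmt.Println(r.ContainsPos(hcl.Pos{Byte: 7}))  // true
	fmt.Println(r.ContainsPos(hcl.Pos{Byte: 14})) // false: End is exclusive

	other := hcl.Range{
		Filename: "example.hcl",
		Start:    hcl.Pos{Line: 1, Column: 1, Byte: 0},
		End:      hcl.Pos{Line: 1, Column: 11, Byte: 10},
	}
	fmt.Println(r.Overlap(other).String()) // example.hcl:1,8-11
}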

Some files were not shown because too many files have changed in this diff.