open-nomad/jobspec/parse_service.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package jobspec

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/nomad/api"
	"github.com/mitchellh/mapstructure"
)
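
// parseGroupServices parses the group-level "service" blocks of a task group
// into api.Service values, prefixing any error with the service's index.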
func parseGroupServices(g *api.TaskGroup, serviceObjs *ast.ObjectList) error {
g.Services = make([]*api.Service, len(serviceObjs.Items))
for idx, o := range serviceObjs.Items {
service, err := parseService(o)
if err != nil {
return multierror.Prefix(err, fmt.Sprintf("service (%d):", idx))
}
g.Services[idx] = service
}
return nil
}
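
// parseServices parses a list of "service" blocks into api.Service values.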
func parseServices(serviceObjs *ast.ObjectList) ([]*api.Service, error) {
services := make([]*api.Service, len(serviceObjs.Items))
for idx, o := range serviceObjs.Items {
service, err := parseService(o)
if err != nil {
return nil, multierror.Prefix(err, fmt.Sprintf("service (%d):", idx))
}
services[idx] = service
}
return services, nil
}
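
// parseService decodes a single "service" block. Scalar fields are decoded
// with mapstructure; the nested check, check_restart, connect, meta,
// canary_meta, and tagged_addresses blocks are filtered out and parsed
// separately below.
//
// For orientation only, a rough (non-exhaustive) sketch of the HCL shape this
// accepts, using only keys from the valid-keys list below:
//
//	service {
//	  name     = "web"
//	  port     = "http"
//	  tags     = ["v1"]
//	  provider = "consul"
//	  check {
//	    type     = "http"
//	    path     = "/health"
//	    interval = "10s"
//	    timeout  = "2s"
//	  }
//	}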
func parseService(o *ast.ObjectItem) (*api.Service, error) {
// Check for invalid keys
valid := []string{
"name",
"tags",
"canary_tags",
"enable_tag_override",
"port",
"check",
"address_mode",
"check_restart",
"connect",
"task",
"meta",
"canary_meta",
"tagged_addresses",
"on_update",
"provider",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return nil, err
}
var service api.Service
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
delete(m, "check")
delete(m, "check_restart")
delete(m, "connect")
delete(m, "meta")
delete(m, "canary_meta")
delete(m, "tagged_addresses")
if err := mapstructure.WeakDecode(m, &service); err != nil {
return nil, err
}
// Filter list
var listVal *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return nil, fmt.Errorf("'%s': should be an object", service.Name)
}
if co := listVal.Filter("check"); len(co.Items) > 0 {
if err := parseChecks(&service, co); err != nil {
return nil, multierror.Prefix(err, fmt.Sprintf("'%s',", service.Name))
}
}
// Filter check_restart
if cro := listVal.Filter("check_restart"); len(cro.Items) > 0 {
if len(cro.Items) > 1 {
return nil, fmt.Errorf("check_restart '%s': cannot have more than 1 check_restart", service.Name)
}
cr, err := parseCheckRestart(cro.Items[0])
if err != nil {
return nil, multierror.Prefix(err, fmt.Sprintf("'%s',", service.Name))
}
service.CheckRestart = cr
}
// Filter connect
if co := listVal.Filter("connect"); len(co.Items) > 0 {
if len(co.Items) > 1 {
return nil, fmt.Errorf("connect '%s': cannot have more than 1 connect block", service.Name)
}
c, err := parseConnect(co.Items[0])
if err != nil {
return nil, multierror.Prefix(err, fmt.Sprintf("'%s',", service.Name))
}
service.Connect = c
}
// Parse out meta fields. These are in HCL as a list so we need
// to iterate over them and merge them.
if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 {
for _, o := range metaO.Elem().Items {
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
if err := mapstructure.WeakDecode(m, &service.Meta); err != nil {
return nil, err
}
}
}
// Parse out canary_meta fields. These are in HCL as a list so we need
// to iterate over them and merge them.
if metaO := listVal.Filter("canary_meta"); len(metaO.Items) > 0 {
for _, o := range metaO.Elem().Items {
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
if err := mapstructure.WeakDecode(m, &service.CanaryMeta); err != nil {
return nil, err
}
}
}
// Parse out tagged_addresses fields. These are in HCL as a list so we need
// to iterate over them and merge them.
if taO := listVal.Filter("tagged_addresses"); len(taO.Items) > 0 {
for _, o := range taO.Elem().Items {
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
if err := mapstructure.WeakDecode(m, &service.TaggedAddresses); err != nil {
return nil, err
}
}
}
return &service, nil
}
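
// parseConnect decodes a service's "connect" block, including the optional
// gateway, sidecar_service, and sidecar_task sub-blocks (at most one of each).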
func parseConnect(co *ast.ObjectItem) (*api.ConsulConnect, error) {
valid := []string{
"native",
"gateway",
"sidecar_service",
"sidecar_task",
}
if err := checkHCLKeys(co.Val, valid); err != nil {
return nil, multierror.Prefix(err, "connect ->")
}
var connect api.ConsulConnect
var m map[string]interface{}
if err := hcl.DecodeObject(&m, co.Val); err != nil {
return nil, err
}
delete(m, "gateway")
delete(m, "sidecar_service")
delete(m, "sidecar_task")
if err := mapstructure.WeakDecode(m, &connect); err != nil {
return nil, err
}
var connectList *ast.ObjectList
if ot, ok := co.Val.(*ast.ObjectType); ok {
connectList = ot.List
} else {
return nil, fmt.Errorf("connect should be an object")
}
// Parse the gateway
o := connectList.Filter("gateway")
if len(o.Items) > 1 {
return nil, fmt.Errorf("only one 'gateway' block allowed per task")
} else if len(o.Items) == 1 {
g, err := parseGateway(o.Items[0])
if err != nil {
return nil, fmt.Errorf("gateway, %v", err)
}
connect.Gateway = g
}
// Parse the sidecar_service
o = connectList.Filter("sidecar_service")
if len(o.Items) > 1 {
return nil, fmt.Errorf("only one 'sidecar_service' block allowed per task")
}
if len(o.Items) == 1 {
r, err := parseSidecarService(o.Items[0])
if err != nil {
return nil, fmt.Errorf("sidecar_service, %v", err)
}
connect.SidecarService = r
}
// Parse the sidecar_task
o = connectList.Filter("sidecar_task")
if len(o.Items) > 1 {
return nil, fmt.Errorf("only one 'sidecar_task' block allowed per task")
}
if len(o.Items) == 1 {
t, err := parseSidecarTask(o.Items[0])
if err != nil {
return nil, fmt.Errorf("sidecar_task, %v", err)
}
connect.SidecarTask = t
}
return &connect, nil
}
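
// parseGateway decodes a connect "gateway" block. Exactly one proxy block is
// required; at most one each of ingress, terminating, and mesh may be present.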
func parseGateway(o *ast.ObjectItem) (*api.ConsulGateway, error) {
valid := []string{
"proxy",
"ingress",
"terminating",
"mesh",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return nil, multierror.Prefix(err, "gateway ->")
}
var gateway api.ConsulGateway
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
delete(m, "proxy")
delete(m, "ingress")
delete(m, "terminating")
delete(m, "mesh")
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
Result: &gateway,
})
if err != nil {
return nil, err
}
if err := dec.Decode(m); err != nil {
return nil, fmt.Errorf("gateway: %v", err)
}
// list of parameters
var listVal *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return nil, fmt.Errorf("proxy: should be an object")
}
// extract and parse the proxy block
po := listVal.Filter("proxy")
if len(po.Items) != 1 {
return nil, fmt.Errorf("must have one 'proxy' block")
}
proxy, err := parseGatewayProxy(po.Items[0])
if err != nil {
return nil, fmt.Errorf("proxy, %v", err)
}
gateway.Proxy = proxy
// extract and parse the ingress block
if io := listVal.Filter("ingress"); len(io.Items) > 0 {
if len(io.Items) > 1 {
return nil, fmt.Errorf("ingress, %s", "multiple ingress blocks not allowed")
}
ingress, err := parseIngressConfigEntry(io.Items[0])
if err != nil {
return nil, fmt.Errorf("ingress, %v", err)
}
gateway.Ingress = ingress
}
if to := listVal.Filter("terminating"); len(to.Items) > 0 {
if len(to.Items) > 1 {
return nil, fmt.Errorf("terminating, %s", "multiple terminating blocks not allowed")
}
terminating, err := parseTerminatingConfigEntry(to.Items[0])
if err != nil {
return nil, fmt.Errorf("terminating, %v", err)
}
gateway.Terminating = terminating
}
if mo := listVal.Filter("mesh"); len(mo.Items) > 0 {
if len(mo.Items) > 1 {
return nil, fmt.Errorf("mesh, %s", "multiple mesh blocks not allowed")
}
// mesh should have no keys
if err := checkHCLKeys(mo.Items[0].Val, []string{}); err != nil {
return nil, fmt.Errorf("mesh, %s", err)
}
gateway.Mesh = &api.ConsulMeshConfigEntry{}
}
return &gateway, nil
}
// parseGatewayProxy parses envoy gateway proxy options supported by Consul.
//
// consul.io/docs/connect/proxies/envoy#gateway-options
func parseGatewayProxy(o *ast.ObjectItem) (*api.ConsulGatewayProxy, error) {
valid := []string{
"connect_timeout",
"envoy_gateway_bind_tagged_addresses",
"envoy_gateway_bind_addresses",
"envoy_gateway_no_default_bind",
"envoy_dns_discovery_type",
"config",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return nil, multierror.Prefix(err, "proxy ->")
}
var proxy api.ConsulGatewayProxy
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
delete(m, "config")
delete(m, "envoy_gateway_bind_addresses")
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
Result: &proxy,
})
if err != nil {
return nil, err
}
if err := dec.Decode(m); err != nil {
return nil, fmt.Errorf("proxy: %v", err)
}
var listVal *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return nil, fmt.Errorf("proxy: should be an object")
}
// need to parse envoy_gateway_bind_addresses if present
if ebo := listVal.Filter("envoy_gateway_bind_addresses"); len(ebo.Items) > 0 {
proxy.EnvoyGatewayBindAddresses = make(map[string]*api.ConsulGatewayBindAddress)
for _, listenerM := range ebo.Items { // object item, each listener object
listenerName := listenerM.Keys[0].Token.Value().(string)
var listenerListVal *ast.ObjectList
if ot, ok := listenerM.Val.(*ast.ObjectType); ok {
listenerListVal = ot.List
} else {
return nil, fmt.Errorf("listener: should be an object")
}
var bind api.ConsulGatewayBindAddress
if err := hcl.DecodeObject(&bind, listenerListVal); err != nil {
return nil, fmt.Errorf("port: should be an int")
}
bind.Name = listenerName
proxy.EnvoyGatewayBindAddresses[listenerName] = &bind
}
}
// need to parse the opaque config if present
if co := listVal.Filter("config"); len(co.Items) > 1 {
return nil, fmt.Errorf("only 1 meta object supported")
} else if len(co.Items) == 1 {
var mSlice []map[string]interface{}
if err := hcl.DecodeObject(&mSlice, co.Items[0].Val); err != nil {
return nil, err
}
if len(mSlice) > 1 {
return nil, fmt.Errorf("only 1 meta object supported")
}
m := mSlice[0]
if err := mapstructure.WeakDecode(m, &proxy.Config); err != nil {
return nil, err
}
proxy.Config = flattenMapSlice(proxy.Config)
}
return &proxy, nil
}
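
// parseConsulIngressService decodes a "service" block inside an ingress
// gateway listener (name and hosts).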
func parseConsulIngressService(o *ast.ObjectItem) (*api.ConsulIngressService, error) {
valid := []string{
"name",
"hosts",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return nil, multierror.Prefix(err, "service ->")
}
var service api.ConsulIngressService
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
Result: &service,
})
if err != nil {
return nil, err
}
if err := dec.Decode(m); err != nil {
return nil, err
}
return &service, nil
}
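
// parseConsulLinkedService decodes a "service" block inside a terminating
// gateway (name plus optional TLS file paths and SNI).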
func parseConsulLinkedService(o *ast.ObjectItem) (*api.ConsulLinkedService, error) {
valid := []string{
"name",
"ca_file",
"cert_file",
"key_file",
"sni",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return nil, multierror.Prefix(err, "service ->")
}
var service api.ConsulLinkedService
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
Result: &service,
})
if err != nil {
return nil, err
}
if err := dec.Decode(m); err != nil {
return nil, err
}
return &service, nil
}
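
// parseConsulIngressListener decodes a "listener" block of an ingress
// gateway, including its nested service blocks.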
func parseConsulIngressListener(o *ast.ObjectItem) (*api.ConsulIngressListener, error) {
valid := []string{
"port",
"protocol",
"service",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return nil, multierror.Prefix(err, "listener ->")
}
var listener api.ConsulIngressListener
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
delete(m, "service")
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
Result: &listener,
})
if err != nil {
return nil, err
}
if err := dec.Decode(m); err != nil {
return nil, err
}
// Parse services
var listVal *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return nil, fmt.Errorf("listener: should be an object")
}
so := listVal.Filter("service")
if len(so.Items) > 0 {
listener.Services = make([]*api.ConsulIngressService, len(so.Items))
for i := range so.Items {
is, err := parseConsulIngressService(so.Items[i])
if err != nil {
return nil, err
}
listener.Services[i] = is
}
}
return &listener, nil
}
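
// parseConsulGatewayTLS decodes the "tls" block of an ingress gateway.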
func parseConsulGatewayTLS(o *ast.ObjectItem) (*api.ConsulGatewayTLSConfig, error) {
valid := []string{
"enabled",
"tls_min_version",
"tls_max_version",
"cipher_suites",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return nil, multierror.Prefix(err, "tls ->")
}
var tls api.ConsulGatewayTLSConfig
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
Result: &tls,
})
if err != nil {
return nil, err
}
if err := dec.Decode(m); err != nil {
return nil, err
}
return &tls, nil
}
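
// parseIngressConfigEntry decodes the "ingress" block of a gateway, which may
// contain at most one tls block and any number of listener blocks.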
func parseIngressConfigEntry(o *ast.ObjectItem) (*api.ConsulIngressConfigEntry, error) {
valid := []string{
"tls",
"listener",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return nil, multierror.Prefix(err, "ingress ->")
}
var ingress api.ConsulIngressConfigEntry
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
delete(m, "tls")
delete(m, "listener")
// Parse tls and listener(s)
var listVal *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return nil, fmt.Errorf("ingress: should be an object")
}
if to := listVal.Filter("tls"); len(to.Items) > 1 {
return nil, fmt.Errorf("only 1 tls object supported")
} else if len(to.Items) == 1 {
if tls, err := parseConsulGatewayTLS(to.Items[0]); err != nil {
return nil, err
} else {
ingress.TLS = tls
}
}
lo := listVal.Filter("listener")
if len(lo.Items) > 0 {
ingress.Listeners = make([]*api.ConsulIngressListener, len(lo.Items))
for i := range lo.Items {
listener, err := parseConsulIngressListener(lo.Items[i])
if err != nil {
return nil, err
}
ingress.Listeners[i] = listener
}
}
return &ingress, nil
}
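
// parseTerminatingConfigEntry decodes the "terminating" block of a gateway,
// which holds the linked service blocks reachable through the gateway.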
func parseTerminatingConfigEntry(o *ast.ObjectItem) (*api.ConsulTerminatingConfigEntry, error) {
valid := []string{
"service",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return nil, multierror.Prefix(err, "terminating ->")
}
var terminating api.ConsulTerminatingConfigEntry
// Parse service(s)
var listVal *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return nil, fmt.Errorf("terminating: should be an object")
}
lo := listVal.Filter("service")
if len(lo.Items) > 0 {
terminating.Services = make([]*api.ConsulLinkedService, len(lo.Items))
for i := range lo.Items {
service, err := parseConsulLinkedService(lo.Items[i])
if err != nil {
return nil, err
}
terminating.Services[i] = service
}
}
return &terminating, nil
}
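
// parseSidecarService decodes the "sidecar_service" block of a connect block,
// including the optional nested proxy block.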
func parseSidecarService(o *ast.ObjectItem) (*api.ConsulSidecarService, error) {
valid := []string{
"port",
"proxy",
"tags",
"disable_default_tcp_check",
"meta",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return nil, multierror.Prefix(err, "sidecar_service ->")
}
var sidecar api.ConsulSidecarService
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
delete(m, "proxy")
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
Result: &sidecar,
})
if err != nil {
return nil, err
}
if err := dec.Decode(m); err != nil {
return nil, fmt.Errorf("sidecar_service: %v", err)
}
var proxyList *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
proxyList = ot.List
} else {
return nil, fmt.Errorf("sidecar_service: should be an object")
}
// Parse the proxy
po := proxyList.Filter("proxy")
if len(po.Items) == 0 {
return &sidecar, nil
}
if len(po.Items) > 1 {
return nil, fmt.Errorf("only one 'proxy' block allowed per task")
}
r, err := parseProxy(po.Items[0])
if err != nil {
return nil, fmt.Errorf("proxy, %v", err)
}
sidecar.Proxy = r
return &sidecar, nil
}
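
// parseSidecarTask decodes the "sidecar_task" block by reusing the task
// parser and copying over the subset of fields supported for sidecar tasks;
// shutdown_delay is decoded separately so the result is a pointer.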
func parseSidecarTask(item *ast.ObjectItem) (*api.SidecarTask, error) {
task, err := parseTask(item, sidecarTaskKeys)
if err != nil {
return nil, err
}
sidecarTask := &api.SidecarTask{
Name: task.Name,
Driver: task.Driver,
User: task.User,
Config: task.Config,
Env: task.Env,
Resources: task.Resources,
Meta: task.Meta,
KillTimeout: task.KillTimeout,
LogConfig: task.LogConfig,
KillSignal: task.KillSignal,
}
// Parse ShutdownDelay separately to get a pointer
var m map[string]interface{}
if err := hcl.DecodeObject(&m, item.Val); err != nil {
return nil, err
}
m = map[string]interface{}{
"shutdown_delay": m["shutdown_delay"],
}
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
Result: sidecarTask,
})
if err != nil {
return nil, err
}
if err := dec.Decode(m); err != nil {
return nil, err
}
return sidecarTask, nil
}
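
// parseProxy decodes the "proxy" block of a sidecar_service, including its
// upstreams, expose, and opaque config blocks.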
func parseProxy(o *ast.ObjectItem) (*api.ConsulProxy, error) {
valid := []string{
"local_service_address",
"local_service_port",
"upstreams",
"expose",
"config",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return nil, multierror.Prefix(err, "proxy ->")
}
var proxy api.ConsulProxy
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return nil, err
}
delete(m, "upstreams")
delete(m, "expose")
delete(m, "config")
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
Result: &proxy,
})
if err != nil {
return nil, err
}
if err := dec.Decode(m); err != nil {
return nil, fmt.Errorf("proxy: %v", err)
}
// Parse upstreams, expose, and config
var listVal *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return nil, fmt.Errorf("proxy: should be an object")
}
uo := listVal.Filter("upstreams")
if len(uo.Items) > 0 {
proxy.Upstreams = make([]*api.ConsulUpstream, len(uo.Items))
for i := range uo.Items {
u, err := parseUpstream(uo.Items[i])
if err != nil {
return nil, err
}
proxy.Upstreams[i] = u
}
}
if eo := listVal.Filter("expose"); len(eo.Items) > 1 {
return nil, fmt.Errorf("only 1 expose object supported")
} else if len(eo.Items) == 1 {
if e, err := parseExpose(eo.Items[0]); err != nil {
return nil, err
} else {
proxy.Expose = e
}
}
// If we have config, then parse that
if o := listVal.Filter("config"); len(o.Items) > 1 {
return nil, fmt.Errorf("only 1 meta object supported")
} else if len(o.Items) == 1 {
var mSlice []map[string]interface{}
if err := hcl.DecodeObject(&mSlice, o.Items[0].Val); err != nil {
return nil, err
}
if len(mSlice) > 1 {
return nil, fmt.Errorf("only 1 meta object supported")
}
m := mSlice[0]
if err := mapstructure.WeakDecode(m, &proxy.Config); err != nil {
return nil, err
}
proxy.Config = flattenMapSlice(proxy.Config)
}
return &proxy, nil
}
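
// parseExpose decodes the proxy "expose" block, which holds a list of path
// blocks describing HTTP/gRPC endpoints exposed through the sidecar proxy.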
func parseExpose(eo *ast.ObjectItem) (*api.ConsulExposeConfig, error) {
valid := []string{
"path", // an array of path blocks
}
if err := checkHCLKeys(eo.Val, valid); err != nil {
return nil, multierror.Prefix(err, "expose ->")
}
var expose api.ConsulExposeConfig
var listVal *ast.ObjectList
if eoType, ok := eo.Val.(*ast.ObjectType); ok {
listVal = eoType.List
} else {
return nil, fmt.Errorf("expose: should be an object")
}
// Parse the expose block
po := listVal.Filter("path") // array
if len(po.Items) > 0 {
expose.Paths = make([]*api.ConsulExposePath, len(po.Items))
for i := range po.Items {
p, err := parseExposePath(po.Items[i])
if err != nil {
return nil, err
}
expose.Paths[i] = p
}
}
return &expose, nil
}
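
// parseExposePath decodes a single expose "path" block (path, protocol,
// local_path_port, listener_port).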
func parseExposePath(epo *ast.ObjectItem) (*api.ConsulExposePath, error) {
valid := []string{
"path",
"protocol",
"local_path_port",
"listener_port",
}
if err := checkHCLKeys(epo.Val, valid); err != nil {
return nil, multierror.Prefix(err, "path ->")
}
var path api.ConsulExposePath
var m map[string]interface{}
if err := hcl.DecodeObject(&m, epo.Val); err != nil {
return nil, err
}
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
Result: &path,
})
if err != nil {
return nil, err
}
if err := dec.Decode(m); err != nil {
return nil, err
}
return &path, nil
}
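
// parseUpstream decodes an entry of the proxy "upstreams" block. A rough
// illustrative sketch of the accepted shape (keys from the list below; the
// mesh_gateway mode value is only an example):
//
//	upstreams {
//	  destination_name = "database"
//	  local_bind_port  = 8080
//	  mesh_gateway {
//	    mode = "local"
//	  }
//	}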
func parseUpstream(uo *ast.ObjectItem) (*api.ConsulUpstream, error) {
valid := []string{
"destination_name",
"local_bind_port",
"local_bind_address",
"datacenter",
"mesh_gateway",
}
if err := checkHCLKeys(uo.Val, valid); err != nil {
return nil, multierror.Prefix(err, "upstream ->")
}
var upstream api.ConsulUpstream
var m map[string]interface{}
if err := hcl.DecodeObject(&m, uo.Val); err != nil {
return nil, err
}
delete(m, "mesh_gateway")
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
Result: &upstream,
})
if err != nil {
return nil, err
}
if err := dec.Decode(m); err != nil {
return nil, err
}
var listVal *ast.ObjectList
if ot, ok := uo.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return nil, fmt.Errorf("'%s': should be an object", upstream.DestinationName)
}
if mgO := listVal.Filter("mesh_gateway"); len(mgO.Items) > 0 {
if len(mgO.Items) > 1 {
return nil, fmt.Errorf("upstream '%s': cannot have more than 1 mesh_gateway", upstream.DestinationName)
}
mgw, err := parseMeshGateway(mgO.Items[0])
if err != nil {
return nil, multierror.Prefix(err, fmt.Sprintf("'%s',", upstream.DestinationName))
}
upstream.MeshGateway = mgw
}
return &upstream, nil
}
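
// parseMeshGateway decodes the "mesh_gateway" block of an upstream (mode only).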
func parseMeshGateway(gwo *ast.ObjectItem) (*api.ConsulMeshGateway, error) {
valid := []string{
"mode",
}
if err := checkHCLKeys(gwo.Val, valid); err != nil {
return nil, multierror.Prefix(err, "mesh_gateway ->")
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, gwo.Val); err != nil {
return nil, err
}
var mgw api.ConsulMeshGateway
if err := mapstructure.WeakDecode(m, &mgw); err != nil {
return nil, err
}
return &mgw, nil
}
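
// parseChecks decodes the "check" blocks of a service. Repeated header blocks
// are merged into a single map, and each check may carry its own
// check_restart block.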
func parseChecks(service *api.Service, checkObjs *ast.ObjectList) error {
service.Checks = make([]api.ServiceCheck, len(checkObjs.Items))
for idx, co := range checkObjs.Items {
// Check for invalid keys
valid := []string{
"name",
"type",
"interval",
"timeout",
"path",
"protocol",
"port",
"expose",
"command",
"args",
"initial_status",
"tls_skip_verify",
"header",
"method",
"check_restart",
"address_mode",
"grpc_service",
"grpc_use_tls",
"task",
"success_before_passing",
"failures_before_critical",
"on_update",
"body",
}
if err := checkHCLKeys(co.Val, valid); err != nil {
return multierror.Prefix(err, "check ->")
}
var check api.ServiceCheck
var cm map[string]interface{}
if err := hcl.DecodeObject(&cm, co.Val); err != nil {
return err
}
// HCL allows repeating blocks so merge 'header' into a single
// map[string][]string.
if headerI, ok := cm["header"]; ok {
headerRaw, ok := headerI.([]map[string]interface{})
if !ok {
return fmt.Errorf("check -> header -> expected a []map[string][]string but found %T", headerI)
}
m := map[string][]string{}
for _, rawm := range headerRaw {
for k, vI := range rawm {
vs, ok := vI.([]interface{})
if !ok {
return fmt.Errorf("check -> header -> %q expected a []string but found %T", k, vI)
}
for _, vI := range vs {
v, ok := vI.(string)
if !ok {
return fmt.Errorf("check -> header -> %q expected a string but found %T", k, vI)
}
m[k] = append(m[k], v)
}
}
}
check.Header = m
// Remove "header" as it has been parsed
delete(cm, "header")
}
delete(cm, "check_restart")
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
Result: &check,
})
if err != nil {
return err
}
if err := dec.Decode(cm); err != nil {
return err
}
// Filter check_restart
var checkRestartList *ast.ObjectList
if ot, ok := co.Val.(*ast.ObjectType); ok {
checkRestartList = ot.List
} else {
return fmt.Errorf("check_restart '%s': should be an object", check.Name)
}
if cro := checkRestartList.Filter("check_restart"); len(cro.Items) > 0 {
if len(cro.Items) > 1 {
return fmt.Errorf("check_restart '%s': cannot have more than 1 check_restart", check.Name)
}
cr, err := parseCheckRestart(cro.Items[0])
if err != nil {
return multierror.Prefix(err, fmt.Sprintf("check: '%s',", check.Name))
}
check.CheckRestart = cr
}
service.Checks[idx] = check
}
return nil
}
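
// parseCheckRestart decodes a "check_restart" block (limit, grace,
// ignore_warnings) into an api.CheckRestart.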
func parseCheckRestart(cro *ast.ObjectItem) (*api.CheckRestart, error) {
valid := []string{
"limit",
"grace",
"ignore_warnings",
}
if err := checkHCLKeys(cro.Val, valid); err != nil {
return nil, multierror.Prefix(err, "check_restart ->")
}
var checkRestart api.CheckRestart
var crm map[string]interface{}
if err := hcl.DecodeObject(&crm, cro.Val); err != nil {
return nil, err
}
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
Result: &checkRestart,
})
if err != nil {
return nil, err
}
if err := dec.Decode(crm); err != nil {
return nil, err
}
return &checkRestart, nil
}