package agent

import (
	"encoding/hex"
	"fmt"
	"log"
	"net"
	"regexp"
	"strings"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/consul"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/lib"
	"github.com/miekg/dns"
)

const (
	// UDP can fit ~25 A records in a 512B response, and ~14 AAAA
	// records. Limit further to prevent unintentional configuration
	// abuse that would have a negative effect on application response
	// times.
	maxUDPAnswerLimit = 8
	maxRecurseRecords = 5

	// Increment a counter when requests staler than this are served
	staleCounterThreshold = 5 * time.Second

	defaultMaxUDPSize = 512

	MaxDNSLabelLength = 63
)

var InvalidDnsRe = regexp.MustCompile(`[^A-Za-z0-9\\-]+`)

type dnsConfig struct {
	AllowStale      bool
	Datacenter      string
	EnableTruncate  bool
	MaxStale        time.Duration
	NodeName        string
	NodeTTL         time.Duration
	OnlyPassing     bool
	RecursorTimeout time.Duration
	SegmentName     string
	ServiceTTL      map[string]time.Duration
	UDPAnswerLimit  int
	ARecordLimit    int
}

// DNSServer is used to wrap an Agent and expose various
// service discovery endpoints using a DNS interface.
type DNSServer struct {
	*dns.Server
	agent     *Agent
	config    *dnsConfig
	domain    string
	recursors []string
	logger    *log.Logger

	// disableCompression is the config.DisableCompression flag that can
	// be safely changed at runtime. It always contains a bool and is
	// initialized with the value from config.DisableCompression.
	disableCompression atomic.Value
}

func NewDNSServer(a *Agent) (*DNSServer, error) {
	var recursors []string
	for _, r := range a.config.DNSRecursors {
		ra, err := recursorAddr(r)
		if err != nil {
			return nil, fmt.Errorf("Invalid recursor address: %v", err)
		}
		recursors = append(recursors, ra)
	}

	// Make sure domain is FQDN, make it case insensitive for ServeMux
	domain := dns.Fqdn(strings.ToLower(a.config.DNSDomain))

	dnscfg := GetDNSConfig(a.config)
	srv := &DNSServer{
		agent:     a,
		config:    dnscfg,
		domain:    domain,
		logger:    a.logger,
		recursors: recursors,
	}
	srv.disableCompression.Store(a.config.DNSDisableCompression)

	return srv, nil
}

func GetDNSConfig(conf *config.RuntimeConfig) *dnsConfig {
	return &dnsConfig{
		AllowStale:      conf.DNSAllowStale,
		ARecordLimit:    conf.DNSARecordLimit,
		Datacenter:      conf.Datacenter,
		EnableTruncate:  conf.DNSEnableTruncate,
		MaxStale:        conf.DNSMaxStale,
		NodeName:        conf.NodeName,
		NodeTTL:         conf.DNSNodeTTL,
		OnlyPassing:     conf.DNSOnlyPassing,
		RecursorTimeout: conf.DNSRecursorTimeout,
		SegmentName:     conf.SegmentName,
		ServiceTTL:      conf.DNSServiceTTL,
		UDPAnswerLimit:  conf.DNSUDPAnswerLimit,
	}
}
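
// ListenAndServe registers the PTR, domain, and (when recursors are
// configured) recursion handlers on a ServeMux, then starts the underlying
// dns.Server on the given network and address. notif is invoked once the
// server has started.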
func (d *DNSServer) ListenAndServe(network, addr string, notif func()) error {
	mux := dns.NewServeMux()
	mux.HandleFunc("arpa.", d.handlePtr)
	mux.HandleFunc(d.domain, d.handleQuery)
	if len(d.recursors) > 0 {
		mux.HandleFunc(".", d.handleRecurse)
	}

	d.Server = &dns.Server{
		Addr:              addr,
		Net:               network,
		Handler:           mux,
		NotifyStartedFunc: notif,
	}
	if network == "udp" {
		d.UDPSize = 65535
	}
	return d.Server.ListenAndServe()
}

// recursorAddr is used to add a port to the recursor if omitted.
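// For example, "8.8.8.8" becomes "8.8.8.8:53", while an address that already
// carries a port is returned unchanged.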
func recursorAddr(recursor string) (string, error) {
	// Add the port if none
START:
	_, _, err := net.SplitHostPort(recursor)
	if ae, ok := err.(*net.AddrError); ok && ae.Err == "missing port in address" {
		recursor = fmt.Sprintf("%s:%d", recursor, 53)
		goto START
	}
	if err != nil {
		return "", err
	}

	// Get the address
	addr, err := net.ResolveTCPAddr("tcp", recursor)
	if err != nil {
		return "", err
	}

	// Return string
	return addr.String(), nil
}

// handlePtr is used to handle "reverse" DNS queries
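// such as "4.3.2.1.in-addr.arpa.". It scans the catalog for a node whose
// address reverse-maps to the queried name and answers with a PTR record of
// the form "<node>.node.<datacenter>.<domain>"; anything else is recursed.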
func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
	q := req.Question[0]
	defer func(s time.Time) {
		metrics.MeasureSinceWithLabels([]string{"dns", "ptr_query"}, s,
			[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
		d.logger.Printf("[DEBUG] dns: request for %v (%v) from client %s (%s)",
			q, time.Since(s), resp.RemoteAddr().String(),
			resp.RemoteAddr().Network())
	}(time.Now())

	// Setup the message response
	m := new(dns.Msg)
	m.SetReply(req)
	m.Compress = !d.disableCompression.Load().(bool)
	m.Authoritative = true
	m.RecursionAvailable = (len(d.recursors) > 0)

	// Only add the SOA if requested
	if req.Question[0].Qtype == dns.TypeSOA {
		d.addSOA(m)
	}

	datacenter := d.agent.config.Datacenter

	// Get the QName without the domain suffix
	qName := strings.ToLower(dns.Fqdn(req.Question[0].Name))

	args := structs.DCSpecificRequest{
		Datacenter: datacenter,
		QueryOptions: structs.QueryOptions{
			Token:      d.agent.tokens.UserToken(),
			AllowStale: d.config.AllowStale,
		},
	}
	var out structs.IndexedNodes

	// TODO: Replace ListNodes with an internal RPC that can do the filter
	// server side to avoid transferring the entire node list.
	if err := d.agent.RPC("Catalog.ListNodes", &args, &out); err == nil {
		for _, n := range out.Nodes {
			arpa, _ := dns.ReverseAddr(n.Address)
			if arpa == qName {
				ptr := &dns.PTR{
					Hdr: dns.RR_Header{Name: q.Name, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: 0},
					Ptr: fmt.Sprintf("%s.node.%s.%s", n.Node, datacenter, d.domain),
				}
				m.Answer = append(m.Answer, ptr)
				break
			}
		}
	}

	// nothing found locally, recurse
	if len(m.Answer) == 0 {
		d.handleRecurse(resp, req)
		return
	}

	// Enable EDNS if the request used EDNS
	if edns := req.IsEdns0(); edns != nil {
		m.SetEdns0(edns.UDPSize(), false)
	}

	// Write out the complete response
	if err := resp.WriteMsg(m); err != nil {
		d.logger.Printf("[WARN] dns: failed to respond: %v", err)
	}
}

// handleQuery is used to handle DNS queries in the configured domain
func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) {
	q := req.Question[0]
	defer func(s time.Time) {
		metrics.MeasureSinceWithLabels([]string{"dns", "domain_query"}, s,
			[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
		d.logger.Printf("[DEBUG] dns: request for name %v type %v class %v (took %v) from client %s (%s)",
			q.Name, dns.Type(q.Qtype), dns.Class(q.Qclass), time.Since(s), resp.RemoteAddr().String(),
			resp.RemoteAddr().Network())
	}(time.Now())

	// Switch to TCP if the client is connected over TCP
	network := "udp"
	if _, ok := resp.RemoteAddr().(*net.TCPAddr); ok {
		network = "tcp"
	}

	// Setup the message response
	m := new(dns.Msg)
	m.SetReply(req)
	m.Compress = !d.disableCompression.Load().(bool)
	m.Authoritative = true
	m.RecursionAvailable = (len(d.recursors) > 0)

	switch req.Question[0].Qtype {
	case dns.TypeSOA:
		ns, glue := d.nameservers(req.IsEdns0() != nil)
		m.Answer = append(m.Answer, d.soa())
		m.Ns = append(m.Ns, ns...)
		m.Extra = append(m.Extra, glue...)
		m.SetRcode(req, dns.RcodeSuccess)

	case dns.TypeNS:
		ns, glue := d.nameservers(req.IsEdns0() != nil)
		m.Answer = ns
		m.Extra = glue
		m.SetRcode(req, dns.RcodeSuccess)

	case dns.TypeAXFR:
		m.SetRcode(req, dns.RcodeNotImplemented)

	default:
		d.dispatch(network, resp.RemoteAddr(), req, m)
	}

	// Handle EDNS
	if edns := req.IsEdns0(); edns != nil {
		m.SetEdns0(edns.UDPSize(), false)
	}

	// Write out the complete response
	if err := resp.WriteMsg(m); err != nil {
		d.logger.Printf("[WARN] dns: failed to respond: %v", err)
	}
}

func (d *DNSServer) soa() *dns.SOA {
	return &dns.SOA{
		Hdr: dns.RR_Header{
			Name:   d.domain,
			Rrtype: dns.TypeSOA,
			Class:  dns.ClassINET,
			Ttl:    0,
		},
		Ns:     "ns." + d.domain,
		Serial: uint32(time.Now().Unix()),

		// todo(fs): make these configurable
		Mbox:    "hostmaster." + d.domain,
		Refresh: 3600,
		Retry:   600,
		Expire:  86400,
		Minttl:  0,
	}
}

// addSOA is used to add an SOA record to a message for the given domain
func (d *DNSServer) addSOA(msg *dns.Msg) {
	msg.Ns = append(msg.Ns, d.soa())
}

// nameservers returns the names and ip addresses of up to three random servers
// in the current cluster which serve as authoritative name servers for zone.
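// Servers whose node names are not valid DNS labels are skipped.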
func (d *DNSServer) nameservers(edns bool) (ns []dns.RR, extra []dns.RR) {
	out, err := d.lookupServiceNodes(d.agent.config.Datacenter, structs.ConsulServiceName, "")
	if err != nil {
		d.logger.Printf("[WARN] dns: Unable to get list of servers: %s", err)
		return nil, nil
	}

	if len(out.Nodes) == 0 {
		d.logger.Printf("[WARN] dns: no servers found")
		return
	}

	// shuffle the nodes to randomize the output
	out.Nodes.Shuffle()

	for _, o := range out.Nodes {
		name, addr, dc := o.Node.Node, o.Node.Address, o.Node.Datacenter

		if InvalidDnsRe.MatchString(name) {
			d.logger.Printf("[WARN] dns: Skipping invalid node %q for NS records", name)
			continue
		}

		fqdn := name + ".node." + dc + "." + d.domain
		fqdn = dns.Fqdn(strings.ToLower(fqdn))

		// NS record
		nsrr := &dns.NS{
			Hdr: dns.RR_Header{
				Name:   d.domain,
				Rrtype: dns.TypeNS,
				Class:  dns.ClassINET,
				Ttl:    uint32(d.config.NodeTTL / time.Second),
			},
			Ns: fqdn,
		}
		ns = append(ns, nsrr)

		glue := d.formatNodeRecord(nil, addr, fqdn, dns.TypeANY, d.config.NodeTTL, edns)
		extra = append(extra, glue...)

		// don't provide more than 3 servers
		if len(ns) >= 3 {
			return
		}
	}

	return
}

// dispatch is used to parse a request and invoke the correct handler
func (d *DNSServer) dispatch(network string, remoteAddr net.Addr, req, resp *dns.Msg) {
	// By default the query is in the default datacenter
	datacenter := d.agent.config.Datacenter

	// Get the QName without the domain suffix
	qName := strings.ToLower(dns.Fqdn(req.Question[0].Name))
	qName = strings.TrimSuffix(qName, d.domain)

	// Split into the label parts
	labels := dns.SplitDomainName(qName)

	// Provide a flag for remembering whether the datacenter name was parsed already.
	var dcParsed bool

	// The last label is either "node", "service", "query", "_<protocol>", or a datacenter name
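	// (read right to left). For example, with the default "consul" domain:
	//   web.service.consul        -> service lookup in the local datacenter
	//   web.service.dc2.consul    -> service lookup in dc2
	//   _web._tcp.service.consul  -> RFC 2782 style SRV service lookup
	//   db01.node.consul          -> node lookup
	//   geo-db.query.consul       -> prepared query lookup
	//   7f000001.addr.consul      -> synthetic A record for 127.0.0.1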
PARSE:
	n := len(labels)
	if n == 0 {
		goto INVALID
	}

	// If this is an SRV query the "service" label is optional; we add it back
	// to use the existing code-path.
	if req.Question[0].Qtype == dns.TypeSRV && strings.HasPrefix(labels[n-1], "_") {
		labels = append(labels, "service")
		n = n + 1
	}

	switch labels[n-1] {
	case "service":
		if n == 1 {
			goto INVALID
		}

		// Support RFC 2782 style syntax
		if n == 3 && strings.HasPrefix(labels[n-2], "_") && strings.HasPrefix(labels[n-3], "_") {

			// Grab the tag since we may nuke it if it's tcp
			tag := labels[n-2][1:]

			// Treat _name._tcp.service.consul as a default, no need to filter on that tag
			if tag == "tcp" {
				tag = ""
			}

			// _name._tag.service.consul
			d.serviceLookup(network, datacenter, labels[n-3][1:], tag, req, resp)

			// Consul 0.3 and prior format for SRV queries
		} else {

			// Support "." in the label, re-join all the parts
			tag := ""
			if n >= 3 {
				tag = strings.Join(labels[:n-2], ".")
			}

			// tag[.tag].name.service.consul
			d.serviceLookup(network, datacenter, labels[n-2], tag, req, resp)
		}

	case "node":
		if n == 1 {
			goto INVALID
		}

		// Allow a "." in the node name, just join all the parts
		node := strings.Join(labels[:n-1], ".")
		d.nodeLookup(network, datacenter, node, req, resp)

	case "query":
		if n == 1 {
			goto INVALID
		}

		// Allow a "." in the query name, just join all the parts.
		query := strings.Join(labels[:n-1], ".")
		d.preparedQueryLookup(network, datacenter, query, remoteAddr, req, resp)

	case "addr":
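		// The label is a hex-encoded IP address: 8 hex characters for an
		// IPv4 address (e.g. "7f000001" for 127.0.0.1) or 32 for IPv6.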
		if n != 2 {
			goto INVALID
		}

		switch len(labels[0]) / 2 {
		// IPv4
		case 4:
			ip, err := hex.DecodeString(labels[0])
			if err != nil {
				goto INVALID
			}

			resp.Answer = append(resp.Answer, &dns.A{
				Hdr: dns.RR_Header{
					Name:   qName + d.domain,
					Rrtype: dns.TypeA,
					Class:  dns.ClassINET,
					Ttl:    uint32(d.config.NodeTTL / time.Second),
				},
				A: ip,
			})
		// IPv6
		case 16:
			ip, err := hex.DecodeString(labels[0])
			if err != nil {
				goto INVALID
			}

			resp.Answer = append(resp.Answer, &dns.AAAA{
				Hdr: dns.RR_Header{
					Name:   qName + d.domain,
					Rrtype: dns.TypeAAAA,
					Class:  dns.ClassINET,
					Ttl:    uint32(d.config.NodeTTL / time.Second),
				},
				AAAA: ip,
			})
		}

	default:
		// https://github.com/hashicorp/consul/issues/3200
		//
		// Since datacenter names cannot contain dots we can only allow one
		// label between the query type and the domain to be the datacenter name.
		// Since the datacenter name is optional and the parser strips off labels at the end until it finds a suitable
		// query type label we return NXDOMAIN when we encounter another label
		// which could be the datacenter name.
		//
		// If '.consul' is the domain then
		// * foo.service.dc.consul is OK
		// * foo.service.dc.stuff.consul is not OK
		if dcParsed {
			goto INVALID
		}
		dcParsed = true

		// Store the DC, and re-parse
		datacenter = labels[n-1]
		labels = labels[:n-1]
		goto PARSE
	}
	return
INVALID:
	d.logger.Printf("[WARN] dns: QName invalid: %s", qName)
	d.addSOA(resp)
	resp.SetRcode(req, dns.RcodeNameError)
}

// nodeLookup is used to handle a node query
func (d *DNSServer) nodeLookup(network, datacenter, node string, req, resp *dns.Msg) {
	// Only handle ANY, A, AAAA, and TXT type requests
	qType := req.Question[0].Qtype
	if qType != dns.TypeANY && qType != dns.TypeA && qType != dns.TypeAAAA && qType != dns.TypeTXT {
		return
	}

	// Make an RPC request
	args := structs.NodeSpecificRequest{
		Datacenter: datacenter,
		Node:       node,
		QueryOptions: structs.QueryOptions{
			Token:      d.agent.tokens.UserToken(),
			AllowStale: d.config.AllowStale,
		},
	}
	var out structs.IndexedNodeServices
RPC:
	if err := d.agent.RPC("Catalog.NodeServices", &args, &out); err != nil {
		d.logger.Printf("[ERR] dns: rpc error: %v", err)
		resp.SetRcode(req, dns.RcodeServerFailure)
		return
	}

	// Verify that request is not too stale, redo the request
	if args.AllowStale {
		if out.LastContact > d.config.MaxStale {
			args.AllowStale = false
			d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")
			goto RPC
		} else if out.LastContact > staleCounterThreshold {
			metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
		}
	}

	// If we have no address, return not found!
	if out.NodeServices == nil {
		d.addSOA(resp)
		resp.SetRcode(req, dns.RcodeNameError)
		return
	}

	// Add the node record
	n := out.NodeServices.Node
	edns := req.IsEdns0() != nil
	addr := d.agent.TranslateAddress(datacenter, n.Address, n.TaggedAddresses)
	records := d.formatNodeRecord(out.NodeServices.Node, addr, req.Question[0].Name, qType, d.config.NodeTTL, edns)
	if records != nil {
		resp.Answer = append(resp.Answer, records...)
	}
}

// encodeKVasRFC1464 encodes a key-value pair according to RFC1464
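// For example, the node meta pair ("build", "1.2.3") becomes the TXT string
// "build=1.2.3"; backquotes and "=" inside the key are escaped with a leading
// backquote as described in the RFC.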
func encodeKVasRFC1464(key, value string) (txt string) {
	// For details on these replacements c.f. https://www.ietf.org/rfc/rfc1464.txt
	key = strings.Replace(key, "`", "``", -1)
	key = strings.Replace(key, "=", "`=", -1)

	// Backquote the leading spaces
	leadingSpacesRE := regexp.MustCompile("^ +")
	numLeadingSpaces := len(leadingSpacesRE.FindString(key))
	key = leadingSpacesRE.ReplaceAllString(key, strings.Repeat("` ", numLeadingSpaces))

	// Backquote the trailing spaces
	trailingSpacesRE := regexp.MustCompile(" +$")
	numTrailingSpaces := len(trailingSpacesRE.FindString(key))
	key = trailingSpacesRE.ReplaceAllString(key, strings.Repeat("` ", numTrailingSpaces))

	value = strings.Replace(value, "`", "``", -1)

	return key + "=" + value
}

// formatNodeRecord takes a Node and returns an A, AAAA, TXT or CNAME record
func (d *DNSServer) formatNodeRecord(node *structs.Node, addr, qName string, qType uint16, ttl time.Duration, edns bool) (records []dns.RR) {
	// Parse the IP
	ip := net.ParseIP(addr)
	var ipv4 net.IP
	if ip != nil {
		ipv4 = ip.To4()
	}

	switch {
	case ipv4 != nil && (qType == dns.TypeANY || qType == dns.TypeA):
		records = append(records, &dns.A{
			Hdr: dns.RR_Header{
				Name:   qName,
				Rrtype: dns.TypeA,
				Class:  dns.ClassINET,
				Ttl:    uint32(ttl / time.Second),
			},
			A: ip,
		})

	case ip != nil && ipv4 == nil && (qType == dns.TypeANY || qType == dns.TypeAAAA):
		records = append(records, &dns.AAAA{
			Hdr: dns.RR_Header{
				Name:   qName,
				Rrtype: dns.TypeAAAA,
				Class:  dns.ClassINET,
				Ttl:    uint32(ttl / time.Second),
			},
			AAAA: ip,
		})

	case ip == nil && (qType == dns.TypeANY || qType == dns.TypeCNAME ||
		qType == dns.TypeA || qType == dns.TypeAAAA || qType == dns.TypeTXT):
		// Get the CNAME
		cnRec := &dns.CNAME{
			Hdr: dns.RR_Header{
				Name:   qName,
				Rrtype: dns.TypeCNAME,
				Class:  dns.ClassINET,
				Ttl:    uint32(ttl / time.Second),
			},
			Target: dns.Fqdn(addr),
		}
		records = append(records, cnRec)

		// Recurse
		more := d.resolveCNAME(cnRec.Target)
		extra := 0
	MORE_REC:
		for _, rr := range more {
			switch rr.Header().Rrtype {
			case dns.TypeCNAME, dns.TypeA, dns.TypeAAAA, dns.TypeTXT:
				records = append(records, rr)
				extra++
				if extra == maxRecurseRecords && !edns {
					break MORE_REC
				}
			}
		}
	}

	if node != nil && (qType == dns.TypeANY || qType == dns.TypeTXT) {
		for key, value := range node.Meta {
			txt := value
			if !strings.HasPrefix(strings.ToLower(key), "rfc1035-") {
				txt = encodeKVasRFC1464(key, value)
			}
			records = append(records, &dns.TXT{
				Hdr: dns.RR_Header{
					Name:   qName,
					Rrtype: dns.TypeTXT,
					Class:  dns.ClassINET,
					Ttl:    uint32(ttl / time.Second),
				},
				Txt: []string{txt},
			})
		}
	}

	return records
}

// indexRRs populates a map which indexes a given list of RRs by name. NOTE that
// the names are all squashed to lower case so we can perform case-insensitive
// lookups; the RRs are not modified.
func indexRRs(rrs []dns.RR, index map[string]dns.RR) {
	for _, rr := range rrs {
		name := strings.ToLower(rr.Header().Name)
		if _, ok := index[name]; !ok {
			index[name] = rr
		}
	}
}

// syncExtra takes a DNS response message and sets the extra data to the most
// minimal set needed to cover the answer data. A pre-made index of RRs is given
// so that can be re-used between calls. This assumes that the extra data is
// only used to provide info for SRV records. If that's not the case, then this
// will wipe out any additional data.
func syncExtra(index map[string]dns.RR, resp *dns.Msg) {
	extra := make([]dns.RR, 0, len(resp.Answer))
	resolved := make(map[string]struct{}, len(resp.Answer))
	for _, ansRR := range resp.Answer {
		srv, ok := ansRR.(*dns.SRV)
		if !ok {
			continue
		}

		// Note that we always use lower case when using the index so
		// that compares are not case-sensitive. We don't alter the actual
		// RRs we add into the extra section, however.
		target := strings.ToLower(srv.Target)

	RESOLVE:
		if _, ok := resolved[target]; ok {
			continue
		}
		resolved[target] = struct{}{}

		extraRR, ok := index[target]
		if ok {
			extra = append(extra, extraRR)
			if cname, ok := extraRR.(*dns.CNAME); ok {
				target = strings.ToLower(cname.Target)
				goto RESOLVE
			}
		}
	}
	resp.Extra = extra
}

// dnsBinaryTruncate uses a fast binary search to find the largest number of
// answer records that still keeps the encoded DNS answer below the maxSize
// parameter, and returns that count.
func dnsBinaryTruncate(resp *dns.Msg, maxSize int, index map[string]dns.RR, hasExtra bool) int {
	originalAnswer := resp.Answer
	startIndex := 0
	endIndex := len(resp.Answer) + 1
	for endIndex-startIndex > 1 {
		median := startIndex + (endIndex-startIndex)/2

		resp.Answer = originalAnswer[:median]
		if hasExtra {
			syncExtra(index, resp)
		}
		aLen := resp.Len()
		if aLen <= maxSize {
			if maxSize-aLen < 10 {
				// We are good, increasing will go out of bounds
				return median
			}
			startIndex = median
		} else {
			endIndex = median
		}
	}
	return startIndex
}

// trimTCPResponse limits the maximum size of a message to 64k, as that is the
// limit for DNS responses
func (d *DNSServer) trimTCPResponse(req, resp *dns.Msg) (trimmed bool) {
	hasExtra := len(resp.Extra) > 0
	// There is some overhead, 65535 does not work
	maxSize := 65523 // 64k - 12 bytes DNS raw overhead

	// We avoid some function calls and allocations by only handling the
	// extra data when necessary.
	var index map[string]dns.RR
	originalSize := resp.Len()
	originalNumRecords := len(resp.Answer)

	// Beyond 2500 records, performance gets bad and the answer will not fit
	// in 64k anyway. Limit the number of records up front: for SRV records
	// the practical maximum is around 500, for A records less than 2k.
	truncateAt := 4096
	if req.Question[0].Qtype == dns.TypeSRV {
		truncateAt = 1024
	}
	if len(resp.Answer) > truncateAt {
		resp.Answer = resp.Answer[:truncateAt]
	}
	if hasExtra {
		index = make(map[string]dns.RR, len(resp.Extra))
		indexRRs(resp.Extra, index)
	}
	truncated := false

	// This enforces the given limit on 64k, the max limit for DNS messages
	for len(resp.Answer) > 1 && resp.Len() > maxSize {
		truncated = true
		// More than 100 bytes over, find the cut-off with a binary search
		if resp.Len()-maxSize > 100 {
			bestIndex := dnsBinaryTruncate(resp, maxSize, index, hasExtra)
			resp.Answer = resp.Answer[:bestIndex]
		} else {
			resp.Answer = resp.Answer[:len(resp.Answer)-1]
		}
		if hasExtra {
			syncExtra(index, resp)
		}
	}
	if truncated {
		d.logger.Printf("[DEBUG] dns: TCP answer to %v too large truncated recs:=%d/%d, size:=%d/%d",
			req.Question,
			len(resp.Answer), originalNumRecords, resp.Len(), originalSize)
	}
	return truncated
}

// trimUDPResponse makes sure a UDP response is not longer than allowed by RFC
// 1035. Enforce an arbitrary limit that can be further ratcheted down by
// config, and then make sure the response doesn't exceed 512 bytes. Any extra
// records will be trimmed along with answers.
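// Trimming happens in two passes: the answer count is first capped at the
// configured UDP answer limit, then answers are dropped until the encoded
// message fits within the allowed byte size.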
func trimUDPResponse(req, resp *dns.Msg, udpAnswerLimit int) (trimmed bool) {
	numAnswers := len(resp.Answer)
	hasExtra := len(resp.Extra) > 0
	maxSize := defaultMaxUDPSize

	// Update to the maximum edns size
	if edns := req.IsEdns0(); edns != nil {
		if size := edns.UDPSize(); size > uint16(maxSize) {
			maxSize = int(size)
		}
	}

	// We avoid some function calls and allocations by only handling the
	// extra data when necessary.
	var index map[string]dns.RR
	if hasExtra {
		index = make(map[string]dns.RR, len(resp.Extra))
		indexRRs(resp.Extra, index)
	}

	// This cuts UDP responses to a useful but limited number of responses.
	maxAnswers := lib.MinInt(maxUDPAnswerLimit, udpAnswerLimit)
	compress := resp.Compress
	if maxSize == defaultMaxUDPSize && numAnswers > maxAnswers {
		// We disable computation of Len ONLY for non-eDNS requests (512 bytes)
		resp.Compress = false
		resp.Answer = resp.Answer[:maxAnswers]
		if hasExtra {
			syncExtra(index, resp)
		}
	}

	// This enforces the given limit on the number of bytes. The default is 512 as
	// per the RFC, but EDNS0 allows for the user to specify larger sizes. Note
	// that we temporarily switch to uncompressed so that we limit to a response
	// that will not exceed 512 bytes uncompressed, which is more conservative and
	// will allow our responses to be compliant even if some downstream server
	// uncompresses them.
	// Even when the size is too big for one single record, try to send it anyway
	// (useful for 512 byte messages)
	for len(resp.Answer) > 1 && resp.Len() > maxSize {
		// More than 100 bytes over, find the cut-off with a binary search
		if resp.Len()-maxSize > 100 {
			bestIndex := dnsBinaryTruncate(resp, maxSize, index, hasExtra)
			resp.Answer = resp.Answer[:bestIndex]
		} else {
			resp.Answer = resp.Answer[:len(resp.Answer)-1]
		}
		if hasExtra {
			syncExtra(index, resp)
		}
	}
	// For 512-byte non-eDNS responses, while we compute the size uncompressed,
	// we send the result compressed
	resp.Compress = compress

	return len(resp.Answer) < numAnswers
}

// trimDNSResponse will trim the response for UDP and TCP
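// and, when EnableTruncate is set, flag a trimmed response as truncated so
// that clients know to retry over TCP.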
func (d *DNSServer) trimDNSResponse(network string, req, resp *dns.Msg) (trimmed bool) {
	if network != "tcp" {
		trimmed = trimUDPResponse(req, resp, d.config.UDPAnswerLimit)
	} else {
		trimmed = d.trimTCPResponse(req, resp)
	}
	// Flag that there are more records to return in the UDP response
	if trimmed && d.config.EnableTruncate {
		resp.Truncated = true
	}
	return trimmed
}

// lookupServiceNodes returns nodes with a given service.
func (d *DNSServer) lookupServiceNodes(datacenter, service, tag string) (structs.IndexedCheckServiceNodes, error) {
	args := structs.ServiceSpecificRequest{
		Datacenter:  datacenter,
		ServiceName: service,
		ServiceTag:  tag,
		TagFilter:   tag != "",
		QueryOptions: structs.QueryOptions{
			Token:      d.agent.tokens.UserToken(),
			AllowStale: d.config.AllowStale,
		},
	}

	var out structs.IndexedCheckServiceNodes
	if err := d.agent.RPC("Health.ServiceNodes", &args, &out); err != nil {
		return structs.IndexedCheckServiceNodes{}, err
	}

	if args.AllowStale && out.LastContact > staleCounterThreshold {
		metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
	}

	// redo the request if the response was too stale
	if args.AllowStale && out.LastContact > d.config.MaxStale {
		args.AllowStale = false
		d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")

		if err := d.agent.RPC("Health.ServiceNodes", &args, &out); err != nil {
			return structs.IndexedCheckServiceNodes{}, err
		}
	}

	// Filter out any service nodes due to health checks
	out.Nodes = out.Nodes.Filter(d.config.OnlyPassing)
	return out, nil
}

// serviceLookup is used to handle a service query
func (d *DNSServer) serviceLookup(network, datacenter, service, tag string, req, resp *dns.Msg) {
	out, err := d.lookupServiceNodes(datacenter, service, tag)
	if err != nil {
		d.logger.Printf("[ERR] dns: rpc error: %v", err)
		resp.SetRcode(req, dns.RcodeServerFailure)
		return
	}

	// If we have no nodes, return not found!
	if len(out.Nodes) == 0 {
		d.addSOA(resp)
		resp.SetRcode(req, dns.RcodeNameError)
		return
	}

	// Perform a random shuffle
	out.Nodes.Shuffle()

	// Determine the TTL
	var ttl time.Duration
	if d.config.ServiceTTL != nil {
		var ok bool
		ttl, ok = d.config.ServiceTTL[service]
		if !ok {
			ttl = d.config.ServiceTTL["*"]
		}
	}

	// Add various responses depending on the request
	qType := req.Question[0].Qtype
	if qType == dns.TypeSRV {
		d.serviceSRVRecords(datacenter, out.Nodes, req, resp, ttl)
	} else {
		d.serviceNodeRecords(datacenter, out.Nodes, req, resp, ttl)
	}

	d.trimDNSResponse(network, req, resp)

	// If the answer is empty and the response isn't truncated, return not found
	if len(resp.Answer) == 0 && !resp.Truncated {
		d.addSOA(resp)
		return
	}
}

func ednsSubnetForRequest(req *dns.Msg) *dns.EDNS0_SUBNET {
	// IsEdns0 returns the EDNS RR if present or nil otherwise
	edns := req.IsEdns0()

	if edns == nil {
		return nil
	}

	for _, o := range edns.Option {
		if subnet, ok := o.(*dns.EDNS0_SUBNET); ok {
			return subnet
		}
	}

	return nil
}

// preparedQueryLookup is used to handle a prepared query.
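// For example, a lookup for "geo-db.query.consul" executes the prepared query
// registered under the name (or ID) "geo-db".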
func (d *DNSServer) preparedQueryLookup(network, datacenter, query string, remoteAddr net.Addr, req, resp *dns.Msg) {
	// Execute the prepared query.
	args := structs.PreparedQueryExecuteRequest{
		Datacenter:    datacenter,
		QueryIDOrName: query,
		QueryOptions: structs.QueryOptions{
			Token:      d.agent.tokens.UserToken(),
			AllowStale: d.config.AllowStale,
		},

		// Always pass the local agent through. In the DNS interface, there
		// is no provision for passing additional query parameters, so we
		// send the local agent's data through to allow distance sorting
		// relative to ourself on the server side.
		Agent: structs.QuerySource{
			Datacenter: d.agent.config.Datacenter,
			Segment:    d.agent.config.SegmentName,
			Node:       d.agent.config.NodeName,
		},
	}

	subnet := ednsSubnetForRequest(req)

	if subnet != nil {
		args.Source.Ip = subnet.Address.String()
	} else {
		switch v := remoteAddr.(type) {
		case *net.UDPAddr:
			args.Source.Ip = v.IP.String()
		case *net.TCPAddr:
			args.Source.Ip = v.IP.String()
		case *net.IPAddr:
			args.Source.Ip = v.IP.String()
		}
	}

	// TODO (slackpad) - What's a safe limit we can set here? It seems like
	// with dup filtering done at this level we need to get everything to
	// match the previous behavior. We can optimize by pushing more filtering
	// into the query execution, but for now I think we need to get the full
	// response. We could also choose a large arbitrary number that will
	// likely work in practice, like 10*maxUDPAnswerLimit which should help
	// reduce bandwidth if there are thousands of nodes available.

	var out structs.PreparedQueryExecuteResponse
RPC:
	if err := d.agent.RPC("PreparedQuery.Execute", &args, &out); err != nil {
		// If they give a bogus query name, treat that as a name error,
		// not a full on server error. We have to use a string compare
		// here since the RPC layer loses the type information.
		if err.Error() == consul.ErrQueryNotFound.Error() {
			d.addSOA(resp)
			resp.SetRcode(req, dns.RcodeNameError)
			return
		}

		d.logger.Printf("[ERR] dns: rpc error: %v", err)
		resp.SetRcode(req, dns.RcodeServerFailure)
		return
	}

	// Verify that request is not too stale, redo the request.
	if args.AllowStale {
		if out.LastContact > d.config.MaxStale {
			args.AllowStale = false
			d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")
			goto RPC
		} else if out.LastContact > staleCounterThreshold {
			metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
		}
	}

	// Determine the TTL. The parse should never fail since we vet it when
	// the query is created, but we check anyway. If the query didn't
	// specify a TTL then we will try to use the agent's service-specific
	// TTL configs.
	var ttl time.Duration
	if out.DNS.TTL != "" {
		var err error
		ttl, err = time.ParseDuration(out.DNS.TTL)
		if err != nil {
			d.logger.Printf("[WARN] dns: Failed to parse TTL '%s' for prepared query '%s', ignoring", out.DNS.TTL, query)
		}
	} else if d.config.ServiceTTL != nil {
		var ok bool
		ttl, ok = d.config.ServiceTTL[out.Service]
		if !ok {
			ttl = d.config.ServiceTTL["*"]
		}
	}

	// If we have no nodes, return not found!
	if len(out.Nodes) == 0 {
		d.addSOA(resp)
		resp.SetRcode(req, dns.RcodeNameError)
		return
	}

	// Add various responses depending on the request.
	qType := req.Question[0].Qtype
	if qType == dns.TypeSRV {
		d.serviceSRVRecords(out.Datacenter, out.Nodes, req, resp, ttl)
	} else {
		d.serviceNodeRecords(out.Datacenter, out.Nodes, req, resp, ttl)
	}

	d.trimDNSResponse(network, req, resp)

	// If the answer is empty and the response isn't truncated, return not found
	if len(resp.Answer) == 0 && !resp.Truncated {
		d.addSOA(resp)
		return
	}
}

// serviceNodeRecords is used to add the node records for a service lookup
func (d *DNSServer) serviceNodeRecords(dc string, nodes structs.CheckServiceNodes, req, resp *dns.Msg, ttl time.Duration) {
	qName := req.Question[0].Name
	qType := req.Question[0].Qtype
	handled := make(map[string]struct{})
	edns := req.IsEdns0() != nil

	count := 0
	for _, node := range nodes {
		// Start with the translated address but use the service address,
		// if specified.
		addr := d.agent.TranslateAddress(dc, node.Node.Address, node.Node.TaggedAddresses)
		if node.Service.Address != "" {
			addr = node.Service.Address
		}

		// If the service address is a CNAME for the service we are looking
		// for then use the node address.
		if qName == strings.TrimSuffix(addr, ".")+"." {
			addr = node.Node.Address
		}

		// Avoid duplicate entries, possible if a node has
		// the same service on multiple ports, etc.
		if _, ok := handled[addr]; ok {
			continue
		}
		handled[addr] = struct{}{}

		// Add the node record
		records := d.formatNodeRecord(node.Node, addr, qName, qType, ttl, edns)
		if records != nil {
			resp.Answer = append(resp.Answer, records...)
			count++
			if count == d.config.ARecordLimit {
				// An ARecordLimit of 0 means no limit; otherwise stop once it is reached
				return
			}
		}
	}
}
|
|
|
|
|
|
|
|
// serviceARecords is used to add the SRV records for a service lookup
|
2016-02-07 21:39:37 +00:00
|
|
|
func (d *DNSServer) serviceSRVRecords(dc string, nodes structs.CheckServiceNodes, req, resp *dns.Msg, ttl time.Duration) {
|
2014-01-06 22:56:41 +00:00
|
|
|
handled := make(map[string]struct{})
|
2017-06-14 23:22:54 +00:00
|
|
|
edns := req.IsEdns0() != nil
|
|
|
|
|
2014-01-03 21:00:03 +00:00
|
|
|
for _, node := range nodes {
|
2014-01-06 22:56:41 +00:00
|
|
|
// Avoid duplicate entries, possible if a node has
|
|
|
|
// the same service on the same port, etc.
|
2015-01-08 18:47:41 +00:00
|
|
|
tuple := fmt.Sprintf("%s:%s:%d", node.Node.Node, node.Service.Address, node.Service.Port)
|
2014-01-06 22:56:41 +00:00
|
|
|
if _, ok := handled[tuple]; ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
handled[tuple] = struct{}{}
|
|
|
|
|
|
|
|
// Add the SRV record
|
2014-01-03 21:00:03 +00:00
|
|
|
srvRec := &dns.SRV{
|
|
|
|
Hdr: dns.RR_Header{
|
|
|
|
Name: req.Question[0].Name,
|
|
|
|
Rrtype: dns.TypeSRV,
|
|
|
|
Class: dns.ClassINET,
|
2014-06-08 23:01:06 +00:00
|
|
|
Ttl: uint32(ttl / time.Second),
|
2014-01-03 21:00:03 +00:00
|
|
|
},
|
|
|
|
Priority: 1,
|
|
|
|
Weight: 1,
|
2014-01-15 21:20:01 +00:00
|
|
|
Port: uint16(node.Service.Port),
|
|
|
|
Target: fmt.Sprintf("%s.node.%s.%s", node.Node.Node, dc, d.domain),
|
2014-01-03 21:00:03 +00:00
|
|
|
}
|
|
|
|
resp.Answer = append(resp.Answer, srvRec)
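// The resulting answer is roughly of the form (illustrative values):
//   web.service.consul. 30 IN SRV 1 1 8080 node1.node.dc1.consul.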
|
|
|
|
|
2016-02-07 21:39:37 +00:00
|
|
|
// Start with the translated address but use the service address,
|
|
|
|
// if specified.
|
2017-06-30 19:42:53 +00:00
|
|
|
addr := d.agent.TranslateAddress(dc, node.Node.Address, node.Node.TaggedAddresses)
|
2015-01-05 22:48:30 +00:00
|
|
|
if node.Service.Address != "" {
|
|
|
|
addr = node.Service.Address
|
|
|
|
}
|
|
|
|
|
2014-02-25 20:07:20 +00:00
|
|
|
// Add the extra record
|
2017-08-01 07:01:49 +00:00
|
|
|
records := d.formatNodeRecord(node.Node, addr, srvRec.Target, dns.TypeANY, ttl, edns)
|
2017-02-01 03:33:41 +00:00
|
|
|
if len(records) > 0 {
|
2016-10-28 02:01:32 +00:00
|
|
|
// Use the node address if it doesn't differ from the service address
|
|
|
|
if addr == node.Node.Address {
|
|
|
|
resp.Extra = append(resp.Extra, records...)
|
|
|
|
} else {
|
|
|
|
// If it differs from the service address, give a special response in the
|
|
|
|
// 'addr.consul' domain with the service IP encoded in it. We have to do
|
|
|
|
// this because we can't put an IP in the target field of an SRV record.
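// For example (illustrative values only): a service address of 10.0.0.1
// hex-encodes to "0a000001", so the SRV target becomes something like
// "0a000001.addr.dc1.consul." for datacenter "dc1" and the default domain.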
|
|
|
|
switch record := records[0].(type) {
|
|
|
|
// IPv4
|
|
|
|
case *dns.A:
|
|
|
|
addr := hex.EncodeToString(record.A)
|
|
|
|
|
|
|
|
// Take the last 8 chars (4 bytes) of the encoded address to avoid junk bytes
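// (net.IP can hold an IPv4 address in its 16-byte form, so the hex
// string may be longer than 8 characters.)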
|
|
|
|
srvRec.Target = fmt.Sprintf("%s.addr.%s.%s", addr[len(addr)-(net.IPv4len*2):], dc, d.domain)
|
|
|
|
record.Hdr.Name = srvRec.Target
|
|
|
|
resp.Extra = append(resp.Extra, record)
|
|
|
|
|
|
|
|
// IPv6
|
|
|
|
case *dns.AAAA:
|
|
|
|
srvRec.Target = fmt.Sprintf("%s.addr.%s.%s", hex.EncodeToString(record.AAAA), dc, d.domain)
|
|
|
|
record.Hdr.Name = srvRec.Target
|
|
|
|
resp.Extra = append(resp.Extra, record)
|
2017-02-01 03:33:41 +00:00
|
|
|
|
|
|
|
// Something else (probably a CNAME); just add the records.
|
|
|
|
default:
|
|
|
|
resp.Extra = append(resp.Extra, records...)
|
2016-10-28 02:01:32 +00:00
|
|
|
}
|
|
|
|
}
|
2014-01-03 21:00:03 +00:00
|
|
|
}
|
|
|
|
}
|
2014-01-03 01:58:58 +00:00
|
|
|
}
|
2014-01-03 23:43:35 +00:00
|
|
|
|
|
|
|
// handleRecurse is used to handle recursive DNS queries
|
|
|
|
func (d *DNSServer) handleRecurse(resp dns.ResponseWriter, req *dns.Msg) {
|
|
|
|
q := req.Question[0]
|
|
|
|
network := "udp"
|
|
|
|
defer func(s time.Time) {
|
2015-08-11 07:47:02 +00:00
|
|
|
d.logger.Printf("[DEBUG] dns: request for %v (%s) (%v) from client %s (%s)",
|
2017-10-17 18:38:24 +00:00
|
|
|
q, network, time.Since(s), resp.RemoteAddr().String(),
|
2015-08-11 07:47:02 +00:00
|
|
|
resp.RemoteAddr().Network())
|
2014-01-03 23:43:35 +00:00
|
|
|
}(time.Now())
|
|
|
|
|
|
|
|
// Switch to TCP if the client connected over TCP
|
|
|
|
if _, ok := resp.RemoteAddr().(*net.TCPAddr); ok {
|
|
|
|
network = "tcp"
|
|
|
|
}
|
|
|
|
|
|
|
|
// Recursively resolve
|
2016-08-26 19:22:04 +00:00
|
|
|
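// Use the same transport (udp or tcp) as the client and bound each
// upstream exchange by the configured recursor timeout.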
c := &dns.Client{Net: network, Timeout: d.config.RecursorTimeout}
|
2014-11-03 19:40:55 +00:00
|
|
|
var r *dns.Msg
|
|
|
|
var rtt time.Duration
|
|
|
|
var err error
|
|
|
|
for _, recursor := range d.recursors {
|
|
|
|
r, rtt, err = c.Exchange(req, recursor)
|
2016-11-03 19:21:16 +00:00
|
|
|
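// Accept truncated replies as well; they are forwarded as-is so the
// client can see the TC bit and retry over TCP if it needs the full answer.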
if err == nil || err == dns.ErrTruncated {
|
2016-08-11 23:24:44 +00:00
|
|
|
// Compress the response; we don't know if the incoming
|
|
|
|
// response was compressed or not, so by not compressing
|
|
|
|
// we might generate an invalid packet on the way out.
|
2017-06-29 14:42:17 +00:00
|
|
|
r.Compress = !d.disableCompression.Load().(bool)
|
2016-08-11 23:24:44 +00:00
|
|
|
|
2014-11-03 19:40:55 +00:00
|
|
|
// Forward the response
|
|
|
|
d.logger.Printf("[DEBUG] dns: recurse RTT for %v (%v)", q, rtt)
|
|
|
|
if err := resp.WriteMsg(r); err != nil {
|
|
|
|
d.logger.Printf("[WARN] dns: failed to respond: %v", err)
|
|
|
|
}
|
2014-10-31 19:19:41 +00:00
|
|
|
return
|
|
|
|
}
|
2014-11-03 19:40:55 +00:00
|
|
|
d.logger.Printf("[ERR] dns: recurse failed: %v", err)
|
2014-01-03 23:43:35 +00:00
|
|
|
}
|
2014-11-03 19:40:55 +00:00
|
|
|
|
|
|
|
// If all resolvers fail, return a SERVFAIL message
|
2015-08-11 07:47:02 +00:00
|
|
|
d.logger.Printf("[ERR] dns: all resolvers failed for %v from client %s (%s)",
|
|
|
|
q, resp.RemoteAddr().String(), resp.RemoteAddr().Network())
|
2014-11-03 19:40:55 +00:00
|
|
|
m := &dns.Msg{}
|
|
|
|
m.SetReply(req)
|
2017-06-29 14:42:17 +00:00
|
|
|
m.Compress = !d.disableCompression.Load().(bool)
|
2014-11-03 19:40:55 +00:00
|
|
|
m.RecursionAvailable = true
|
|
|
|
m.SetRcode(req, dns.RcodeServerFailure)
|
2017-06-14 23:22:54 +00:00
|
|
|
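// Mirror the client's advertised EDNS0 UDP size on the failure response.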
if edns := req.IsEdns0(); edns != nil {
|
|
|
|
m.SetEdns0(edns.UDPSize(), false)
|
|
|
|
}
|
2014-11-03 19:40:55 +00:00
|
|
|
resp.WriteMsg(m)
|
2014-01-03 23:43:35 +00:00
|
|
|
}
|
2014-02-25 20:46:11 +00:00
|
|
|
|
|
|
|
// resolveCNAME is used to recursively resolve CNAME records
|
|
|
|
func (d *DNSServer) resolveCNAME(name string) []dns.RR {
|
2016-10-27 02:23:51 +00:00
|
|
|
// If the CNAME record points to a Consul address, resolve it internally
|
|
|
|
// Convert query to lowercase because DNS is case insensitive; d.domain is
|
|
|
|
// already converted
|
|
|
|
if strings.HasSuffix(strings.ToLower(name), "."+d.domain) {
|
|
|
|
req := &dns.Msg{}
|
|
|
|
resp := &dns.Msg{}
|
|
|
|
|
|
|
|
req.SetQuestion(name, dns.TypeANY)
|
2018-04-11 21:02:04 +00:00
|
|
|
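// Let the server's own dispatch path fill in resp as though the name
// had been queried directly.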
d.dispatch("udp", nil, req, resp)
|
2016-10-27 02:23:51 +00:00
|
|
|
|
|
|
|
return resp.Answer
|
|
|
|
}
|
|
|
|
|
2014-02-25 20:46:11 +00:00
|
|
|
// Do nothing if we don't have a recursor
|
2014-11-03 19:40:55 +00:00
|
|
|
if len(d.recursors) == 0 {
|
2014-02-25 20:46:11 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ask for any A records
|
|
|
|
m := new(dns.Msg)
|
|
|
|
m.SetQuestion(name, dns.TypeA)
|
|
|
|
|
|
|
|
// Make a DNS lookup request
|
2016-08-26 19:22:04 +00:00
|
|
|
c := &dns.Client{Net: "udp", Timeout: d.config.RecursorTimeout}
|
2014-11-03 19:40:55 +00:00
|
|
|
var r *dns.Msg
|
|
|
|
var rtt time.Duration
|
|
|
|
var err error
|
|
|
|
for _, recursor := range d.recursors {
|
|
|
|
r, rtt, err = c.Exchange(m, recursor)
|
|
|
|
if err == nil {
|
|
|
|
d.logger.Printf("[DEBUG] dns: cname recurse RTT for %v (%v)", name, rtt)
|
|
|
|
return r.Answer
|
2014-10-31 19:19:41 +00:00
|
|
|
}
|
2014-11-03 19:40:55 +00:00
|
|
|
d.logger.Printf("[ERR] dns: cname recurse failed for %v: %v", name, err)
|
2014-02-25 20:46:11 +00:00
|
|
|
}
|
2014-11-03 19:40:55 +00:00
|
|
|
d.logger.Printf("[ERR] dns: all resolvers failed for %v", name)
|
2014-10-31 19:19:41 +00:00
|
|
|
return nil
|
2014-02-25 20:46:11 +00:00
|
|
|
}
|