Merge remote-tracking branch 'origin/master' into release-build-updates
commit d587a60662
@@ -5,6 +5,7 @@
 *.swp
 *.test
 .DS_Store
+.fseventsd
 .vagrant/
 /pkg
 Thumbs.db
CHANGELOG.md | 11

@@ -4,6 +4,17 @@ FEATURES:
 
 * dns: Enable PTR record lookups for services with IPs that have no registered node [[PR-4083](https://github.com/hashicorp/consul/pull/4083)]
 
+IMPROVEMENTS:
+
+* agent: A Consul user-agent string is now sent to providers when making retry-join requests [GH-4013](https://github.com/hashicorp/consul/pull/4013)
+
+BUG FIXES:
+
+* agent: Fixed an issue where watches were being duplicated on reload. [[GH-4179](https://github.com/hashicorp/consul/issues/4179)]
+* agent: Fixed an issue with Agent watches on a HTTPS only agent would fail to use TLS. [[GH-4076](https://github.com/hashicorp/consul/issues/4076)]
+* agent: Fixed bug that would cause unnecessary and frequent logging yamux keepalives [[GH-3040](https://github.com/hashicorp/consul/issues/3040)]
+* dns: Re-enable full DNS compression [[GH-4071](https://github.com/hashicorp/consul/issues/4071)]
+
 ## 1.1.0 (May 11, 2018)
 
 FEATURES:
@@ -73,6 +73,7 @@ type delegate interface {
 	SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer, replyFn structs.SnapshotReplyFn) error
 	Shutdown() error
 	Stats() map[string]map[string]string
+	enterpriseDelegate
 }
 
 // notifier is called after a successful JoinLAN.
@@ -646,14 +647,19 @@ func (a *Agent) reloadWatches(cfg *config.RuntimeConfig) error {
 
 	// Determine the primary http(s) endpoint.
 	var netaddr net.Addr
+	https := false
 	if len(cfg.HTTPAddrs) > 0 {
 		netaddr = cfg.HTTPAddrs[0]
 	} else {
 		netaddr = cfg.HTTPSAddrs[0]
+		https = true
 	}
 	addr := netaddr.String()
 	if netaddr.Network() == "unix" {
 		addr = "unix://" + addr
+		https = false
+	} else if https {
+		addr = "https://" + addr
 	}
 
 	// Fire off a goroutine for each new watch plan.
@@ -669,7 +675,19 @@ func (a *Agent) reloadWatches(cfg *config.RuntimeConfig) error {
 			wp.Handler = makeHTTPWatchHandler(a.LogOutput, httpConfig)
 		}
 		wp.LogOutput = a.LogOutput
-		if err := wp.Run(addr); err != nil {
+
+		config := api.DefaultConfig()
+		if https {
+			if a.config.CAPath != "" {
+				config.TLSConfig.CAPath = a.config.CAPath
+			}
+			if a.config.CAFile != "" {
+				config.TLSConfig.CAFile = a.config.CAFile
+			}
+			config.TLSConfig.Address = addr
+		}
+
+		if err := wp.RunWithConfig(addr, config); err != nil {
 			a.logger.Printf("[ERR] agent: Failed to run watch: %v", err)
 		}
 	}(wp)
@@ -2206,3 +2206,23 @@ func TestAgent_reloadWatches(t *testing.T) {
 		t.Fatalf("bad: %s", err)
 	}
 }
+
+func TestAgent_reloadWatchesHTTPS(t *testing.T) {
+	t.Parallel()
+	a := TestAgent{Name: t.Name(), UseTLS: true}
+	a.Start()
+	defer a.Shutdown()
+
+	// Normal watch with http addr set, should succeed
+	newConf := *a.config
+	newConf.Watches = []map[string]interface{}{
+		{
+			"type": "key",
+			"key":  "asdf",
+			"args": []interface{}{"ls"},
+		},
+	}
+	if err := a.reloadWatches(&newConf); err != nil {
+		t.Fatalf("bad: %s", err)
+	}
+}
@@ -72,6 +72,9 @@ type Client struct {
 	shutdown     bool
 	shutdownCh   chan struct{}
 	shutdownLock sync.Mutex
+
+	// embedded struct to hold all the enterprise specific data
+	EnterpriseClient
 }
 
 // NewClient is used to construct a new Consul client from the
@@ -131,6 +134,11 @@ func NewClientLogger(config *Config, logger *log.Logger) (*Client, error) {
 		shutdownCh: make(chan struct{}),
 	}
 
+	if err := c.initEnterprise(); err != nil {
+		c.Shutdown()
+		return nil, err
+	}
+
 	// Initialize the LAN Serf
 	c.serf, err = c.setupSerf(config.SerfLANConfig,
 		c.eventCh, serfLANSnapshot)
@@ -147,6 +155,11 @@ func NewClientLogger(config *Config, logger *log.Logger) (*Client, error) {
 	// handlers depend on the router and the router depends on Serf.
 	go c.lanEventHandler()
 
+	if err := c.startEnterprise(); err != nil {
+		c.Shutdown()
+		return nil, err
+	}
+
 	return c, nil
 }
 
@@ -342,6 +355,17 @@ func (c *Client) Stats() map[string]map[string]string {
 		"serf_lan": c.serf.Stats(),
 		"runtime":  runtimeStats(),
 	}
+
+	for outerKey, outerValue := range c.enterpriseStats() {
+		if _, ok := stats[outerKey]; ok {
+			for innerKey, innerValue := range outerValue {
+				stats[outerKey][innerKey] = innerValue
+			}
+		} else {
+			stats[outerKey] = outerValue
+		}
+	}
+
 	return stats
 }
 
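For illustration only (not part of the commit), a minimal standalone sketch of the merge semantics added to Stats() above: inner keys are merged into an existing outer map, while unknown outer keys are copied wholesale. The map contents below are made up.

    package main

    import "fmt"

    // mergeStats mirrors the loop added to (*Client).Stats and (*Server).Stats.
    func mergeStats(stats, extra map[string]map[string]string) {
    	for outerKey, outerValue := range extra {
    		if _, ok := stats[outerKey]; ok {
    			for innerKey, innerValue := range outerValue {
    				stats[outerKey][innerKey] = innerValue
    			}
    		} else {
    			stats[outerKey] = outerValue
    		}
    	}
    }

    func main() {
    	stats := map[string]map[string]string{
    		"serf_lan": {"members": "3"},
    	}
    	extra := map[string]map[string]string{
    		"serf_lan":   {"encrypted": "true"}, // merged into the existing outer map
    		"enterprise": {"licensed": "false"}, // copied as a new outer key
    	}
    	mergeStats(stats, extra)
    	fmt.Println(stats["serf_lan"], stats["enterprise"])
    }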
@@ -135,6 +135,8 @@ func (c *Client) localEvent(event serf.UserEvent) {
 			c.config.UserEventHandler(event)
 		}
 	default:
-		c.logger.Printf("[WARN] consul: Unhandled local event: %v", event)
+		if !c.handleEnterpriseUserEvents(event) {
+			c.logger.Printf("[WARN] consul: Unhandled local event: %v", event)
+		}
 	}
 }
@@ -0,0 +1,25 @@
+// +build !ent
+
+package consul
+
+import (
+	"github.com/hashicorp/serf/serf"
+)
+
+type EnterpriseClient struct{}
+
+func (c *Client) initEnterprise() error {
+	return nil
+}
+
+func (c *Client) startEnterprise() error {
+	return nil
+}
+
+func (c *Client) handleEnterpriseUserEvents(event serf.UserEvent) bool {
+	return false
+}
+
+func (c *Client) enterpriseStats() map[string]map[string]string {
+	return nil
+}
@@ -0,0 +1,32 @@
+// +build !ent
+
+package consul
+
+import (
+	"net"
+
+	"github.com/hashicorp/consul/agent/pool"
+	"github.com/hashicorp/serf/serf"
+)
+
+type EnterpriseServer struct{}
+
+func (s *Server) initEnterprise() error {
+	return nil
+}
+
+func (s *Server) startEnterprise() error {
+	return nil
+}
+
+func (s *Server) handleEnterpriseUserEvents(event serf.UserEvent) bool {
+	return false
+}
+
+func (s *Server) handleEnterpriseRPCConn(rtype pool.RPCType, conn net.Conn, isTLS bool) bool {
+	return false
+}
+
+func (s *Server) enterpriseStats() map[string]map[string]string {
+	return nil
+}
@@ -115,9 +115,10 @@ func (s *Server) handleConn(conn net.Conn, isTLS bool) {
 		s.handleSnapshotConn(conn)
 
 	default:
-		s.logger.Printf("[ERR] consul.rpc: unrecognized RPC byte: %v %s", typ, logConn(conn))
-		conn.Close()
-		return
+		if !s.handleEnterpriseRPCConn(typ, conn, isTLS) {
+			s.logger.Printf("[ERR] consul.rpc: unrecognized RPC byte: %v %s", typ, logConn(conn))
+			conn.Close()
+		}
 	}
 }
 
@@ -208,6 +208,9 @@ type Server struct {
 	shutdown     bool
 	shutdownCh   chan struct{}
 	shutdownLock sync.Mutex
+
+	// embedded struct to hold all the enterprise specific data
+	EnterpriseServer
 }
 
 func NewServer(config *Config) (*Server, error) {
@@ -297,6 +300,12 @@ func NewServerLogger(config *Config, logger *log.Logger, tokens *token.Store) (*
 		shutdownCh: shutdownCh,
 	}
 
+	// Initialize enterprise specific server functionality
+	if err := s.initEnterprise(); err != nil {
+		s.Shutdown()
+		return nil, err
+	}
+
 	// Initialize the stats fetcher that autopilot will use.
 	s.statsFetcher = NewStatsFetcher(logger, s.connPool, s.config.Datacenter)
 
@@ -338,6 +347,12 @@ func NewServerLogger(config *Config, logger *log.Logger, tokens *token.Store) (*
 		return nil, fmt.Errorf("Failed to start Raft: %v", err)
 	}
 
+	// Start enterprise specific functionality
+	if err := s.startEnterprise(); err != nil {
+		s.Shutdown()
+		return nil, err
+	}
+
 	// Serf and dynamic bind ports
 	//
 	// The LAN serf cluster announces the port of the WAN serf cluster
@@ -1019,6 +1034,17 @@ func (s *Server) Stats() map[string]map[string]string {
 	if s.serfWAN != nil {
 		stats["serf_wan"] = s.serfWAN.Stats()
 	}
+
+	for outerKey, outerValue := range s.enterpriseStats() {
+		if _, ok := stats[outerKey]; ok {
+			for innerKey, innerValue := range outerValue {
+				stats[outerKey][innerKey] = innerValue
+			}
+		} else {
+			stats[outerKey] = outerValue
+		}
+	}
+
 	return stats
 }
 
@@ -198,7 +198,9 @@ func (s *Server) localEvent(event serf.UserEvent) {
 			s.config.UserEventHandler(event)
 		}
 	default:
-		s.logger.Printf("[WARN] consul: Unhandled local event: %v", event)
+		if !s.handleEnterpriseUserEvents(event) {
+			s.logger.Printf("[WARN] consul: Unhandled local event: %v", event)
+		}
 	}
 }
 
agent/dns.go | 31

@@ -773,10 +773,7 @@ func dnsBinaryTruncate(resp *dns.Msg, maxSize int, index map[string]dns.RR, hasE
 func (d *DNSServer) trimTCPResponse(req, resp *dns.Msg) (trimmed bool) {
 	hasExtra := len(resp.Extra) > 0
 	// There is some overhead, 65535 does not work
-	maxSize := 65533 // 64k - 2 bytes
-	// In order to compute properly, we have to avoid compress first
-	compressed := resp.Compress
-	resp.Compress = false
+	maxSize := 65523 // 64k - 12 bytes DNS raw overhead
 
 	// We avoid some function calls and allocations by only handling the
 	// extra data when necessary.
@@ -784,12 +781,13 @@ func (d *DNSServer) trimTCPResponse(req, resp *dns.Msg) (trimmed bool) {
 	originalSize := resp.Len()
 	originalNumRecords := len(resp.Answer)
 
-	// Beyond 2500 records, performance gets bad
-	// Limit the number of records at once, anyway, it won't fit in 64k
-	// For SRV Records, the max is around 500 records, for A, less than 2k
-	truncateAt := 2048
+	// It is not possible to return more than 4k records even with compression
+	// Since we are performing binary search it is not a big deal, but it
+	// improves a bit performance, even with binary search
+	truncateAt := 4096
 	if req.Question[0].Qtype == dns.TypeSRV {
-		truncateAt = 640
+		// More than 1024 SRV records do not fit in 64k
+		truncateAt = 1024
 	}
 	if len(resp.Answer) > truncateAt {
 		resp.Answer = resp.Answer[:truncateAt]
@@ -801,7 +799,7 @@ func (d *DNSServer) trimTCPResponse(req, resp *dns.Msg) (trimmed bool) {
 	truncated := false
 
 	// This enforces the given limit on 64k, the max limit for DNS messages
-	for len(resp.Answer) > 0 && resp.Len() > maxSize {
+	for len(resp.Answer) > 1 && resp.Len() > maxSize {
 		truncated = true
 		// More than 100 bytes, find with a binary search
 		if resp.Len()-maxSize > 100 {
@@ -819,8 +817,6 @@ func (d *DNSServer) trimTCPResponse(req, resp *dns.Msg) (trimmed bool) {
 			req.Question,
 			len(resp.Answer), originalNumRecords, resp.Len(), originalSize)
 	}
-	// Restore compression if any
-	resp.Compress = compressed
 	return truncated
 }
 
@@ -850,7 +846,10 @@ func trimUDPResponse(req, resp *dns.Msg, udpAnswerLimit int) (trimmed bool) {
 
 	// This cuts UDP responses to a useful but limited number of responses.
 	maxAnswers := lib.MinInt(maxUDPAnswerLimit, udpAnswerLimit)
+	compress := resp.Compress
 	if maxSize == defaultMaxUDPSize && numAnswers > maxAnswers {
+		// We disable computation of Len ONLY for non-eDNS request (512 bytes)
+		resp.Compress = false
 		resp.Answer = resp.Answer[:maxAnswers]
 		if hasExtra {
 			syncExtra(index, resp)
@@ -863,9 +862,9 @@ func trimUDPResponse(req, resp *dns.Msg, udpAnswerLimit int) (trimmed bool) {
 	// that will not exceed 512 bytes uncompressed, which is more conservative and
 	// will allow our responses to be compliant even if some downstream server
 	// uncompresses them.
-	compress := resp.Compress
-	resp.Compress = false
-	for len(resp.Answer) > 0 && resp.Len() > maxSize {
+	// Even when size is too big for one single record, try to send it anyway
+	// (usefull for 512 bytes messages)
+	for len(resp.Answer) > 1 && resp.Len() > maxSize {
 		// More than 100 bytes, find with a binary search
 		if resp.Len()-maxSize > 100 {
 			bestIndex := dnsBinaryTruncate(resp, maxSize, index, hasExtra)
@@ -877,6 +876,8 @@ func trimUDPResponse(req, resp *dns.Msg, udpAnswerLimit int) (trimmed bool) {
 			syncExtra(index, resp)
 		}
 	}
+	// For 512 non-eDNS responses, while we compute size non-compressed,
+	// we send result compressed
 	resp.Compress = compress
 
 	return len(resp.Answer) < numAnswers
@@ -3229,7 +3229,7 @@ func TestDNS_TCP_and_UDP_Truncate(t *testing.T) {
 		args := &structs.RegisterRequest{
 			Datacenter: "dc1",
 			Node:       fmt.Sprintf("%s-%d.acme.com", service, i),
-			Address:    fmt.Sprintf("127.%d.%d.%d", index, (i / 255), i%255),
+			Address:    fmt.Sprintf("127.%d.%d.%d", 0, (i / 255), i%255),
 			Service: &structs.NodeService{
 				Service: service,
 				Port:    8000,
@@ -3270,32 +3270,39 @@ func TestDNS_TCP_and_UDP_Truncate(t *testing.T) {
 		"tcp",
 		"udp",
 	}
-	for _, qType := range []uint16{dns.TypeANY, dns.TypeA, dns.TypeSRV} {
-		for _, question := range questions {
-			for _, protocol := range protocols {
-				for _, compress := range []bool{true, false} {
-					t.Run(fmt.Sprintf("lookup %s %s (qType:=%d) compressed=%v", question, protocol, qType, compress), func(t *testing.T) {
-						m := new(dns.Msg)
-						m.SetQuestion(question, dns.TypeANY)
-						if protocol == "udp" {
-							m.SetEdns0(8192, true)
-						}
-						c := new(dns.Client)
-						c.Net = protocol
-						m.Compress = compress
-						in, out, err := c.Exchange(m, a.DNSAddr())
-						if err != nil && err != dns.ErrTruncated {
-							t.Fatalf("err: %v", err)
-						}
-						// Check for the truncate bit
-						shouldBeTruncated := numServices > 5000
-
-						if shouldBeTruncated != in.Truncated || len(in.Answer) > 2000 || len(in.Answer) < 1 || in.Len() > 65535 {
-							info := fmt.Sprintf("service %s question:=%s (%s) (%d total records) sz:= %d in %v",
-								service, question, protocol, numServices, len(in.Answer), out)
-							t.Fatalf("Should have truncated:=%v for %s", shouldBeTruncated, info)
-						}
-					})
+	for _, maxSize := range []uint16{8192, 65535} {
+		for _, qType := range []uint16{dns.TypeANY, dns.TypeA, dns.TypeSRV} {
+			for _, question := range questions {
+				for _, protocol := range protocols {
+					for _, compress := range []bool{true, false} {
+						t.Run(fmt.Sprintf("lookup %s %s (qType:=%d) compressed=%v", question, protocol, qType, compress), func(t *testing.T) {
+							m := new(dns.Msg)
+							m.SetQuestion(question, dns.TypeANY)
+							maxSz := maxSize
+							if protocol == "udp" {
+								maxSz = 8192
+							}
+							m.SetEdns0(uint16(maxSz), true)
+							c := new(dns.Client)
+							c.Net = protocol
+							m.Compress = compress
+							in, _, err := c.Exchange(m, a.DNSAddr())
+							if err != nil && err != dns.ErrTruncated {
+								t.Fatalf("err: %v", err)
+							}
+
+							// Check for the truncate bit
+							buf, err := m.Pack()
+							info := fmt.Sprintf("service %s question:=%s (%s) (%d total records) sz:= %d in %v",
+								service, question, protocol, numServices, len(in.Answer), in)
+							if err != nil {
+								t.Fatalf("Error while packing: %v ; info:=%s", err, info)
+							}
+							if len(buf) > int(maxSz) {
+								t.Fatalf("len(buf) := %d > maxSz=%d for %v", len(buf), maxSz, info)
+							}
+						})
+					}
 				}
 			}
 		}
 	}
@@ -0,0 +1,6 @@
+// +build !ent
+
+package agent
+
+// enterpriseDelegate has no functions in OSS
+type enterpriseDelegate interface{}
@@ -31,6 +31,15 @@ func (e MethodNotAllowedError) Error() string {
 	return fmt.Sprintf("method %s not allowed", e.Method)
 }
 
+// BadRequestError should be returned by a handler when parameters or the payload are not valid
+type BadRequestError struct {
+	Reason string
+}
+
+func (e BadRequestError) Error() string {
+	return fmt.Sprintf("Bad request: %s", e.Reason)
+}
+
 // HTTPServer provides an HTTP api for an agent.
 type HTTPServer struct {
 	*http.Server
|
@ -249,6 +258,11 @@ func (s *HTTPServer) wrap(handler endpoint, methods []string) http.HandlerFunc {
|
||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
|
|
||||||
|
isBadRequest := func(err error) bool {
|
||||||
|
_, ok := err.(BadRequestError)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
addAllowHeader := func(methods []string) {
|
addAllowHeader := func(methods []string) {
|
||||||
resp.Header().Add("Allow", strings.Join(methods, ","))
|
resp.Header().Add("Allow", strings.Join(methods, ","))
|
||||||
}
|
}
|
||||||
|
@ -269,6 +283,9 @@ func (s *HTTPServer) wrap(handler endpoint, methods []string) http.HandlerFunc {
|
||||||
addAllowHeader(err.(MethodNotAllowedError).Allow)
|
addAllowHeader(err.(MethodNotAllowedError).Allow)
|
||||||
resp.WriteHeader(http.StatusMethodNotAllowed) // 405
|
resp.WriteHeader(http.StatusMethodNotAllowed) // 405
|
||||||
fmt.Fprint(resp, err.Error())
|
fmt.Fprint(resp, err.Error())
|
||||||
|
case isBadRequest(err):
|
||||||
|
resp.WriteHeader(http.StatusBadRequest)
|
||||||
|
fmt.Fprint(resp, err.Error())
|
||||||
default:
|
default:
|
||||||
resp.WriteHeader(http.StatusInternalServerError)
|
resp.WriteHeader(http.StatusInternalServerError)
|
||||||
fmt.Fprint(resp, err.Error())
|
fmt.Fprint(resp, err.Error())
|
||||||
|
|
|
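For illustration only, a minimal sketch of how an endpoint in the agent package could use the new BadRequestError so that wrap() answers with HTTP 400; the endpoint name and query parameter below are hypothetical, not part of this commit.

    // Sketch assuming the agent package context (HTTPServer is the type shown above).
    func (s *HTTPServer) ExampleEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
    	name := req.URL.Query().Get("name")
    	if name == "" {
    		// wrap() detects this via isBadRequest and writes a 400 with
    		// the text "Bad request: missing name parameter".
    		return nil, BadRequestError{Reason: "missing name parameter"}
    	}
    	return map[string]string{"name": name}, nil
    }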
@@ -6,6 +6,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/hashicorp/consul/lib"
 	discover "github.com/hashicorp/go-discover"
 )
 
@@ -67,7 +68,11 @@ func (r *retryJoiner) retryJoin() error {
 		return nil
 	}
 
-	disco := discover.Discover{}
+	disco, err := discover.New(discover.WithUserAgent(lib.UserAgent()))
+	if err != nil {
+		return err
+	}
+
 	r.logger.Printf("[INFO] agent: Retry join %s is supported for: %s", r.cluster, strings.Join(disco.Names(), " "))
 	r.logger.Printf("[INFO] agent: Joining %s cluster...", r.cluster)
 	attempt := 0
@@ -8,7 +8,10 @@ import (
 )
 
 func TestGoDiscoverRegistration(t *testing.T) {
-	d := discover.Discover{}
+	d, err := discover.New()
+	if err != nil {
+		t.Fatal(err)
+	}
 	got := d.Names()
 	want := []string{"aliyun", "aws", "azure", "digitalocean", "gce", "os", "scaleway", "softlayer", "triton"}
 	if !reflect.DeepEqual(got, want) {
@@ -61,7 +61,6 @@ type cmd struct {
 	versionPrerelease string
 	versionHuman      string
 	shutdownCh        <-chan struct{}
-	args              []string
 	flagArgs          config.Flags
 	logFilter         *logutils.LevelFilter
 	logOutput         io.Writer
@@ -85,14 +84,6 @@ func (c *cmd) Run(args []string) int {
 // readConfig is responsible for setup of our configuration using
 // the command line and any file configs
 func (c *cmd) readConfig() *config.RuntimeConfig {
-	if err := c.flags.Parse(c.args); err != nil {
-		if !strings.Contains(err.Error(), "help requested") {
-			c.UI.Error(fmt.Sprintf("error parsing flags: %v", err))
-		}
-		return nil
-	}
-	c.flagArgs.Args = c.flags.Args()
-
 	b, err := config.NewBuilder(c.flagArgs)
 	if err != nil {
 		c.UI.Error(err.Error())
|
@ -315,7 +306,13 @@ func startupTelemetry(conf *config.RuntimeConfig) (*metrics.InmemSink, error) {
|
||||||
|
|
||||||
func (c *cmd) run(args []string) int {
|
func (c *cmd) run(args []string) int {
|
||||||
// Parse our configs
|
// Parse our configs
|
||||||
c.args = args
|
if err := c.flags.Parse(args); err != nil {
|
||||||
|
if !strings.Contains(err.Error(), "help requested") {
|
||||||
|
c.UI.Error(fmt.Sprintf("error parsing flags: %v", err))
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
c.flagArgs.Args = c.flags.Args()
|
||||||
config := c.readConfig()
|
config := c.readConfig()
|
||||||
if config == nil {
|
if config == nil {
|
||||||
return 1
|
return 1
|
||||||
|
@@ -506,7 +503,7 @@ func (c *cmd) handleReload(agent *agent.Agent, cfg *config.RuntimeConfig) (*conf
 			"Failed to reload configs: %v", err))
 	}
 
-	return cfg, errs
+	return newCfg, errs
 }
 
 func (c *cmd) Synopsis() string {
@@ -0,0 +1,42 @@
+package helpers
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+)
+
+func LoadDataSource(data string, testStdin io.Reader) (string, error) {
+	var stdin io.Reader = os.Stdin
+	if testStdin != nil {
+		stdin = testStdin
+	}
+
+	// Handle empty quoted shell parameters
+	if len(data) == 0 {
+		return "", nil
+	}
+
+	switch data[0] {
+	case '@':
+		data, err := ioutil.ReadFile(data[1:])
+		if err != nil {
+			return "", fmt.Errorf("Failed to read file: %s", err)
+		} else {
+			return string(data), nil
+		}
+	case '-':
+		if len(data) > 1 {
+			return data, nil
+		}
+		var b bytes.Buffer
+		if _, err := io.Copy(&b, stdin); err != nil {
+			return "", fmt.Errorf("Failed to read stdin: %s", err)
+		}
+		return b.String(), nil
+	default:
+		return data, nil
+	}
+}
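A hedged usage sketch of the new helper (not from the commit; the file path mentioned in the comment is made up): an "@" prefix reads a file, a bare "-" reads stdin, and anything else passes through unchanged.

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/hashicorp/consul/command/helpers"
    )

    func main() {
    	// Literal values are returned unchanged.
    	v, err := helpers.LoadDataSource("hello", nil)
    	fmt.Println(v, err) // "hello" <nil>

    	// A single "-" reads from stdin; tests can substitute any reader.
    	v, err = helpers.LoadDataSource("-", strings.NewReader("from stdin"))
    	fmt.Println(v, err) // "from stdin" <nil>

    	// "@./value.txt" (hypothetical path) would read the named file instead.
    }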
@@ -84,6 +84,7 @@ func (c *cmd) Run(args []string) int {
 	// Specifying a ModifyIndex for a non-CAS operation is not possible.
 	if c.modifyIndex != 0 && !c.cas {
 		c.UI.Error("Cannot specify -modify-index without -cas!")
+		return 1
 	}
 
 	// It is not valid to use a CAS and recurse in the same call
@@ -1,16 +1,14 @@
 package put
 
 import (
-	"bytes"
 	"encoding/base64"
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
-	"os"
 
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/command/flags"
+	"github.com/hashicorp/consul/command/helpers"
 	"github.com/mitchellh/cli"
 )
 
@@ -173,11 +171,6 @@ func (c *cmd) Run(args []string) int {
 }
 
 func (c *cmd) dataFromArgs(args []string) (string, string, error) {
-	var stdin io.Reader = os.Stdin
-	if c.testStdin != nil {
-		stdin = c.testStdin
-	}
-
 	switch len(args) {
 	case 0:
 		return "", "", fmt.Errorf("Missing KEY argument")
@@ -189,30 +182,11 @@ func (c *cmd) dataFromArgs(args []string) (string, string, error) {
 	}
 
 	key := args[0]
-	data := args[1]
+	data, err := helpers.LoadDataSource(args[1], c.testStdin)
 
-	// Handle empty quoted shell parameters
-	if len(data) == 0 {
-		return key, "", nil
-	}
-
-	switch data[0] {
-	case '@':
-		data, err := ioutil.ReadFile(data[1:])
-		if err != nil {
-			return "", "", fmt.Errorf("Failed to read file: %s", err)
-		}
-		return key, string(data), nil
-	case '-':
-		if len(data) > 1 {
-			return key, data, nil
-		}
-		var b bytes.Buffer
-		if _, err := io.Copy(&b, stdin); err != nil {
-			return "", "", fmt.Errorf("Failed to read stdin: %s", err)
-		}
-		return key, b.String(), nil
-	default:
+	if err != nil {
+		return "", "", err
+	} else {
 		return key, data, nil
 	}
 }
@@ -0,0 +1,29 @@
+package lib
+
+import (
+	"fmt"
+	"runtime"
+
+	"github.com/hashicorp/consul/version"
+)
+
+var (
+	// projectURL is the project URL.
+	projectURL = "https://www.consul.io/"
+
+	// rt is the runtime - variable for tests.
+	rt = runtime.Version()
+
+	// versionFunc is the func that returns the current version. This is a
+	// function to take into account the different build processes and distinguish
+	// between enterprise and oss builds.
+	versionFunc = func() string {
+		return version.GetHumanVersion()
+	}
+)
+
+// UserAgent returns the consistent user-agent string for Consul.
+func UserAgent() string {
+	return fmt.Sprintf("Consul/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
@@ -0,0 +1,18 @@
+package lib
+
+import (
+	"testing"
+)
+
+func TestUserAgent(t *testing.T) {
+	projectURL = "https://consul-test.com"
+	rt = "go5.0"
+	versionFunc = func() string { return "1.2.3" }
+
+	act := UserAgent()
+
+	exp := "Consul/1.2.3 (+https://consul-test.com; go5.0)"
+	if exp != act {
+		t.Errorf("expected %q to be %q", act, exp)
+	}
+}
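A brief sketch (not part of the commit) of how the new helper composes with retry-join: UserAgent renders "Consul/<version> (+https://www.consul.io/; <go runtime>)", and retry-join passes that string to go-discover so cloud providers see a Consul-specific user agent. The printed version in the comment is only an example.

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/consul/lib"
    	discover "github.com/hashicorp/go-discover"
    )

    func main() {
    	// e.g. "Consul/1.2.3 (+https://www.consul.io/; go1.10)" for a hypothetical 1.2.3 build.
    	fmt.Println(lib.UserAgent())

    	// Mirrors the retryJoin change above: the discoverer now carries the user agent.
    	d, err := discover.New(discover.WithUserAgent(lib.UserAgent()))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(d.Names())
    }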
@@ -1,3 +1,3 @@
 AWS SDK for Go
 Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 Copyright 2014-2015 Stripe, Inc.
@@ -52,4 +52,3 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
@@ -9,10 +9,8 @@ License at
 Unless required by applicable law or agreed to in writing, software distributed
 under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 CONDITIONS OF ANY KIND, either express or implied. See the License for the
 specific language governing permissions and limitations under the License.
 
-------
-
 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/
@@ -309,8 +309,10 @@ func (s *Session) keepalive() {
 		case <-time.After(s.config.KeepAliveInterval):
 			_, err := s.Ping()
 			if err != nil {
-				s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
-				s.exitErr(ErrKeepAliveTimeout)
+				if err != ErrSessionShutdown {
+					s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
+					s.exitErr(ErrKeepAliveTimeout)
+				}
 				return
 			}
 		case <-s.shutdownCh:
@@ -7,8 +7,12 @@ import (
 	"context"
 	"crypto/tls"
 	"encoding/binary"
+	"fmt"
 	"io"
+	"io/ioutil"
 	"net"
+	"net/http"
+	"net/url"
 	"strings"
 	"time"
 )
@@ -16,13 +20,13 @@ import (
 const dnsTimeout time.Duration = 2 * time.Second
 const tcpIdleTimeout time.Duration = 8 * time.Second
 
+const dohMimeType = "application/dns-udpwireformat"
+
 // A Conn represents a connection to a DNS server.
 type Conn struct {
 	net.Conn                         // a net.Conn holding the connection
 	UDPSize        uint16            // minimum receive buffer for UDP messages
 	TsigSecret     map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
-	rtt            time.Duration
-	t              time.Time
 	tsigRequestMAC string
 }
 
@@ -39,6 +43,7 @@ type Client struct {
 	DialTimeout    time.Duration     // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
 	ReadTimeout    time.Duration     // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
 	WriteTimeout   time.Duration     // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
+	HTTPClient     *http.Client      // The http.Client to use for DNS-over-HTTPS
 	TsigSecret     map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
 	SingleInflight bool              // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
 	group          singleflight
@@ -136,6 +141,11 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
 // attribute appropriately
 func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
 	if !c.SingleInflight {
+		if c.Net == "https" {
+			// TODO(tmthrgd): pipe timeouts into exchangeDOH
+			return c.exchangeDOH(context.TODO(), m, address)
+		}
+
 		return c.exchange(m, address)
 	}
 
@@ -148,6 +158,11 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
 		cl = cl1
 	}
 	r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
+		if c.Net == "https" {
+			// TODO(tmthrgd): pipe timeouts into exchangeDOH
+			return c.exchangeDOH(context.TODO(), m, address)
+		}
+
 		return c.exchange(m, address)
 	})
 	if r != nil && shared {
@@ -177,8 +192,9 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
 	}
 
 	co.TsigSecret = c.TsigSecret
+	t := time.Now()
 	// write with the appropriate write timeout
-	co.SetWriteDeadline(time.Now().Add(c.getTimeoutForRequest(c.writeTimeout())))
+	co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout())))
 	if err = co.WriteMsg(m); err != nil {
 		return nil, 0, err
 	}
@@ -188,7 +204,79 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
 	if err == nil && r.Id != m.Id {
 		err = ErrId
 	}
-	return r, co.rtt, err
+	rtt = time.Since(t)
+	return r, rtt, err
+}
+
+func (c *Client) exchangeDOH(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
+	p, err := m.Pack()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// TODO(tmthrgd): Allow the path to be customised?
+	u := &url.URL{
+		Scheme: "https",
+		Host:   a,
+		Path:   "/.well-known/dns-query",
+	}
+	if u.Port() == "443" {
+		u.Host = u.Hostname()
+	}
+
+	req, err := http.NewRequest(http.MethodPost, u.String(), bytes.NewReader(p))
+	if err != nil {
+		return nil, 0, err
+	}
+
+	req.Header.Set("Content-Type", dohMimeType)
+	req.Header.Set("Accept", dohMimeType)
+
+	t := time.Now()
+
+	hc := http.DefaultClient
+	if c.HTTPClient != nil {
+		hc = c.HTTPClient
+	}
+
+	if ctx != context.Background() && ctx != context.TODO() {
+		req = req.WithContext(ctx)
+	}
+
+	resp, err := hc.Do(req)
+	if err != nil {
+		return nil, 0, err
+	}
+	defer closeHTTPBody(resp.Body)
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, 0, fmt.Errorf("dns: server returned HTTP %d error: %q", resp.StatusCode, resp.Status)
+	}
+
+	if ct := resp.Header.Get("Content-Type"); ct != dohMimeType {
+		return nil, 0, fmt.Errorf("dns: unexpected Content-Type %q; expected %q", ct, dohMimeType)
+	}
+
+	p, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	rtt = time.Since(t)
+
+	r = new(Msg)
+	if err := r.Unpack(p); err != nil {
+		return r, 0, err
+	}
+
+	// TODO: TSIG? Is it even supported over DoH?
+
+	return r, rtt, nil
+}
+
+func closeHTTPBody(r io.ReadCloser) error {
+	io.Copy(ioutil.Discard, io.LimitReader(r, 8<<20))
+	return r.Close()
 }
 
 // ReadMsg reads a message from the connection co.
@@ -240,7 +328,6 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
 		}
 		p = make([]byte, l)
 		n, err = tcpRead(r, p)
-		co.rtt = time.Since(co.t)
 	default:
 		if co.UDPSize > MinMsgSize {
 			p = make([]byte, co.UDPSize)
@@ -248,7 +335,6 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
 			p = make([]byte, MinMsgSize)
 		}
 		n, err = co.Read(p)
-		co.rtt = time.Since(co.t)
 	}
 
 	if err != nil {
@@ -361,7 +447,6 @@ func (co *Conn) WriteMsg(m *Msg) (err error) {
 	if err != nil {
 		return err
 	}
-	co.t = time.Now()
 	if _, err = co.Write(out); err != nil {
 		return err
 	}
@@ -493,6 +578,10 @@ func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout
 // context, if present. If there is both a context deadline and a configured
 // timeout on the client, the earliest of the two takes effect.
 func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
+	if !c.SingleInflight && c.Net == "https" {
+		return c.exchangeDOH(ctx, m, a)
+	}
+
 	var timeout time.Duration
 	if deadline, ok := ctx.Deadline(); !ok {
 		timeout = 0
@@ -501,6 +590,7 @@ func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg,
 	}
 	// not passing the context to the underlying calls, as the API does not support
 	// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
+	// TODO(tmthrgd): this is a race condition
 	c.Dialer = &net.Dialer{Timeout: timeout}
 	return c.Exchange(m, a)
 }
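Based on the vendored change above, setting Client.Net to "https" routes Exchange and ExchangeContext through exchangeDOH, which POSTs the packed query to /.well-known/dns-query with the application/dns-udpwireformat media type. A hedged sketch against the vendored library version (the resolver address is a placeholder, not from the commit):

    package main

    import (
    	"fmt"

    	"github.com/miekg/dns"
    )

    func main() {
    	m := new(dns.Msg)
    	m.SetQuestion("consul.io.", dns.TypeA)

    	c := new(dns.Client)
    	c.Net = "https" // this client now uses DNS-over-HTTPS per the change above

    	// "doh.example.net:443" is a hypothetical resolver; exchangeDOH strips
    	// the default :443 when building the request URL.
    	in, rtt, err := c.Exchange(m, "doh.example.net:443")
    	if err != nil {
    		fmt.Println("exchange failed:", err)
    		return
    	}
    	fmt.Println(rtt, in.Answer)
    }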
@@ -91,7 +91,7 @@ func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
 				n = 1
 			}
 			c.Timeout = n
-		case len(s) >= 8 && s[:9] == "attempts:":
+		case len(s) >= 9 && s[:9] == "attempts:":
 			n, _ := strconv.Atoi(s[9:])
 			if n < 1 {
 				n = 1
@@ -101,7 +101,8 @@ Names:
 
 	// compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names
 
-	fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n")
+	fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR, initLen int) int {\n")
+	fmt.Fprint(b, "currentLen := initLen\n")
 	fmt.Fprint(b, "switch x := r.(type) {\n")
 	for _, name := range domainTypes {
 		o := scope.Lookup(name)
@@ -109,7 +110,10 @@ Names:
 
 		fmt.Fprintf(b, "case *%s:\n", name)
 		for i := 1; i < st.NumFields(); i++ {
-			out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) }
+			out := func(s string) {
+				fmt.Fprintf(b, "currentLen -= len(x.%s) + 1\n", st.Field(i).Name())
+				fmt.Fprintf(b, "currentLen += compressionLenHelper(c, x.%s, currentLen)\n", st.Field(i).Name())
+			}
 
 			if _, ok := st.Field(i).Type().(*types.Slice); ok {
 				switch st.Tag(i) {
@@ -118,8 +122,12 @@ Names:
 				case `dns:"cdomain-name"`:
 					// For HIP we need to slice over the elements in this slice.
 					fmt.Fprintf(b, `for i := range x.%s {
-	compressionLenHelper(c, x.%s[i])
+	currentLen -= len(x.%s[i]) + 1
 }
+`, st.Field(i).Name(), st.Field(i).Name())
+					fmt.Fprintf(b, `for i := range x.%s {
+	currentLen += compressionLenHelper(c, x.%s[i], currentLen)
+}
 `, st.Field(i).Name(), st.Field(i).Name())
 				}
 				continue
@@ -133,11 +141,11 @@ Names:
 			}
 		}
 	}
-	fmt.Fprintln(b, "}\n}\n\n")
+	fmt.Fprintln(b, "}\nreturn currentLen - initLen\n}\n\n")
 
 	// compressionLenSearchType - search cdomain-tags types for compressible names.
 
-	fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n")
+	fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) {\n")
 	fmt.Fprint(b, "switch x := r.(type) {\n")
 	for _, name := range cdomainTypes {
 		o := scope.Lookup(name)
@@ -147,7 +155,7 @@ Names:
 		j := 1
 		for i := 1; i < st.NumFields(); i++ {
 			out := func(s string, j int) {
-				fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name())
+				fmt.Fprintf(b, "k%d, ok%d, sz%d := compressionLenSearch(c, x.%s)\n", j, j, j, st.Field(i).Name())
 			}
 
 			// There are no slice types with names that can be compressed.
|
||||||
}
|
}
|
||||||
k := "k1"
|
k := "k1"
|
||||||
ok := "ok1"
|
ok := "ok1"
|
||||||
|
sz := "sz1"
|
||||||
for i := 2; i < j; i++ {
|
for i := 2; i < j; i++ {
|
||||||
k += fmt.Sprintf(" + k%d", i)
|
k += fmt.Sprintf(" + k%d", i)
|
||||||
ok += fmt.Sprintf(" && ok%d", i)
|
ok += fmt.Sprintf(" && ok%d", i)
|
||||||
|
sz += fmt.Sprintf(" + sz%d", i)
|
||||||
}
|
}
|
||||||
fmt.Fprintf(b, "return %s, %s\n", k, ok)
|
fmt.Fprintf(b, "return %s, %s, %s\n", k, ok, sz)
|
||||||
}
|
}
|
||||||
fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n")
|
fmt.Fprintln(b, "}\nreturn 0, false, 0\n}\n\n")
|
||||||
|
|
||||||
// gofmt
|
// gofmt
|
||||||
res, err := format.Source(b.Bytes())
|
res, err := format.Source(b.Bytes())
|
||||||
|
|
|
@@ -55,16 +55,6 @@ func (h *RR_Header) Header() *RR_Header { return h }
 // Just to implement the RR interface.
 func (h *RR_Header) copy() RR { return nil }
 
-func (h *RR_Header) copyHeader() *RR_Header {
-	r := new(RR_Header)
-	r.Name = h.Name
-	r.Rrtype = h.Rrtype
-	r.Class = h.Class
-	r.Ttl = h.Ttl
-	r.Rdlength = h.Rdlength
-	return r
-}
-
 func (h *RR_Header) String() string {
 	var s string
 
@@ -73,6 +73,7 @@ var StringToAlgorithm = reverseInt8(AlgorithmToString)
 // AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
 var AlgorithmToHash = map[uint8]crypto.Hash{
 	RSAMD5:           crypto.MD5, // Deprecated in RFC 6725
+	DSA:              crypto.SHA1,
 	RSASHA1:          crypto.SHA1,
 	RSASHA1NSEC3SHA1: crypto.SHA1,
 	RSASHA256:        crypto.SHA256,
@@ -239,7 +240,7 @@ func (k *DNSKEY) ToDS(h uint8) *DS {
 // ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
 func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
 	c := &CDNSKEY{DNSKEY: *k}
-	c.Hdr = *k.Hdr.copyHeader()
+	c.Hdr = k.Hdr
 	c.Hdr.Rrtype = TypeCDNSKEY
 	return c
 }
@@ -247,7 +248,7 @@ func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
 // ToCDS converts a DS record to a CDS record.
 func (d *DS) ToCDS() *CDS {
 	c := &CDS{DS: *d}
-	c.Hdr = *d.Hdr.copyHeader()
+	c.Hdr = d.Hdr
 	c.Hdr.Rrtype = TypeCDS
 	return c
 }
@@ -73,11 +73,11 @@ and port to use for the connection:
 		Port: 12345,
 		Zone: "",
 	}
-	d := net.Dialer{
+	c.Dialer := &net.Dialer{
 		Timeout: 200 * time.Millisecond,
 		LocalAddr: &laddr,
 	}
-	in, rtt, err := c.ExchangeWithDialer(&d, m1, "8.8.8.8:53")
+	in, rtt, err := c.Exchange(m1, "8.8.8.8:53")
 
 If these "advanced" features are not needed, a simple UDP query can be sent,
 with:
@@ -595,6 +595,13 @@ func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
 	if err != nil {
 		return nil, len(msg), err
 	}
+
+	return UnpackRRWithHeader(h, msg, off)
+}
+
+// UnpackRRWithHeader unpacks the record type specific payload given an existing
+// RR_Header.
+func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) {
 	end := off + int(h.Rdlength)
 
 	if fn, known := typeToUnpack[h.Rrtype]; !known {
|
||||||
return dns.PackBuffer(nil)
|
return dns.PackBuffer(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PackBuffer packs a Msg, using the given buffer buf. If buf is too small
|
// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated.
|
||||||
// a new buffer is allocated.
|
|
||||||
func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
|
func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
|
||||||
// We use a similar function in tsig.go's stripTsig.
|
var compression map[string]int
|
||||||
var (
|
|
||||||
dh Header
|
|
||||||
compression map[string]int
|
|
||||||
)
|
|
||||||
|
|
||||||
if dns.Compress {
|
if dns.Compress {
|
||||||
compression = make(map[string]int) // Compression pointer mappings
|
compression = make(map[string]int) // Compression pointer mappings.
|
||||||
}
|
}
|
||||||
|
return dns.packBufferWithCompressionMap(buf, compression)
|
||||||
|
}
|
||||||
|
|
||||||
|
// packBufferWithCompressionMap packs a Msg, using the given buffer buf.
|
||||||
|
func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]int) (msg []byte, err error) {
|
||||||
|
// We use a similar function in tsig.go's stripTsig.
|
||||||
|
|
||||||
|
var dh Header
|
||||||
|
|
||||||
if dns.Rcode < 0 || dns.Rcode > 0xFFF {
|
if dns.Rcode < 0 || dns.Rcode > 0xFFF {
|
||||||
return nil, ErrRcode
|
return nil, ErrRcode
|
||||||
|
@@ -707,12 +716,11 @@ func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
 			return nil, ErrExtendedRcode
 		}
 		opt.SetExtendedRcode(uint8(dns.Rcode >> 4))
-		dns.Rcode &= 0xF
 	}
 
 	// Convert convenient Msg into wire-like Header.
 	dh.Id = dns.Id
-	dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode)
+	dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF)
 	if dns.Response {
 		dh.Bits |= _QR
 	}
@@ -915,94 +923,138 @@ func (dns *Msg) String() string {
 // than packing it, measuring the size and discarding the buffer.
 func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) }
 
+func compressedLenWithCompressionMap(dns *Msg, compression map[string]int) int {
+	l := 12 // Message header is always 12 bytes
+	for _, r := range dns.Question {
+		compressionLenHelper(compression, r.Name, l)
+		l += r.len()
+	}
+	l += compressionLenSlice(l, compression, dns.Answer)
+	l += compressionLenSlice(l, compression, dns.Ns)
+	l += compressionLenSlice(l, compression, dns.Extra)
+	return l
+}
+
 // compressedLen returns the message length when in compressed wire format
 // when compress is true, otherwise the uncompressed length is returned.
 func compressedLen(dns *Msg, compress bool) int {
 	// We always return one more than needed.
-	l := 12 // Message header is always 12 bytes
 	if compress {
 		compression := map[string]int{}
-		for _, r := range dns.Question {
-			l += r.len()
-			compressionLenHelper(compression, r.Name)
-		}
-		l += compressionLenSlice(compression, dns.Answer)
-		l += compressionLenSlice(compression, dns.Ns)
-		l += compressionLenSlice(compression, dns.Extra)
-	} else {
-		for _, r := range dns.Question {
-			l += r.len()
-		}
-		for _, r := range dns.Answer {
-			if r != nil {
-				l += r.len()
-			}
-		}
-		for _, r := range dns.Ns {
-			if r != nil {
-				l += r.len()
-			}
-		}
-		for _, r := range dns.Extra {
-			if r != nil {
-				l += r.len()
-			}
-		}
+		return compressedLenWithCompressionMap(dns, compression)
 	}
+	l := 12 // Message header is always 12 bytes
+
+	for _, r := range dns.Question {
+		l += r.len()
+	}
+	for _, r := range dns.Answer {
+		if r != nil {
+			l += r.len()
+		}
+	}
+	for _, r := range dns.Ns {
+		if r != nil {
+			l += r.len()
+		}
+	}
+	for _, r := range dns.Extra {
+		if r != nil {
+			l += r.len()
+		}
+	}
 
 	return l
 }
 
-func compressionLenSlice(c map[string]int, rs []RR) int {
-	var l int
+func compressionLenSlice(lenp int, c map[string]int, rs []RR) int {
+	initLen := lenp
 	for _, r := range rs {
 		if r == nil {
 			continue
 		}
-		l += r.len()
-		k, ok := compressionLenSearch(c, r.Header().Name)
+		// TmpLen is to track len of record at 14bits boudaries
+		tmpLen := lenp
+
+		x := r.len()
+		// track this length, and the global length in len, while taking compression into account for both.
+		k, ok, _ := compressionLenSearch(c, r.Header().Name)
 		if ok {
-			l += 1 - k
+			// Size of x is reduced by k, but we add 1 since k includes the '.' and label descriptor take 2 bytes
+			// so, basically x:= x - k - 1 + 2
+			x += 1 - k
 		}
-		compressionLenHelper(c, r.Header().Name)
-		k, ok = compressionLenSearchType(c, r)
+
+		tmpLen += compressionLenHelper(c, r.Header().Name, tmpLen)
+		k, ok, _ = compressionLenSearchType(c, r)
 		if ok {
-			l += 1 - k
+			x += 1 - k
 		}
-		compressionLenHelperType(c, r)
+		lenp += x
+		tmpLen = lenp
+		tmpLen += compressionLenHelperType(c, r, tmpLen)
 	}
-	return l
+	return lenp - initLen
 }
 
-// Put the parts of the name in the compression map.
-func compressionLenHelper(c map[string]int, s string) {
+// Put the parts of the name in the compression map, return the size in bytes added in payload
+func compressionLenHelper(c map[string]int, s string, currentLen int) int {
+	if currentLen > maxCompressionOffset {
+		// We won't be able to add any label that could be re-used later anyway
+		return 0
+	}
+	if _, ok := c[s]; ok {
+		return 0
+	}
+	initLen := currentLen
 	pref := ""
+	prev := s
 	lbs := Split(s)
-	for j := len(lbs) - 1; j >= 0; j-- {
+	for j := 0; j < len(lbs); j++ {
 		pref = s[lbs[j]:]
+		currentLen += len(prev) - len(pref)
+		prev = pref
 		if _, ok := c[pref]; !ok {
-			c[pref] = len(pref)
+			// If first byte label is within the first 14bits, it might be re-used later
+			if currentLen < maxCompressionOffset {
+				c[pref] = currentLen
+			}
+		} else {
+			added := currentLen - initLen
+			if j > 0 {
+				// We added a new PTR
+				added += 2
+			}
+			return added
 		}
 	}
+	return currentLen - initLen
 }
 
 // Look for each part in the compression map and returns its length,
 // keep on searching so we get the longest match.
-func compressionLenSearch(c map[string]int, s string) (int, bool) {
+// Will return the size of compression found, whether a match has been
+// found and the size of record if added in payload
+func compressionLenSearch(c map[string]int, s string) (int, bool, int) {
 	off := 0
 	end := false
 	if s == "" { // don't bork on bogus data
-		return 0, false
+		return 0, false, 0
 	}
+	fullSize := 0
 	for {
 		if _, ok := c[s[off:]]; ok {
-			return len(s[off:]), true
+			return len(s[off:]), true, fullSize + off
 		}
 		if end {
 			break
 		}
+		// Each label descriptor takes 2 bytes, add it
+		fullSize += 2
 		off, end = NextLabel(s, off)
 	}
-	return 0, false
+	return 0, false, fullSize + len(s)
 }
 
 // Copy returns a new RR which is a deep-copy of r.
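Editorial note: a small sketch of how the reworked Len()/compressedLen path can be sanity-checked against the actual packed output. The query name and type are arbitrary; the intent after this change is that the compression-aware estimate is never smaller than the real message:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("very.long.subdomain.example.org.", dns.TypeSRV)
	m.Compress = true

	estimate := m.Len() // estimated compressed wire length
	wire, err := m.Pack()
	if err != nil {
		panic(err)
	}
	// compressedLen deliberately returns at least one more byte than needed,
	// so estimate should be >= len(wire).
	fmt.Println(estimate, len(wire), estimate >= len(wire))
}
```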
@@ -56,8 +56,7 @@ func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() }
 func (r *PrivateRR) copy() RR {
 	// make new RR like this:
 	rr := mkPrivateRR(r.Hdr.Rrtype)
-	newh := r.Hdr.copyHeader()
-	rr.Hdr = *newh
+	rr.Hdr = r.Hdr
 
 	err := r.Data.Copy(rr.Data)
 	if err != nil {
@@ -1255,8 +1255,10 @@ func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
 	if len(l.token) == 0 || l.err {
 		return nil, &ParseError{f, "bad NSEC3 Salt", l}, ""
 	}
-	rr.SaltLength = uint8(len(l.token)) / 2
-	rr.Salt = l.token
+	if l.token != "-" {
+		rr.SaltLength = uint8(len(l.token)) / 2
+		rr.Salt = l.token
+	}
 
 	<-c
 	l = <-c
@@ -1321,8 +1323,10 @@ func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, strin
 	rr.Iterations = uint16(i)
 	<-c
 	l = <-c
-	rr.SaltLength = uint8(len(l.token))
-	rr.Salt = l.token
+	if l.token != "-" {
+		rr.SaltLength = uint8(len(l.token))
+		rr.Salt = l.token
+	}
 	return rr, nil, ""
 }
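Editorial note: the two parser fixes above handle the RFC 5155 presentation form where a lone "-" means "no salt". A hedged sketch of the round trip (record values are arbitrary, not taken from the diff):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// "-" in the salt field means an empty salt; after this fix it is no
	// longer stored as the literal string "-".
	rr, err := dns.NewRR("example.org. 3600 IN NSEC3PARAM 1 0 12 -")
	if err != nil {
		panic(err)
	}
	param := rr.(*dns.NSEC3PARAM)
	fmt.Println(param.SaltLength, param.Salt) // expect 0 and an empty salt
}
```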
@@ -9,12 +9,19 @@ import (
 	"io"
 	"net"
 	"sync"
+	"sync/atomic"
 	"time"
 )
 
-// Maximum number of TCP queries before we close the socket.
+// Default maximum number of TCP queries before we close the socket.
 const maxTCPQueries = 128
 
+// Interval for stop worker if no load
+const idleWorkerTimeout = 10 * time.Second
+
+// Maximum number of workers
+const maxWorkersCount = 10000
+
 // Handler is implemented by any value that implements ServeDNS.
 type Handler interface {
 	ServeDNS(w ResponseWriter, r *Msg)
@@ -43,6 +50,7 @@ type ResponseWriter interface {
 }
 
 type response struct {
+	msg            []byte
 	hijacked       bool // connection has been hijacked by handler
 	tsigStatus     error
 	tsigTimersOnly bool
@@ -51,7 +59,6 @@ type response struct {
 	udp            *net.UDPConn // i/o connection if UDP was used
 	tcp            net.Conn     // i/o connection if TCP was used
 	udpSession     *SessionUDP  // oob data to get egress interface right
-	remoteAddr     net.Addr     // address of the client
 	writer         Writer       // writer to output the raw DNS bits
 }
@@ -296,12 +303,63 @@ type Server struct {
 	DecorateReader DecorateReader
 	// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
 	DecorateWriter DecorateWriter
+	// Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
+	MaxTCPQueries int
+
+	// UDP packet or TCP connection queue
+	queue chan *response
+	// Workers count
+	workersCount int32
 	// Shutdown handling
 	lock    sync.RWMutex
 	started bool
 }
 
+func (srv *Server) worker(w *response) {
+	srv.serve(w)
+
+	for {
+		count := atomic.LoadInt32(&srv.workersCount)
+		if count > maxWorkersCount {
+			return
+		}
+		if atomic.CompareAndSwapInt32(&srv.workersCount, count, count+1) {
+			break
+		}
+	}
+
+	defer atomic.AddInt32(&srv.workersCount, -1)
+
+	inUse := false
+	timeout := time.NewTimer(idleWorkerTimeout)
+	defer timeout.Stop()
+LOOP:
+	for {
+		select {
+		case w, ok := <-srv.queue:
+			if !ok {
+				break LOOP
+			}
+			inUse = true
+			srv.serve(w)
+		case <-timeout.C:
+			if !inUse {
+				break LOOP
+			}
+			inUse = false
+			timeout.Reset(idleWorkerTimeout)
+		}
+	}
+}
+
+func (srv *Server) spawnWorker(w *response) {
+	select {
+	case srv.queue <- w:
+	default:
+		go srv.worker(w)
+	}
+}
+
 // ListenAndServe starts a nameserver on the configured address in *Server.
 func (srv *Server) ListenAndServe() error {
 	srv.lock.Lock()
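Editorial note: the new MaxTCPQueries knob above is a public field on dns.Server. A hedged sketch of using it to remove the per-connection cap (address and the trivial handler are placeholders):

```go
package main

import "github.com/miekg/dns"

func main() {
	dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})

	srv := &dns.Server{
		Addr:          ":8053",
		Net:           "tcp",
		MaxTCPQueries: -1, // -1 lifts the 128-query default per TCP connection
	}
	if err := srv.ListenAndServe(); err != nil {
		panic(err)
	}
}
```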
@@ -309,6 +367,7 @@ func (srv *Server) ListenAndServe() error {
 	if srv.started {
 		return &Error{err: "server already started"}
 	}
+
 	addr := srv.Addr
 	if addr == "" {
 		addr = ":domain"
@@ -316,6 +375,8 @@ func (srv *Server) ListenAndServe() error {
 	if srv.UDPSize == 0 {
 		srv.UDPSize = MinMsgSize
 	}
+	srv.queue = make(chan *response)
+	defer close(srv.queue)
 	switch srv.Net {
 	case "tcp", "tcp4", "tcp6":
 		a, err := net.ResolveTCPAddr(srv.Net, addr)
@@ -380,8 +441,11 @@ func (srv *Server) ActivateAndServe() error {
 	if srv.started {
 		return &Error{err: "server already started"}
 	}
 
 	pConn := srv.PacketConn
 	l := srv.Listener
+	srv.queue = make(chan *response)
+	defer close(srv.queue)
 	if pConn != nil {
 		if srv.UDPSize == 0 {
 			srv.UDPSize = MinMsgSize
@@ -439,7 +503,6 @@ func (srv *Server) getReadTimeout() time.Duration {
 }
 
 // serveTCP starts a TCP listener for the server.
-// Each request is handled in a separate goroutine.
 func (srv *Server) serveTCP(l net.Listener) error {
 	defer l.Close()
 
@@ -447,17 +510,6 @@ func (srv *Server) serveTCP(l net.Listener) error {
 		srv.NotifyStartedFunc()
 	}
 
-	reader := Reader(&defaultReader{srv})
-	if srv.DecorateReader != nil {
-		reader = srv.DecorateReader(reader)
-	}
-
-	handler := srv.Handler
-	if handler == nil {
-		handler = DefaultServeMux
-	}
-	rtimeout := srv.getReadTimeout()
-	// deadline is not used here
 	for {
 		rw, err := l.Accept()
 		srv.lock.RLock()
@@ -472,19 +524,11 @@ func (srv *Server) serveTCP(l net.Listener) error {
 			}
 			return err
 		}
-		go func() {
-			m, err := reader.ReadTCP(rw, rtimeout)
-			if err != nil {
-				rw.Close()
-				return
-			}
-			srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
-		}()
+		srv.spawnWorker(&response{tsigSecret: srv.TsigSecret, tcp: rw})
 	}
 }
 
 // serveUDP starts a UDP listener for the server.
-// Each request is handled in a separate goroutine.
 func (srv *Server) serveUDP(l *net.UDPConn) error {
 	defer l.Close()
 
@@ -497,10 +541,6 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
 		reader = srv.DecorateReader(reader)
 	}
 
-	handler := srv.Handler
-	if handler == nil {
-		handler = DefaultServeMux
-	}
 	rtimeout := srv.getReadTimeout()
 	// deadline is not used here
 	for {
@@ -520,80 +560,98 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
 		if len(m) < headerSize {
 			continue
 		}
-		go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
+		srv.spawnWorker(&response{msg: m, tsigSecret: srv.TsigSecret, udp: l, udpSession: s})
 	}
 }
 
-// Serve a new connection.
-func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) {
-	w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
+func (srv *Server) serve(w *response) {
 	if srv.DecorateWriter != nil {
 		w.writer = srv.DecorateWriter(w)
 	} else {
 		w.writer = w
 	}
 
-	q := 0 // counter for the amount of TCP queries we get
+	if w.udp != nil {
+		// serve UDP
+		srv.serveDNS(w)
+		return
+	}
 
 	reader := Reader(&defaultReader{srv})
 	if srv.DecorateReader != nil {
 		reader = srv.DecorateReader(reader)
 	}
-Redo:
+
+	defer func() {
+		if !w.hijacked {
+			w.Close()
+		}
+	}()
+
+	idleTimeout := tcpIdleTimeout
+	if srv.IdleTimeout != nil {
+		idleTimeout = srv.IdleTimeout()
+	}
+
+	timeout := srv.getReadTimeout()
+
+	limit := srv.MaxTCPQueries
+	if limit == 0 {
+		limit = maxTCPQueries
+	}
+
+	for q := 0; q < limit || limit == -1; q++ {
+		var err error
+		w.msg, err = reader.ReadTCP(w.tcp, timeout)
+		if err != nil {
+			// TODO(tmthrgd): handle error
+			break
+		}
+		srv.serveDNS(w)
+		if w.tcp == nil {
+			break // Close() was called
+		}
+		if w.hijacked {
+			break // client will call Close() themselves
+		}
+		// The first read uses the read timeout, the rest use the
+		// idle timeout.
+		timeout = idleTimeout
+	}
+}
+
+func (srv *Server) serveDNS(w *response) {
 	req := new(Msg)
-	err := req.Unpack(m)
+	err := req.Unpack(w.msg)
 	if err != nil { // Send a FormatError back
 		x := new(Msg)
 		x.SetRcodeFormatError(req)
 		w.WriteMsg(x)
-		goto Exit
+		return
 	}
 	if !srv.Unsafe && req.Response {
-		goto Exit
+		return
 	}
 
 	w.tsigStatus = nil
 	if w.tsigSecret != nil {
 		if t := req.IsTsig(); t != nil {
-			secret := t.Hdr.Name
-			if _, ok := w.tsigSecret[secret]; !ok {
-				w.tsigStatus = ErrKeyAlg
+			if secret, ok := w.tsigSecret[t.Hdr.Name]; ok {
+				w.tsigStatus = TsigVerify(w.msg, secret, "", false)
+			} else {
+				w.tsigStatus = ErrSecret
 			}
-			w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false)
 			w.tsigTimersOnly = false
 			w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
 		}
 	}
-	h.ServeDNS(w, req) // Writes back to the client
 
-Exit:
-	if w.tcp == nil {
-		return
-	}
-	// TODO(miek): make this number configurable?
-	if q > maxTCPQueries { // close socket after this many queries
-		w.Close()
-		return
+	handler := srv.Handler
+	if handler == nil {
+		handler = DefaultServeMux
 	}
 
-	if w.hijacked {
-		return // client calls Close()
-	}
-	if u != nil { // UDP, "close" and return
-		w.Close()
-		return
-	}
-	idleTimeout := tcpIdleTimeout
-	if srv.IdleTimeout != nil {
-		idleTimeout = srv.IdleTimeout()
-	}
-	m, err = reader.ReadTCP(w.tcp, idleTimeout)
-	if err == nil {
-		q++
-		goto Redo
-	}
-	w.Close()
-	return
+	handler.ServeDNS(w, req) // Writes back to the client
 }
 
 func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
@@ -696,7 +754,12 @@ func (w *response) LocalAddr() net.Addr {
 }
 
 // RemoteAddr implements the ResponseWriter.RemoteAddr method.
-func (w *response) RemoteAddr() net.Addr { return w.remoteAddr }
+func (w *response) RemoteAddr() net.Addr {
+	if w.tcp != nil {
+		return w.tcp.RemoteAddr()
+	}
+	return w.udpSession.RemoteAddr()
+}
 
 // TsigStatus implements the ResponseWriter.TsigStatus method.
 func (w *response) TsigStatus() error { return w.tsigStatus }
@@ -226,7 +226,7 @@ func main() {
 			continue
 		}
 		fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name)
-		fields := []string{"*rr.Hdr.copyHeader()"}
+		fields := []string{"rr.Hdr"}
 		for i := 1; i < st.NumFields(); i++ {
 			f := st.Field(i).Name()
 			if sl, ok := st.Field(i).Type().(*types.Slice); ok {
@@ -9,6 +9,22 @@ import (
 	"golang.org/x/net/ipv6"
 )
 
+// This is the required size of the OOB buffer to pass to ReadMsgUDP.
+var udpOOBSize = func() int {
+	// We can't know whether we'll get an IPv4 control message or an
+	// IPv6 control message ahead of time. To get around this, we size
+	// the buffer equal to the largest of the two.
+
+	oob4 := ipv4.NewControlMessage(ipv4.FlagDst | ipv4.FlagInterface)
+	oob6 := ipv6.NewControlMessage(ipv6.FlagDst | ipv6.FlagInterface)
+
+	if len(oob4) > len(oob6) {
+		return len(oob4)
+	}
+
+	return len(oob6)
+}()
+
 // SessionUDP holds the remote address and the associated
 // out-of-band data.
 type SessionUDP struct {
@@ -22,7 +38,7 @@ func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
 // ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
 // net.UDPAddr.
 func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
-	oob := make([]byte, 40)
+	oob := make([]byte, udpOOBSize)
 	n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
 	if err != nil {
 		return n, nil, err
@@ -53,18 +69,15 @@ func parseDstFromOOB(oob []byte) net.IP {
 	// Start with IPv6 and then fallback to IPv4
 	// TODO(fastest963): Figure out a way to prefer one or the other. Looking at
 	// the lvl of the header for a 0 or 41 isn't cross-platform.
-	var dst net.IP
 	cm6 := new(ipv6.ControlMessage)
-	if cm6.Parse(oob) == nil {
-		dst = cm6.Dst
+	if cm6.Parse(oob) == nil && cm6.Dst != nil {
+		return cm6.Dst
 	}
-	if dst == nil {
-		cm4 := new(ipv4.ControlMessage)
-		if cm4.Parse(oob) == nil {
-			dst = cm4.Dst
-		}
+	cm4 := new(ipv4.ControlMessage)
+	if cm4.Parse(oob) == nil && cm4.Dst != nil {
+		return cm4.Dst
 	}
-	return dst
+	return nil
 }
 
 // correctSource takes oob data and returns new oob data with the Src equal to the Dst
@@ -3,7 +3,7 @@ package dns
 import "fmt"
 
 // Version is current version of this library.
-var Version = V{1, 0, 4}
+var Version = V{1, 0, 7}
 
 // V holds the version of this library.
 type V struct {
@@ -2,117 +2,154 @@
 
 package dns
 
-func compressionLenHelperType(c map[string]int, r RR) {
+func compressionLenHelperType(c map[string]int, r RR, initLen int) int {
+	currentLen := initLen
 	switch x := r.(type) {
 	case *AFSDB:
-		compressionLenHelper(c, x.Hostname)
+		currentLen -= len(x.Hostname) + 1
+		currentLen += compressionLenHelper(c, x.Hostname, currentLen)
 	case *CNAME:
-		compressionLenHelper(c, x.Target)
+		currentLen -= len(x.Target) + 1
+		currentLen += compressionLenHelper(c, x.Target, currentLen)
 	case *DNAME:
-		compressionLenHelper(c, x.Target)
+		currentLen -= len(x.Target) + 1
+		currentLen += compressionLenHelper(c, x.Target, currentLen)
 	case *HIP:
 		for i := range x.RendezvousServers {
-			compressionLenHelper(c, x.RendezvousServers[i])
+			currentLen -= len(x.RendezvousServers[i]) + 1
+		}
+		for i := range x.RendezvousServers {
+			currentLen += compressionLenHelper(c, x.RendezvousServers[i], currentLen)
 		}
 	case *KX:
-		compressionLenHelper(c, x.Exchanger)
+		currentLen -= len(x.Exchanger) + 1
+		currentLen += compressionLenHelper(c, x.Exchanger, currentLen)
 	case *LP:
-		compressionLenHelper(c, x.Fqdn)
+		currentLen -= len(x.Fqdn) + 1
+		currentLen += compressionLenHelper(c, x.Fqdn, currentLen)
 	case *MB:
-		compressionLenHelper(c, x.Mb)
+		currentLen -= len(x.Mb) + 1
+		currentLen += compressionLenHelper(c, x.Mb, currentLen)
 	case *MD:
-		compressionLenHelper(c, x.Md)
+		currentLen -= len(x.Md) + 1
+		currentLen += compressionLenHelper(c, x.Md, currentLen)
 	case *MF:
-		compressionLenHelper(c, x.Mf)
+		currentLen -= len(x.Mf) + 1
+		currentLen += compressionLenHelper(c, x.Mf, currentLen)
 	case *MG:
-		compressionLenHelper(c, x.Mg)
+		currentLen -= len(x.Mg) + 1
+		currentLen += compressionLenHelper(c, x.Mg, currentLen)
 	case *MINFO:
-		compressionLenHelper(c, x.Rmail)
-		compressionLenHelper(c, x.Email)
+		currentLen -= len(x.Rmail) + 1
+		currentLen += compressionLenHelper(c, x.Rmail, currentLen)
+		currentLen -= len(x.Email) + 1
+		currentLen += compressionLenHelper(c, x.Email, currentLen)
 	case *MR:
-		compressionLenHelper(c, x.Mr)
+		currentLen -= len(x.Mr) + 1
+		currentLen += compressionLenHelper(c, x.Mr, currentLen)
 	case *MX:
-		compressionLenHelper(c, x.Mx)
+		currentLen -= len(x.Mx) + 1
+		currentLen += compressionLenHelper(c, x.Mx, currentLen)
 	case *NAPTR:
-		compressionLenHelper(c, x.Replacement)
+		currentLen -= len(x.Replacement) + 1
+		currentLen += compressionLenHelper(c, x.Replacement, currentLen)
 	case *NS:
-		compressionLenHelper(c, x.Ns)
+		currentLen -= len(x.Ns) + 1
+		currentLen += compressionLenHelper(c, x.Ns, currentLen)
 	case *NSAPPTR:
-		compressionLenHelper(c, x.Ptr)
+		currentLen -= len(x.Ptr) + 1
+		currentLen += compressionLenHelper(c, x.Ptr, currentLen)
 	case *NSEC:
-		compressionLenHelper(c, x.NextDomain)
+		currentLen -= len(x.NextDomain) + 1
+		currentLen += compressionLenHelper(c, x.NextDomain, currentLen)
 	case *PTR:
-		compressionLenHelper(c, x.Ptr)
+		currentLen -= len(x.Ptr) + 1
+		currentLen += compressionLenHelper(c, x.Ptr, currentLen)
 	case *PX:
-		compressionLenHelper(c, x.Map822)
-		compressionLenHelper(c, x.Mapx400)
+		currentLen -= len(x.Map822) + 1
+		currentLen += compressionLenHelper(c, x.Map822, currentLen)
+		currentLen -= len(x.Mapx400) + 1
+		currentLen += compressionLenHelper(c, x.Mapx400, currentLen)
 	case *RP:
-		compressionLenHelper(c, x.Mbox)
-		compressionLenHelper(c, x.Txt)
+		currentLen -= len(x.Mbox) + 1
+		currentLen += compressionLenHelper(c, x.Mbox, currentLen)
+		currentLen -= len(x.Txt) + 1
+		currentLen += compressionLenHelper(c, x.Txt, currentLen)
 	case *RRSIG:
-		compressionLenHelper(c, x.SignerName)
+		currentLen -= len(x.SignerName) + 1
+		currentLen += compressionLenHelper(c, x.SignerName, currentLen)
 	case *RT:
-		compressionLenHelper(c, x.Host)
+		currentLen -= len(x.Host) + 1
+		currentLen += compressionLenHelper(c, x.Host, currentLen)
 	case *SIG:
-		compressionLenHelper(c, x.SignerName)
+		currentLen -= len(x.SignerName) + 1
+		currentLen += compressionLenHelper(c, x.SignerName, currentLen)
 	case *SOA:
-		compressionLenHelper(c, x.Ns)
-		compressionLenHelper(c, x.Mbox)
+		currentLen -= len(x.Ns) + 1
+		currentLen += compressionLenHelper(c, x.Ns, currentLen)
+		currentLen -= len(x.Mbox) + 1
+		currentLen += compressionLenHelper(c, x.Mbox, currentLen)
 	case *SRV:
-		compressionLenHelper(c, x.Target)
+		currentLen -= len(x.Target) + 1
+		currentLen += compressionLenHelper(c, x.Target, currentLen)
 	case *TALINK:
-		compressionLenHelper(c, x.PreviousName)
-		compressionLenHelper(c, x.NextName)
+		currentLen -= len(x.PreviousName) + 1
+		currentLen += compressionLenHelper(c, x.PreviousName, currentLen)
+		currentLen -= len(x.NextName) + 1
+		currentLen += compressionLenHelper(c, x.NextName, currentLen)
 	case *TKEY:
-		compressionLenHelper(c, x.Algorithm)
+		currentLen -= len(x.Algorithm) + 1
+		currentLen += compressionLenHelper(c, x.Algorithm, currentLen)
 	case *TSIG:
-		compressionLenHelper(c, x.Algorithm)
+		currentLen -= len(x.Algorithm) + 1
+		currentLen += compressionLenHelper(c, x.Algorithm, currentLen)
 	}
+	return currentLen - initLen
 }
 
-func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
+func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) {
 	switch x := r.(type) {
 	case *AFSDB:
-		k1, ok1 := compressionLenSearch(c, x.Hostname)
-		return k1, ok1
+		k1, ok1, sz1 := compressionLenSearch(c, x.Hostname)
+		return k1, ok1, sz1
 	case *CNAME:
-		k1, ok1 := compressionLenSearch(c, x.Target)
-		return k1, ok1
+		k1, ok1, sz1 := compressionLenSearch(c, x.Target)
+		return k1, ok1, sz1
 	case *MB:
-		k1, ok1 := compressionLenSearch(c, x.Mb)
-		return k1, ok1
+		k1, ok1, sz1 := compressionLenSearch(c, x.Mb)
+		return k1, ok1, sz1
 	case *MD:
-		k1, ok1 := compressionLenSearch(c, x.Md)
-		return k1, ok1
+		k1, ok1, sz1 := compressionLenSearch(c, x.Md)
+		return k1, ok1, sz1
 	case *MF:
-		k1, ok1 := compressionLenSearch(c, x.Mf)
-		return k1, ok1
+		k1, ok1, sz1 := compressionLenSearch(c, x.Mf)
+		return k1, ok1, sz1
 	case *MG:
-		k1, ok1 := compressionLenSearch(c, x.Mg)
-		return k1, ok1
+		k1, ok1, sz1 := compressionLenSearch(c, x.Mg)
+		return k1, ok1, sz1
 	case *MINFO:
-		k1, ok1 := compressionLenSearch(c, x.Rmail)
-		k2, ok2 := compressionLenSearch(c, x.Email)
-		return k1 + k2, ok1 && ok2
+		k1, ok1, sz1 := compressionLenSearch(c, x.Rmail)
+		k2, ok2, sz2 := compressionLenSearch(c, x.Email)
+		return k1 + k2, ok1 && ok2, sz1 + sz2
 	case *MR:
-		k1, ok1 := compressionLenSearch(c, x.Mr)
-		return k1, ok1
+		k1, ok1, sz1 := compressionLenSearch(c, x.Mr)
+		return k1, ok1, sz1
 	case *MX:
-		k1, ok1 := compressionLenSearch(c, x.Mx)
-		return k1, ok1
+		k1, ok1, sz1 := compressionLenSearch(c, x.Mx)
+		return k1, ok1, sz1
 	case *NS:
-		k1, ok1 := compressionLenSearch(c, x.Ns)
-		return k1, ok1
+		k1, ok1, sz1 := compressionLenSearch(c, x.Ns)
+		return k1, ok1, sz1
 	case *PTR:
-		k1, ok1 := compressionLenSearch(c, x.Ptr)
-		return k1, ok1
+		k1, ok1, sz1 := compressionLenSearch(c, x.Ptr)
+		return k1, ok1, sz1
 	case *RT:
-		k1, ok1 := compressionLenSearch(c, x.Host)
-		return k1, ok1
+		k1, ok1, sz1 := compressionLenSearch(c, x.Host)
+		return k1, ok1, sz1
 	case *SOA:
-		k1, ok1 := compressionLenSearch(c, x.Ns)
-		k2, ok2 := compressionLenSearch(c, x.Mbox)
-		return k1 + k2, ok1 && ok2
+		k1, ok1, sz1 := compressionLenSearch(c, x.Ns)
+		k2, ok2, sz2 := compressionLenSearch(c, x.Mbox)
+		return k1 + k2, ok1 && ok2, sz1 + sz2
 	}
-	return 0, false
+	return 0, false, 0
 }
@@ -649,215 +649,215 @@ func (rr *X25) len() int {
 
 // copy() functions
 func (rr *A) copy() RR {
-	return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)}
+	return &A{rr.Hdr, copyIP(rr.A)}
 }
 func (rr *AAAA) copy() RR {
-	return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)}
+	return &AAAA{rr.Hdr, copyIP(rr.AAAA)}
 }
 func (rr *AFSDB) copy() RR {
-	return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname}
+	return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname}
 }
 func (rr *ANY) copy() RR {
-	return &ANY{*rr.Hdr.copyHeader()}
+	return &ANY{rr.Hdr}
 }
 func (rr *AVC) copy() RR {
 	Txt := make([]string, len(rr.Txt))
 	copy(Txt, rr.Txt)
-	return &AVC{*rr.Hdr.copyHeader(), Txt}
+	return &AVC{rr.Hdr, Txt}
 }
 func (rr *CAA) copy() RR {
-	return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value}
+	return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value}
 }
 func (rr *CERT) copy() RR {
-	return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
+	return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
 }
 func (rr *CNAME) copy() RR {
-	return &CNAME{*rr.Hdr.copyHeader(), rr.Target}
+	return &CNAME{rr.Hdr, rr.Target}
 }
 func (rr *CSYNC) copy() RR {
 	TypeBitMap := make([]uint16, len(rr.TypeBitMap))
 	copy(TypeBitMap, rr.TypeBitMap)
-	return &CSYNC{*rr.Hdr.copyHeader(), rr.Serial, rr.Flags, TypeBitMap}
+	return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap}
 }
 func (rr *DHCID) copy() RR {
-	return &DHCID{*rr.Hdr.copyHeader(), rr.Digest}
+	return &DHCID{rr.Hdr, rr.Digest}
 }
 func (rr *DNAME) copy() RR {
-	return &DNAME{*rr.Hdr.copyHeader(), rr.Target}
+	return &DNAME{rr.Hdr, rr.Target}
 }
 func (rr *DNSKEY) copy() RR {
-	return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
+	return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
 }
 func (rr *DS) copy() RR {
-	return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
+	return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
 }
 func (rr *EID) copy() RR {
-	return &EID{*rr.Hdr.copyHeader(), rr.Endpoint}
+	return &EID{rr.Hdr, rr.Endpoint}
 }
 func (rr *EUI48) copy() RR {
-	return &EUI48{*rr.Hdr.copyHeader(), rr.Address}
+	return &EUI48{rr.Hdr, rr.Address}
 }
 func (rr *EUI64) copy() RR {
-	return &EUI64{*rr.Hdr.copyHeader(), rr.Address}
+	return &EUI64{rr.Hdr, rr.Address}
 }
 func (rr *GID) copy() RR {
-	return &GID{*rr.Hdr.copyHeader(), rr.Gid}
+	return &GID{rr.Hdr, rr.Gid}
 }
 func (rr *GPOS) copy() RR {
-	return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude}
+	return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude}
 }
 func (rr *HINFO) copy() RR {
-	return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os}
+	return &HINFO{rr.Hdr, rr.Cpu, rr.Os}
 }
 func (rr *HIP) copy() RR {
 	RendezvousServers := make([]string, len(rr.RendezvousServers))
 	copy(RendezvousServers, rr.RendezvousServers)
-	return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
+	return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
 }
 func (rr *KX) copy() RR {
-	return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger}
+	return &KX{rr.Hdr, rr.Preference, rr.Exchanger}
 }
 func (rr *L32) copy() RR {
-	return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)}
+	return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)}
 }
 func (rr *L64) copy() RR {
-	return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64}
+	return &L64{rr.Hdr, rr.Preference, rr.Locator64}
 }
 func (rr *LOC) copy() RR {
-	return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
+	return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
 }
 func (rr *LP) copy() RR {
-	return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn}
+	return &LP{rr.Hdr, rr.Preference, rr.Fqdn}
 }
 func (rr *MB) copy() RR {
-	return &MB{*rr.Hdr.copyHeader(), rr.Mb}
+	return &MB{rr.Hdr, rr.Mb}
 }
 func (rr *MD) copy() RR {
-	return &MD{*rr.Hdr.copyHeader(), rr.Md}
+	return &MD{rr.Hdr, rr.Md}
 }
 func (rr *MF) copy() RR {
-	return &MF{*rr.Hdr.copyHeader(), rr.Mf}
+	return &MF{rr.Hdr, rr.Mf}
 }
 func (rr *MG) copy() RR {
-	return &MG{*rr.Hdr.copyHeader(), rr.Mg}
+	return &MG{rr.Hdr, rr.Mg}
 }
 func (rr *MINFO) copy() RR {
-	return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email}
+	return &MINFO{rr.Hdr, rr.Rmail, rr.Email}
 }
 func (rr *MR) copy() RR {
-	return &MR{*rr.Hdr.copyHeader(), rr.Mr}
+	return &MR{rr.Hdr, rr.Mr}
 }
 func (rr *MX) copy() RR {
-	return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx}
+	return &MX{rr.Hdr, rr.Preference, rr.Mx}
 }
 func (rr *NAPTR) copy() RR {
-	return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
+	return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
 }
 func (rr *NID) copy() RR {
-	return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID}
+	return &NID{rr.Hdr, rr.Preference, rr.NodeID}
 }
 func (rr *NIMLOC) copy() RR {
-	return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator}
+	return &NIMLOC{rr.Hdr, rr.Locator}
 }
 func (rr *NINFO) copy() RR {
 	ZSData := make([]string, len(rr.ZSData))
 	copy(ZSData, rr.ZSData)
-	return &NINFO{*rr.Hdr.copyHeader(), ZSData}
+	return &NINFO{rr.Hdr, ZSData}
 }
 func (rr *NS) copy() RR {
-	return &NS{*rr.Hdr.copyHeader(), rr.Ns}
+	return &NS{rr.Hdr, rr.Ns}
 }
 func (rr *NSAPPTR) copy() RR {
-	return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr}
+	return &NSAPPTR{rr.Hdr, rr.Ptr}
 }
 func (rr *NSEC) copy() RR {
 	TypeBitMap := make([]uint16, len(rr.TypeBitMap))
 	copy(TypeBitMap, rr.TypeBitMap)
-	return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap}
+	return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap}
 }
 func (rr *NSEC3) copy() RR {
 	TypeBitMap := make([]uint16, len(rr.TypeBitMap))
 	copy(TypeBitMap, rr.TypeBitMap)
-	return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap}
+	return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap}
 }
 func (rr *NSEC3PARAM) copy() RR {
-	return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
+	return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
 }
 func (rr *OPENPGPKEY) copy() RR {
-	return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey}
+	return &OPENPGPKEY{rr.Hdr, rr.PublicKey}
 }
 func (rr *OPT) copy() RR {
 	Option := make([]EDNS0, len(rr.Option))
 	copy(Option, rr.Option)
-	return &OPT{*rr.Hdr.copyHeader(), Option}
+	return &OPT{rr.Hdr, Option}
 }
 func (rr *PTR) copy() RR {
-	return &PTR{*rr.Hdr.copyHeader(), rr.Ptr}
+	return &PTR{rr.Hdr, rr.Ptr}
 }
 func (rr *PX) copy() RR {
-	return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400}
+	return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400}
 }
 func (rr *RFC3597) copy() RR {
-	return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata}
+	return &RFC3597{rr.Hdr, rr.Rdata}
 }
 func (rr *RKEY) copy() RR {
-	return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
+	return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
 }
 func (rr *RP) copy() RR {
-	return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt}
+	return &RP{rr.Hdr, rr.Mbox, rr.Txt}
 }
 func (rr *RRSIG) copy() RR {
-	return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
+	return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
 }
 func (rr *RT) copy() RR {
-	return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host}
+	return &RT{rr.Hdr, rr.Preference, rr.Host}
 }
 func (rr *SMIMEA) copy() RR {
-	return &SMIMEA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
+	return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
 }
 func (rr *SOA) copy() RR {
-	return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
+	return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
 }
 func (rr *SPF) copy() RR {
 	Txt := make([]string, len(rr.Txt))
 	copy(Txt, rr.Txt)
-	return &SPF{*rr.Hdr.copyHeader(), Txt}
+	return &SPF{rr.Hdr, Txt}
 }
 func (rr *SRV) copy() RR {
-	return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target}
+	return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target}
 }
 func (rr *SSHFP) copy() RR {
-	return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint}
+	return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint}
 }
 func (rr *TA) copy() RR {
-	return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
+	return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
 }
 func (rr *TALINK) copy() RR {
-	return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName}
+	return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName}
 }
 func (rr *TKEY) copy() RR {
-	return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
+	return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
 }
 func (rr *TLSA) copy() RR {
-	return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
+	return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
 }
 func (rr *TSIG) copy() RR {
-	return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
+	return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
 }
 func (rr *TXT) copy() RR {
 	Txt := make([]string, len(rr.Txt))
 	copy(Txt, rr.Txt)
-	return &TXT{*rr.Hdr.copyHeader(), Txt}
+	return &TXT{rr.Hdr, Txt}
 }
 func (rr *UID) copy() RR {
-	return &UID{*rr.Hdr.copyHeader(), rr.Uid}
+	return &UID{rr.Hdr, rr.Uid}
 }
 func (rr *UINFO) copy() RR {
-	return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo}
+	return &UINFO{rr.Hdr, rr.Uinfo}
 }
 func (rr *URI) copy() RR {
-	return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target}
+	return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target}
 }
 func (rr *X25) copy() RR {
-	return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress}
+	return &X25{rr.Hdr, rr.PSDNAddress}
 }
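Editorial note: the generated copy() methods above now copy the RR_Header by value instead of going through copyHeader(); the exported deep-copy behaviour is unchanged. A small sketch of the contract these methods back (the record data is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	orig, err := dns.NewRR("example.org. 300 IN MX 10 mail.example.org.")
	if err != nil {
		panic(err)
	}
	dup := dns.Copy(orig) // dispatches to the per-type copy() under the hood

	// Mutating the copy's header must not touch the original.
	dup.Header().Ttl = 60
	fmt.Println(orig.Header().Ttl, dup.Header().Ttl) // 300 60
}
```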
@@ -77,7 +77,7 @@
 {"path":"github.com/hashicorp/raft-boltdb","checksumSHA1":"QAxukkv54/iIvLfsUP6IK4R0m/A=","revision":"d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee","revisionTime":"2015-02-01T20:08:39Z"},
 {"path":"github.com/hashicorp/serf/coordinate","checksumSHA1":"0PeWsO2aI+2PgVYlYlDPKfzCLEQ=","revision":"4b67f2c2b2bb5b748d934a6d48221062e43d2274","revisionTime":"2018-05-04T20:06:40Z"},
 {"path":"github.com/hashicorp/serf/serf","checksumSHA1":"QrT+nzyXsD/MmhTjjhcPdnALZ1I=","revision":"4b67f2c2b2bb5b748d934a6d48221062e43d2274","revisionTime":"2018-05-04T20:06:40Z"},
-{"path":"github.com/hashicorp/yamux","checksumSHA1":"NnWv17i1tpvBNJtpdRRWpE6j4LY=","revision":"2658be15c5f05e76244154714161f17e3e77de2e","revisionTime":"2018-03-14T20:07:45Z"},
+{"path":"github.com/hashicorp/yamux","checksumSHA1":"jJouEcBeEAO0ejDRJoT67jL8NjQ=","revision":"3520598351bb3500a49ae9563f5539666ae0a27c","revisionTime":"2018-06-04T19:48:46Z"},
 {"path":"github.com/joyent/triton-go","checksumSHA1":"LuvUVcxSYc0KkzpqNJArgiPMtNU=","revision":"7283d1d02f7a297bf2f068178a084c5bd090002c","revisionTime":"2018-04-27T15:22:50Z"},
 {"path":"github.com/joyent/triton-go/authentication","checksumSHA1":"yNrArK8kjkVkU0bunKlemd6dFkE=","revision":"7283d1d02f7a297bf2f068178a084c5bd090002c","revisionTime":"2018-04-27T15:22:50Z"},
 {"path":"github.com/joyent/triton-go/client","checksumSHA1":"nppv6i9E2yqdbQ6qJkahCwELSeI=","revision":"7283d1d02f7a297bf2f068178a084c5bd090002c","revisionTime":"2018-04-27T15:22:50Z"},
@@ -85,7 +85,7 @@
 {"path":"github.com/joyent/triton-go/errors","checksumSHA1":"d/Py6j/uMgOAFNFGpsQrNnSsO+k=","revision":"7283d1d02f7a297bf2f068178a084c5bd090002c","revisionTime":"2018-04-27T15:22:50Z"},
 {"path":"github.com/mattn/go-isatty","checksumSHA1":"xZuhljnmBysJPta/lMyYmJdujCg=","revision":"66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8","revisionTime":"2016-08-06T12:27:52Z"},
 {"path":"github.com/matttproud/golang_protobuf_extensions/pbutil","checksumSHA1":"bKMZjd2wPw13VwoE7mBeSv5djFA=","revision":"c12348ce28de40eed0136aa2b644d0ee0650e56c","revisionTime":"2016-04-24T11:30:07Z"},
-{"path":"github.com/miekg/dns","checksumSHA1":"XTeOihCDhjG6ltUKExoJ2uEzShk=","revision":"5364553f1ee9cddc7ac8b62dce148309c386695b","revisionTime":"2018-01-25T10:38:03Z","version":"v1.0.4","versionExact":"v1.0.4"},
+{"path":"github.com/miekg/dns","checksumSHA1":"ybBd9oDdeJEZj9YmIKGqaBVqZok=","revision":"e57bf427e68187a27e22adceac868350d7a7079b","revisionTime":"2018-05-16T07:59:02Z","version":"v1.0.7","versionExact":"v1.0.7"},
 {"path":"github.com/mitchellh/cli","checksumSHA1":"GzfpPGtV2UJH9hFsKwzGjKrhp/A=","revision":"dff723fff508858a44c1f4bd0911f00d73b0202f","revisionTime":"2017-09-05T22:10:09Z"},
 {"path":"github.com/mitchellh/copystructure","checksumSHA1":"86nE93o1VIND0Doe8PuhCXnhUx0=","revision":"cdac8253d00f2ecf0a0b19fbff173a9a72de4f82","revisionTime":"2016-08-04T03:23:30Z"},
 {"path":"github.com/mitchellh/go-homedir","checksumSHA1":"V/quM7+em2ByJbWBLOsEwnY3j/Q=","revision":"b8bc1bf767474819792c23f32d8286a45736f1c6","revisionTime":"2016-12-03T19:45:07Z"},
@@ -19,11 +19,17 @@ const (
	maxBackoffTime = 180 * time.Second
)

// Run is used to run a watch plan
func (p *Plan) Run(address string) error {
+	return p.RunWithConfig(address, nil)
+}
+
+// Run is used to run a watch plan
+func (p *Plan) RunWithConfig(address string, conf *consulapi.Config) error {
	// Setup the client
	p.address = address
-	conf := consulapi.DefaultConfig()
+	if conf == nil {
+		conf = consulapi.DefaultConfig()
+	}
	conf.Address = address
	conf.Datacenter = p.Datacenter
	conf.Token = p.Token
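The new `RunWithConfig` entry point lets a caller hand the watch an API client configuration instead of always falling back to `consulapi.DefaultConfig()`, which is what allows watches to work against an HTTPS-only agent. Below is a minimal sketch of how a caller might drive a watch through this path; the key name, agent address, port, and CA file path are placeholder assumptions, while `watch.Parse`, `Plan.Handler`, `Plan.LogOutput`, `Plan.RunWithConfig`, and the `api.Config`/`api.TLSConfig` fields are the public pieces this change relies on.

```go
package main

import (
	"fmt"
	"log"
	"os"

	consulapi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/watch"
)

func main() {
	// Parse a key watch; the key name here is only an example.
	plan, err := watch.Parse(map[string]interface{}{
		"type": "key",
		"key":  "foo/bar",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Handler fires every time the watched value changes.
	plan.Handler = func(idx uint64, raw interface{}) {
		fmt.Printf("index=%d value=%#v\n", idx, raw)
	}
	plan.LogOutput = os.Stderr

	// Build an API client config that speaks TLS to an HTTPS-only agent.
	// The address, port, and CA file path are placeholders.
	conf := consulapi.DefaultConfig()
	conf.Scheme = "https"
	conf.TLSConfig = consulapi.TLSConfig{CAFile: "/etc/consul.d/ca.pem"}

	// Before this change only Run(addr) existed, and it always used the
	// default plain-HTTP configuration.
	if err := plan.RunWithConfig("127.0.0.1:8501", conf); err != nil {
		log.Fatal(err)
	}
}
```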
@@ -0,0 +1,143 @@
---
layout: api
page_title: License - Operator - HTTP API
sidebar_current: api-operator-license
description: |-
  The /operator/license endpoints allow for setting and retrieving the Consul
  Enterprise License.
---

# License - Operator HTTP API

~> **Enterprise Only!** This API endpoint and functionality only exists in
Consul Enterprise. This is not present in the open source version of Consul.

The licensing functionality described here is available only in
[Consul Enterprise](https://www.hashicorp.com/products/consul/) version 1.1.0 and later.

## Getting the Consul License

This endpoint gets information about the current license.

| Method | Path                | Produces           |
| ------ | ------------------- | ------------------ |
| `GET`  | `/operator/license` | `application/json` |

The table below shows this endpoint's support for
[blocking queries](/api/index.html#blocking-queries),
[consistency modes](/api/index.html#consistency-modes), and
[required ACLs](/api/index.html#acls).

| Blocking Queries | Consistency Modes | ACL Required |
| ---------------- | ----------------- | ------------ |
| `NO`             | `all`             | `none`       |

### Parameters

- `dc` `(string: "")` - Specifies the datacenter whose license should be retrieved.
  This will default to the datacenter of the agent serving the HTTP request.
  This is specified as a URL query parameter.

### Sample Request

```text
$ curl \
    https://consul.rocks/v1/operator/license
```

### Sample Response

```json
{
  "Valid": true,
  "License": {
    "license_id": "2afbf681-0d1a-0649-cb6c-333ec9f0989c",
    "customer_id": "0259271d-8ffc-e85e-0830-c0822c1f5f2b",
    "installation_id": "*",
    "issue_time": "2018-05-21T20:03:35.911567355Z",
    "start_time": "2018-05-21T04:00:00Z",
    "expiration_time": "2019-05-22T03:59:59.999Z",
    "product": "consul",
    "flags": {
      "package": "premium"
    },
    "features": [
      "Automated Backups",
      "Automated Upgrades",
      "Enhanced Read Scalability",
      "Network Segments",
      "Redundancy Zone",
      "Advanced Network Federation"
    ],
    "temporary": false
  },
  "Warnings": []
}
```

## Updating the Consul License

This endpoint updates the Consul license and returns some of the
license contents as well as any warning messages regarding its validity.

| Method | Path                | Produces           |
| ------ | ------------------- | ------------------ |
| `PUT`  | `/operator/license` | `application/json` |

The table below shows this endpoint's support for
[blocking queries](/api/index.html#blocking-queries),
[consistency modes](/api/index.html#consistency-modes), and
[required ACLs](/api/index.html#acls).

| Blocking Queries | Consistency Modes | ACL Required     |
| ---------------- | ----------------- | ---------------- |
| `NO`             | `none`            | `operator:write` |

### Parameters

- `dc` `(string: "")` - Specifies the datacenter whose license should be updated.
  This will default to the datacenter of the agent serving the HTTP request.
  This is specified as a URL query parameter.

### Sample Payload

The payload is the raw license blob.

### Sample Request

```text
$ curl \
    --request PUT \
    --data @consul.license \
    https://consul.rocks/v1/operator/license
```

### Sample Response

```json
{
  "Valid": true,
  "License": {
    "license_id": "2afbf681-0d1a-0649-cb6c-333ec9f0989c",
    "customer_id": "0259271d-8ffc-e85e-0830-c0822c1f5f2b",
    "installation_id": "*",
    "issue_time": "2018-05-21T20:03:35.911567355Z",
    "start_time": "2018-05-21T04:00:00Z",
    "expiration_time": "2019-05-22T03:59:59.999Z",
    "product": "consul",
    "flags": {
      "package": "premium"
    },
    "features": [
      "Automated Backups",
      "Automated Upgrades",
      "Enhanced Read Scalability",
      "Network Segments",
      "Redundancy Zone",
      "Advanced Network Federation"
    ],
    "temporary": false
  },
  "Warnings": []
}
```
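The curl samples above cover the common case; for completeness, here is a small Go sketch that exercises both documented operations against a local agent over plain HTTP. Only the `/v1/operator/license` path and the `dc` query parameter come from the documentation above; the agent address, datacenter name, license file name, and error handling are illustrative assumptions.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

const agent = "http://127.0.0.1:8500" // assumed local agent address

func main() {
	// PUT the raw license blob (the "Sample Payload" above) for a datacenter.
	blob, err := ioutil.ReadFile("consul.license")
	if err != nil {
		log.Fatal(err)
	}
	req, err := http.NewRequest(http.MethodPut,
		agent+"/v1/operator/license?dc=dc1", bytes.NewReader(blob))
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// GET the current license and print the JSON document shown above.
	get, err := http.Get(agent + "/v1/operator/license")
	if err != nil {
		log.Fatal(err)
	}
	defer get.Body.Close()
	body, err := ioutil.ReadAll(get.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```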
@@ -25,7 +25,7 @@ of their own.

The agent is started with the [`consul agent`](/docs/commands/agent.html) command. This
command blocks, running forever or until told to quit. The agent command takes a variety
-of configuration options, but most have sane defaults.
+of [`configuration options`](/docs/agent/options.html#command-line-options), but most have sane defaults.

When running [`consul agent`](/docs/commands/agent.html), you should see output similar to this:
@@ -244,7 +244,7 @@ The configuration can also be provided by environment variables.
### Scaleway

This returns the first private IP address of all servers for the given
-`region` with the given `tag_key` and `tag_value`.
+`region` with the given `tag_name`.

```sh
$ consul agent -retry-join "provider=scaleway organization=my-org tag_name=consul-server token=... region=..."
@@ -259,8 +259,8 @@ $ consul agent -retry-join "provider=scaleway organization=my-org tag_name=consu
- `provider` (required) - the name of the provider ("scaleway" in this case).
- `region` (required) - the name of the region.
- `tag_name` (required) - the name of the tag to auto-join on.
-- `organization` (optional) - the organization access key to use for auth.
+- `organization` (required) - the organization access key to use for auth (equal to access key).
-- `token` (optional) - the token to use for auth.
+- `token` (required) - the token to use for auth.

### Joyent Triton
@@ -282,4 +282,4 @@ $ consul agent -retry-join "provider=triton account=testaccount url=https://us-s
- `url` (required) - the URL of the Triton api endpoint to use.
- `key_id` (required) - the key id to use.
- `tag_key` (optional) - the instance tag key to use.
- `tag_value` (optional) - the tag value to use.
@@ -364,14 +364,15 @@ will exit with an error at startup.
rarely need to be changed. Very busy clusters experiencing excessive disk IO may increase this value to reduce disk IO, and minimize
the chances of all servers taking snapshots at the same time. Increasing this trades off disk IO for disk space since the log will
grow much larger and the space in the raft.db file can't be reclaimed till the next snapshot. Servers may take longer to recover from
-crashes or failover if this is increased significantly as more logs will need to be replayed.
+crashes or failover if this is increased significantly as more logs will need to be replayed. In Consul 1.1.0 and later this
+defaults to 16384, and in prior versions it was set to 8192.

* <a name="_raft_snapshot_interval"></a><a href="#_raft_snapshot_interval">`-raft-snapshot-interval`</a> - This controls how often servers
check if they need to save a snapshot to disk. This is a low-level parameter that should rarely need to be changed. Very busy clusters
experiencing excessive disk IO may increase this value to reduce disk IO, and minimize the chances of all servers taking snapshots at the same time.
Increasing this trades off disk IO for disk space since the log will grow much larger and the space in the raft.db file can't be reclaimed
till the next snapshot. Servers may take longer to recover from crashes or failover if this is increased significantly as more logs
-will need to be replayed.
+will need to be replayed. In Consul 1.1.0 and later this defaults to `30s`, and in prior versions it was set to `5s`.

* <a name="_recursor"></a><a href="#_recursor">`-recursor`</a> - Specifies the address of an upstream DNS
server. This option may be provided multiple times, and is functionally
@@ -87,6 +87,12 @@ These metrics are used to monitor the health of specific Consul agents.
<td>requests</td>
<td>counter</td>
</tr>
+<tr>
+<td>`consul.client.rpc.error.catalog_register.<node>`</td>
+<td>This increments whenever a Consul agent receives an RPC error for a catalog register request.</td>
+<td>errors</td>
+<td>counter</td>
+</tr>
<tr>
<td>`consul.client.api.catalog_deregister.<node>`</td>
<td>This increments whenever a Consul agent receives a catalog de-register request.</td>

@@ -99,6 +105,12 @@ These metrics are used to monitor the health of specific Consul agents.
<td>requests</td>
<td>counter</td>
</tr>
+<tr>
+<td>`consul.client.rpc.error.catalog_deregister.<node>`</td>
+<td>This increments whenever a Consul agent receives an RPC error for a catalog de-register request.</td>
+<td>errors</td>
+<td>counter</td>
+</tr>
<tr>
<td>`consul.client.api.catalog_datacenters.<node>`</td>
<td>This increments whenever a Consul agent receives a request to list datacenters in the catalog.</td>

@@ -111,6 +123,12 @@ These metrics are used to monitor the health of specific Consul agents.
<td>requests</td>
<td>counter</td>
</tr>
+<tr>
+<td>`consul.client.rpc.error.catalog_datacenters.<node>`</td>
+<td>This increments whenever a Consul agent receives an RPC error for a request to list datacenters.</td>
+<td>errors</td>
+<td>counter</td>
+</tr>
<tr>
<td>`consul.client.api.catalog_nodes.<node>`</td>
<td>This increments whenever a Consul agent receives a request to list nodes from the catalog.</td>

@@ -123,6 +141,12 @@ These metrics are used to monitor the health of specific Consul agents.
<td>requests</td>
<td>counter</td>
</tr>
+<tr>
+<td>`consul.client.rpc.error.catalog_nodes.<node>`</td>
+<td>This increments whenever a Consul agent receives an RPC error for a request to list nodes.</td>
+<td>errors</td>
+<td>counter</td>
+</tr>
<tr>
<td>`consul.client.api.catalog_services.<node>`</td>
<td>This increments whenever a Consul agent receives a request to list services from the catalog.</td>

@@ -135,6 +159,12 @@ These metrics are used to monitor the health of specific Consul agents.
<td>requests</td>
<td>counter</td>
</tr>
+<tr>
+<td>`consul.client.rpc.error.catalog_services.<node>`</td>
+<td>This increments whenever a Consul agent receives an RPC error for a request to list services.</td>
+<td>errors</td>
+<td>counter</td>
+</tr>
<tr>
<td>`consul.client.api.catalog_service_nodes.<node>`</td>
<td>This increments whenever a Consul agent receives a request to list nodes offering a service.</td>

@@ -147,6 +177,12 @@ These metrics are used to monitor the health of specific Consul agents.
<td>requests</td>
<td>counter</td>
</tr>
+<tr>
+<td>`consul.client.rpc.error.catalog_service_nodes.<node>`</td>
+<td>This increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.</td>
+<td>errors</td>
+<td>counter</td>
+</tr>
<tr>
<td>`consul.client.api.catalog_node_services.<node>`</td>
<td>This increments whenever a Consul agent receives a request to list services registered in a node.</td>

@@ -159,6 +195,12 @@ These metrics are used to monitor the health of specific Consul agents.
<td>requests</td>
<td>counter</td>
</tr>
+<tr>
+<td>`consul.client.rpc.error.catalog_node_services.<node>`</td>
+<td>This increments whenever a Consul agent receives an RPC error for a request to list services in a node.</td>
+<td>errors</td>
+<td>counter</td>
+</tr>
<tr>
<td>`consul.runtime.num_goroutines`</td>
<td>This tracks the number of running goroutines and is a general load pressure indicator. This may burst from time to time but should return to a steady state value.</td>

@@ -443,7 +485,7 @@ These metrics are used to monitor the health of the Consul servers.
<td>timer</td>
</tr>
<tr>
-<td>`consul.prepared-query.execute`</td>
+<td>`consul.prepared-query.execute_remote`</td>
<td>This measures the time it takes to process a prepared query execute request that was forwarded to another datacenter.</td>
<td>ms</td>
<td>timer</td>

@@ -472,6 +514,12 @@ These metrics are used to monitor the health of the Consul servers.
<td>queries</td>
<td>counter</td>
</tr>
+<tr>
+<td>`consul.rpc.cross-dc`</td>
+<td>This increments when a server receives a (potentially blocking) cross datacenter RPC query.</td>
+<td>queries</td>
+<td>counter</td>
+</tr>
<tr>
<td>`consul.rpc.consistentRead`</td>
<td>This measures the time spent confirming that a consistent read can be performed.</td>
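The new `consul.client.rpc.error.*` counters are emitted through the agent's telemetry sinks, so they can be inspected the same way as the existing catalog counters. Below is a rough sketch of one way to look at them, assuming a local agent that exposes the standard `/v1/agent/metrics` endpoint and its usual JSON shape (a `Counters` array whose entries carry `Name` and `Count` fields); the agent address and the prefix filter are illustrative assumptions only.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Fetch the in-memory telemetry snapshot from an (assumed) local agent.
	resp, err := http.Get("http://127.0.0.1:8500/v1/agent/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Decode loosely so the sketch does not depend on exact struct fields.
	var snap struct {
		Counters []map[string]interface{}
	}
	if err := json.NewDecoder(resp.Body).Decode(&snap); err != nil {
		log.Fatal(err)
	}

	// Print only the RPC error counters documented in the table above.
	for _, c := range snap.Counters {
		name, _ := c["Name"].(string)
		if strings.HasPrefix(name, "consul.client.rpc.error.") {
			fmt.Printf("%s = %v\n", name, c["Count"])
		}
	}
}
```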
@@ -23,7 +23,7 @@ no arguments:

```text
$ consul
-usage: consul [--version] [--help] <command> [<args>]
+Usage: consul [--version] [--help] <command> [<args>]

Available commands are:
    agent          Runs a Consul agent

@@ -37,6 +37,7 @@ Available commands are:
    keyring        Manages gossip layer encryption keys
    kv             Interact with the key-value store
    leave          Gracefully leaves the Consul cluster and shuts down
+   license        Get/Put the Consul Enterprise license (Enterprise-only)
    lock           Execute a command holding a lock
    maint          Controls node or service maintenance mode
    members        Lists the members of a Consul cluster
@@ -0,0 +1,109 @@
---
layout: "docs"
page_title: "Commands: License"
sidebar_current: "docs-commands-license"
description: >
  The license command provides datacenter-level management of the Consul Enterprise license.
---

# Consul License

Command: `consul license`

<%= enterprise_alert :consul %>

The `license` command provides datacenter-level management of the Consul Enterprise license. This was added in Consul 1.1.0.

If ACLs are enabled then a token with operator privileges may be required in
order to use this command. Requests are forwarded internally to the leader
if required, so this can be run from any Consul node in a cluster. See the
[ACL Guide](/docs/guides/acl.html#operator) for more information.

```text
Usage: consul license <subcommand> [options] [args]

This command has subcommands for managing the Consul Enterprise license
Here are some simple examples, and more detailed examples are
available in the subcommands or the documentation.

Install a new license from a file:

    $ consul license put @consul.license

Install a new license from stdin:

    $ consul license put -

Install a new license from a string:

    $ consul license put "<license blob>"

Retrieve the current license:

    $ consul license get

For more examples, ask for subcommand help or view the documentation.

Subcommands:
    get    Get the current license
    put    Puts a new license in the datacenter
```

## put

This command sets the Consul Enterprise license.

Usage: `consul license put [options] LICENSE`

#### API Options

<%= partial "docs/commands/http_api_options_client" %>
<%= partial "docs/commands/http_api_options_server" %>

The output looks like this:

```
License is valid
License ID: 2afbf681-0d1a-0649-cb6c-333ec9f0989c
Customer ID: 0259271d-8ffc-e85e-0830-c0822c1f5f2b
Expires At: 2019-05-22 03:59:59.999 +0000 UTC
Datacenter: *
Package: premium
Licensed Features:
        Automated Backups
        Automated Upgrades
        Enhanced Read Scalability
        Network Segments
        Redundancy Zone
        Advanced Network Federation
```

## get

This command gets the Consul Enterprise license.

Usage: `consul license get [options]`

#### API Options

<%= partial "docs/commands/http_api_options_client" %>
<%= partial "docs/commands/http_api_options_server" %>

The output looks like this:

```
License is valid
License ID: 2afbf681-0d1a-0649-cb6c-333ec9f0989c
Customer ID: 0259271d-8ffc-e85e-0830-c0822c1f5f2b
Expires At: 2019-05-22 03:59:59.999 +0000 UTC
Datacenter: *
Package: premium
Licensed Features:
        Automated Backups
        Automated Upgrades
        Enhanced Read Scalability
        Network Segments
        Redundancy Zone
        Advanced Network Federation
```
@@ -23,3 +23,14 @@ increases both scalability and resilience. Features include:

These features are part of [Consul
Enterprise](https://www.hashicorp.com/consul.html).
+
+## Licensing
+
+Licensing capabilities were added to Consul Enterprise v1.1.0. The license is set
+once for a datacenter and will automatically propagate to all nodes within the
+datacenter over a period of time scaled between 1 and 20 minutes depending on the
+number of nodes in the datacenter. The license can be set via the
+[API](/api/operator/license.html) or the [CLI](/docs/commands/license.html). When
+Consul is first started, a 30 minute temporary license is available to allow for
+time to license the datacenter. The license should be set within ten minutes of
+starting the first Consul process to allow time for the license to propagate.
@@ -50,7 +50,7 @@ a copy of [`git`](https://www.git-scm.com/) in your `PATH`.
needed to compile Consul:

```shell
-$ make bootstrap
+$ make tools
```

1. Build Consul for your current system and put the binary in `./bin/`
@@ -85,6 +85,9 @@ description: |-
<li>
<a href="https://github.com/ryanbreen/git2consul">git2consul</a> - Mirror the contents of a Git repository into Consul KVs
</li>
+<li>
+<a href="https://github.com/miniclip/gonsul">Gonsul</a> - A Git to Consul standalone tool made in Go. Updates Consul KV from a repo with multiple strategies.
+</li>
<li>
<a href="https://github.com/amirkibbar/red-apple">gradle-consul-plugin</a> - A Consul Gradle plugin
</li>
@@ -51,6 +51,9 @@
<li<%= sidebar_current("api-operator-keyring") %>>
<a href="/api/operator/keyring.html">Keyring</a>
</li>
+<li<%= sidebar_current("api-operator-license") %>>
+<a href="/api/operator/license.html">License</a>
+</li>
<li<%= sidebar_current("api-operator-raft") %>>
<a href="/api/operator/raft.html">Raft</a>
</li>
@@ -115,6 +115,9 @@
<li<%= sidebar_current("docs-commands-leave") %>>
<a href="/docs/commands/leave.html">leave</a>
</li>
+<li<%= sidebar_current("docs-commands-license") %>>
+<a href="/docs/commands/license.html">license</a>
+</li>
<li<%= sidebar_current("docs-commands-lock") %>>
<a href="/docs/commands/lock.html">lock</a>
</li>