Merge pull request #1 from hashicorp/master

merging from hashicorp
Eric Connell 2014-08-01 13:04:12 -06:00
commit 0fc55a64e8
50 changed files with 1078 additions and 177 deletions


@ -1,3 +1,31 @@
## 0.3.2 (Unreleased)
IMPROVEMENTS:
* DNS case-insensitivity [GH-189]
## 0.3.1 (July 21, 2014)
FEATURES:
* Improved bootstrapping process, thanks to @robxu9
BUG FIXES:
* Fixed issue with service re-registration [GH-216]
* Fixed handling of `-rejoin` flag
* Restored 0.2 TLS behavior, thanks to @nelhage [GH-233]
* Fix the statsite flags, thanks to @nelhage [GH-243]
* Fixed filters on critical / non-passing checks [GH-241]
IMPROVEMENTS:
* UI Improvements
* Improved handling of Serf snapshot data
* Increase reliability of failure detector
* More useful logging messages
## 0.3.0 (June 13, 2014)
FEATURES:


@ -30,4 +30,4 @@ web:
web-push:
./scripts/website_push.sh
.PNONY: all cov deps integ test web web-push
.PHONY: all cov deps integ test web web-push


@ -53,8 +53,8 @@
"sudo mkdir /etc/consul.d",
"sudo apt-get update",
"sudo apt-get install unzip make",
"wget https://dl.bintray.com/mitchellh/consul/0.3.0_linux_amd64.zip",
"unzip 0.3.0_linux_amd64.zip",
"wget https://dl.bintray.com/mitchellh/consul/0.3.1_linux_amd64.zip",
"unzip 0.3.1_linux_amd64.zip",
"sudo mv consul /usr/local/bin/consul",
"chmod +x /usr/local/bin/consul"
]


@ -47,8 +47,8 @@
"mkdir /etc/consul.d",
"apt-get update",
"apt-get install unzip make",
"wget https://dl.bintray.com/mitchellh/consul/0.3.0_linux_amd64.zip",
"unzip 0.3.0_linux_amd64.zip",
"wget https://dl.bintray.com/mitchellh/consul/0.3.1_linux_amd64.zip",
"unzip 0.3.1_linux_amd64.zip",
"mv consul /usr/local/bin/consul",
"chmod +x /usr/local/bin/consul"
]


@ -2,15 +2,16 @@ package agent
import (
"fmt"
"github.com/hashicorp/consul/consul"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/serf/serf"
"io"
"log"
"net"
"os"
"strconv"
"sync"
"github.com/hashicorp/consul/consul"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/serf/serf"
)
/*
@ -171,6 +172,12 @@ func (a *Agent) consulConfig() *consul.Config {
if a.config.Bootstrap {
base.Bootstrap = true
}
if a.config.RejoinAfterLeave {
base.RejoinAfterLeave = true
}
if a.config.BootstrapExpect != 0 {
base.BootstrapExpect = a.config.BootstrapExpect
}
if a.config.Protocol > 0 {
base.ProtocolVersion = uint8(a.config.Protocol)
}
@ -393,7 +400,6 @@ func (a *Agent) AddService(service *structs.NodeService, chkType *CheckType) err
ServiceName: service.Service,
}
if err := a.AddCheck(check, chkType); err != nil {
a.state.RemoveService(service.ID)
return err
}
}
@ -429,8 +435,8 @@ func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *CheckType) error {
// Check if already registered
if chkType != nil {
if chkType.IsTTL() {
if _, ok := a.checkTTLs[check.CheckID]; ok {
return fmt.Errorf("CheckID is already registered")
if existing, ok := a.checkTTLs[check.CheckID]; ok {
existing.Stop()
}
ttl := &CheckTTL{
@ -443,8 +449,8 @@ func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *CheckType) error {
a.checkTTLs[check.CheckID] = ttl
} else {
if _, ok := a.checkMonitors[check.CheckID]; ok {
return fmt.Errorf("CheckID is already registered")
if existing, ok := a.checkMonitors[check.CheckID]; ok {
existing.Stop()
}
if chkType.Interval < MinInterval {
a.logger.Println(fmt.Sprintf("[WARN] agent: check '%s' has interval below minimum of %v",
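The hunk above changes re-registration from a hard error into a replace: an existing TTL check or monitor with the same CheckID is stopped before the new one is stored. A minimal standalone sketch of that idiom (the stopper interface and register helper are illustrative, not agent code):

package main

import "fmt"

type stopper interface{ Stop() }

type check struct{ id string }

func (c *check) Stop() { fmt.Println("stopping", c.id) }

// register replaces any check already stored under id, stopping it
// first, instead of returning "CheckID is already registered".
func register(checks map[string]stopper, id string, c stopper) {
	if existing, ok := checks[id]; ok {
		existing.Stop()
	}
	checks[id] = c
}

func main() {
	checks := map[string]stopper{}
	register(checks, "db", &check{id: "db#1"})
	register(checks, "db", &check{id: "db#2"}) // stops db#1, then replaces it
}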


@ -3,10 +3,6 @@ package agent
import (
"flag"
"fmt"
"github.com/armon/go-metrics"
"github.com/hashicorp/go-syslog"
"github.com/hashicorp/logutils"
"github.com/mitchellh/cli"
"io"
"net"
"os"
@ -16,6 +12,11 @@ import (
"strings"
"syscall"
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/go-syslog"
"github.com/hashicorp/logutils"
"github.com/mitchellh/cli"
)
// gracefulTimeout controls how long we wait before forcefully terminating
@ -62,6 +63,7 @@ func (c *Command) readConfig() *Config {
cmdFlags.BoolVar(&cmdConfig.Server, "server", false, "run agent as server")
cmdFlags.BoolVar(&cmdConfig.Bootstrap, "bootstrap", false, "enable server bootstrap mode")
cmdFlags.IntVar(&cmdConfig.BootstrapExpect, "bootstrap-expect", 0, "enable automatic bootstrap via expect mode")
cmdFlags.StringVar(&cmdConfig.ClientAddr, "client", "", "address to bind client listeners to (DNS, HTTP, RPC)")
cmdFlags.StringVar(&cmdConfig.BindAddr, "bind", "", "address to bind server listeners to")
@ -127,6 +129,27 @@ func (c *Command) readConfig() *Config {
return nil
}
// Expect can only work when acting as a server
if config.BootstrapExpect != 0 && !config.Server {
c.Ui.Error("Expect mode cannot be enabled when server mode is not enabled")
return nil
}
// Expect & Bootstrap are mutually exclusive
if config.BootstrapExpect != 0 && config.Bootstrap {
c.Ui.Error("Bootstrap cannot be provided with an expected server count")
return nil
}
// Warn if we are in expect mode
if config.BootstrapExpect == 1 {
c.Ui.Error("WARNING: BootstrapExpect Mode is specified as 1; this is the same as Bootstrap mode.")
config.BootstrapExpect = 0
config.Bootstrap = true
} else if config.BootstrapExpect > 0 {
c.Ui.Error(fmt.Sprintf("WARNING: Expect Mode enabled, expecting %d servers", config.BootstrapExpect))
}
// Warn if we are in bootstrap mode
if config.Bootstrap {
c.Ui.Error("WARNING: Bootstrap mode enabled! Do not enable unless necessary")
@ -514,6 +537,7 @@ Options:
-advertise=addr Sets the advertise address to use
-bootstrap Sets server to bootstrap mode
-bind=0.0.0.0 Sets the bind address for cluster communication
-bootstrap-expect=0 Sets server to expect bootstrap mode.
-client=127.0.0.1 Sets the address to bind for client access.
This includes RPC, DNS and HTTP
-config-file=foo Path to a JSON file to read configuration from.


@ -4,8 +4,6 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"github.com/hashicorp/consul/consul"
"github.com/mitchellh/mapstructure"
"io"
"net"
"os"
@ -13,6 +11,9 @@ import (
"sort"
"strings"
"time"
"github.com/hashicorp/consul/consul"
"github.com/mitchellh/mapstructure"
)
// Ports is used to simplify the configuration by
@ -64,6 +65,10 @@ type Config struct {
// permits that node to elect itself leader
Bootstrap bool `mapstructure:"bootstrap"`
// BootstrapExpect tries to automatically bootstrap the Consul cluster,
// by withholding peers until enough servers join.
BootstrapExpect int `mapstructure:"bootstrap_expect"`
// Server controls if this agent acts like a Consul server,
// or merely as a client. Servers have more state, take part
// in leader election, etc.
@ -218,13 +223,14 @@ type dirEnts []os.FileInfo
// DefaultConfig is used to return a sane default configuration
func DefaultConfig() *Config {
return &Config{
Bootstrap: false,
Server: false,
Datacenter: consul.DefaultDC,
Domain: "consul.",
LogLevel: "INFO",
ClientAddr: "127.0.0.1",
BindAddr: "0.0.0.0",
Bootstrap: false,
BootstrapExpect: 0,
Server: false,
Datacenter: consul.DefaultDC,
Domain: "consul.",
LogLevel: "INFO",
ClientAddr: "127.0.0.1",
BindAddr: "0.0.0.0",
Ports: PortConfig{
DNS: 8600,
HTTP: 8500,
@ -449,6 +455,9 @@ func MergeConfig(a, b *Config) *Config {
if b.Bootstrap {
result.Bootstrap = true
}
if b.BootstrapExpect != 0 {
result.BootstrapExpect = b.BootstrapExpect
}
if b.Datacenter != "" {
result.Datacenter = b.Datacenter
}
@ -491,6 +500,9 @@ func MergeConfig(a, b *Config) *Config {
if b.SkipLeaveOnInt == true {
result.SkipLeaveOnInt = true
}
if b.StatsiteAddr != "" {
result.StatsiteAddr = b.StatsiteAddr
}
if b.EnableDebug {
result.EnableDebug = true
}
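Every branch in MergeConfig follows the same rule: b's value wins only when it is set, so zero values never clobber a. The StatsiteAddr branch above adds a field that was previously dropped during merges. A toy version of the pattern, assuming a stripped-down Config:

package main

import "fmt"

type Config struct {
	BootstrapExpect int
	StatsiteAddr    string
}

// MergeConfig layers b over a: zero values in b leave a untouched.
func MergeConfig(a, b *Config) *Config {
	result := *a
	if b.BootstrapExpect != 0 {
		result.BootstrapExpect = b.BootstrapExpect
	}
	if b.StatsiteAddr != "" {
		result.StatsiteAddr = b.StatsiteAddr
	}
	return &result
}

func main() {
	base := &Config{BootstrapExpect: 3}
	override := &Config{StatsiteAddr: "127.0.0.1:8125"}
	fmt.Printf("%+v\n", *MergeConfig(base, override))
	// prints {BootstrapExpect:3 StatsiteAddr:127.0.0.1:8125}
}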


@ -93,6 +93,21 @@ func TestDecodeConfig(t *testing.T) {
t.Fatalf("bad: %#v", config)
}
// Expect bootstrap
input = `{"server": true, "bootstrap_expect": 3}`
config, err = DecodeConfig(bytes.NewReader([]byte(input)))
if err != nil {
t.Fatalf("err: %s", err)
}
if !config.Server {
t.Fatalf("bad: %#v", config)
}
if config.BootstrapExpect != 3 {
t.Fatalf("bad: %#v", config)
}
// DNS setup
input = `{"ports": {"dns": 8500}, "recursor": "8.8.8.8", "domain": "foobar"}`
config, err = DecodeConfig(bytes.NewReader([]byte(input)))
@ -426,6 +441,7 @@ func TestDecodeConfig_Check(t *testing.T) {
func TestMergeConfig(t *testing.T) {
a := &Config{
Bootstrap: false,
BootstrapExpect: 0,
Datacenter: "dc1",
DataDir: "/tmp/foo",
DNSRecursor: "127.0.0.1:1001",
@ -443,10 +459,11 @@ func TestMergeConfig(t *testing.T) {
}
b := &Config{
Bootstrap: true,
Datacenter: "dc2",
DataDir: "/tmp/bar",
DNSRecursor: "127.0.0.2:1001",
Bootstrap: true,
BootstrapExpect: 3,
Datacenter: "dc2",
DataDir: "/tmp/bar",
DNSRecursor: "127.0.0.2:1001",
DNSConfig: DNSConfig{
NodeTTL: 10 * time.Second,
ServiceTTL: map[string]time.Duration{


@ -84,14 +84,14 @@ func NewDNSServer(agent *Agent, config *DNSConfig, logOutput io.Writer, domain,
go func() {
err := server.ListenAndServe()
srv.logger.Printf("[ERR] dns: error starting udp server: %v", err)
errCh <- err
errCh <- fmt.Errorf("dns udp setup failed: %v", err)
}()
errChTCP := make(chan error, 1)
go func() {
err := serverTCP.ListenAndServe()
srv.logger.Printf("[ERR] dns: error starting tcp server: %v", err)
errChTCP <- err
errChTCP <- fmt.Errorf("dns tcp setup failed: %v", err)
}()
// Check the server is running, do a test lookup
@ -107,7 +107,7 @@ func NewDNSServer(agent *Agent, config *DNSConfig, logOutput io.Writer, domain,
c := new(dns.Client)
in, _, err := c.Exchange(m, bind)
if err != nil {
checkCh <- err
checkCh <- fmt.Errorf("dns test query failed: %v", err)
return
}
@ -248,7 +248,7 @@ func (d *DNSServer) dispatch(network string, req, resp *dns.Msg) {
datacenter := d.agent.config.Datacenter
// Get the QName without the domain suffix
qName := dns.Fqdn(req.Question[0].Name)
qName := strings.ToLower(dns.Fqdn(req.Question[0].Name))
qName = strings.TrimSuffix(qName, d.domain)
// Split into the label parts
@ -471,6 +471,7 @@ RPC:
// health checks to prevent routing to unhealthy nodes
func (d *DNSServer) filterServiceNodes(nodes structs.CheckServiceNodes) structs.CheckServiceNodes {
n := len(nodes)
OUTER:
for i := 0; i < n; i++ {
node := nodes[i]
for _, check := range node.Checks {
@ -480,6 +481,7 @@ func (d *DNSServer) filterServiceNodes(nodes structs.CheckServiceNodes) structs.
nodes[i], nodes[n-1] = nodes[n-1], structs.CheckServiceNode{}
n--
i--
continue OUTER
}
}
}
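The case-insensitivity change comes down to lowercasing the query name once, up front; all index lookups then compare against lowercased values. A sketch of the normalization using the same github.com/miekg/dns helper (the "consul." suffix here is illustrative):

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	// Same steps as dispatch(): FQDN-ify, lowercase, strip the domain.
	qName := strings.ToLower(dns.Fqdn("fOO.Node.dc1.consul"))
	qName = strings.TrimSuffix(qName, "consul.")
	fmt.Println(qName) // "foo.node.dc1."
}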


@ -136,6 +136,40 @@ func TestDNS_NodeLookup(t *testing.T) {
}
}
func TestDNS_CaseInsensitiveNodeLookup(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "Foo",
Address: "127.0.0.1",
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("fOO.node.dc1.consul.", dns.TypeANY)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener(srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("empty lookup: %#v", in)
}
}
func TestDNS_NodeLookup_PeriodName(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
@ -336,6 +370,45 @@ func TestDNS_ServiceLookup(t *testing.T) {
}
}
func TestDNS_CaseInsensitiveServiceLookup(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()
testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
// Register node
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "Db",
Tags: []string{"Master"},
Port: 12345,
},
}
var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("mASTER.dB.service.consul.", dns.TypeSRV)
c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener(srv.agent.config.Ports.DNS)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}
if len(in.Answer) != 1 {
t.Fatalf("empty lookup: %#v", in)
}
}
func TestDNS_ServiceLookup_TagPeriod(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
@ -651,6 +724,40 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
t.Fatalf("err: %v", err)
}
args3 := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
Check: &structs.HealthCheck{
CheckID: "db",
Name: "db",
ServiceID: "db",
Status: structs.HealthCritical,
},
}
if err := srv.agent.RPC("Catalog.Register", args3, &out); err != nil {
t.Fatalf("err: %v", err)
}
args4 := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "baz",
Address: "127.0.0.3",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"master"},
Port: 12345,
},
}
if err := srv.agent.RPC("Catalog.Register", args4, &out); err != nil {
t.Fatalf("err: %v", err)
}
m := new(dns.Msg)
m.SetQuestion("db.service.consul.", dns.TypeANY)
@ -662,9 +769,15 @@ func TestDNS_ServiceLookup_FilterCritical(t *testing.T) {
}
// Only the node without a critical check (baz) should be returned
if len(in.Answer) != 0 {
if len(in.Answer) != 1 {
t.Fatalf("Bad: %#v", in)
}
resp := in.Answer[0]
aRec := resp.(*dns.A)
if aRec.A.String() != "127.0.0.3" {
t.Fatalf("Bad: %#v", in.Answer[0])
}
}
func TestDNS_ServiceLookup_Randomize(t *testing.T) {


@ -117,6 +117,7 @@ func (s *HTTPServer) HealthServiceNodes(resp http.ResponseWriter, req *http.Requ
// filterNonPassing is used to filter out any nodes that have checks that are not passing
func filterNonPassing(nodes structs.CheckServiceNodes) structs.CheckServiceNodes {
n := len(nodes)
OUTER:
for i := 0; i < n; i++ {
node := nodes[i]
for _, check := range node.Checks {
@ -124,6 +125,7 @@ func filterNonPassing(nodes structs.CheckServiceNodes) structs.CheckServiceNodes
nodes[i], nodes[n-1] = nodes[n-1], structs.CheckServiceNode{}
n--
i--
continue OUTER
}
}
}
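filterNonPassing (and filterServiceNodes in dns.go) compact the slice in place: a failing node is swapped to the tail and the live window shrinks. The added continue OUTER fixes a subtle bug: without it, the inner loop kept examining the checks of the node that had just been swapped out. A self-contained sketch with simplified types (not the structs package):

package main

import "fmt"

type node struct {
	name   string
	checks []string
}

func filterNonPassing(nodes []node) []node {
	n := len(nodes)
OUTER:
	for i := 0; i < n; i++ {
		for _, status := range nodes[i].checks {
			if status != "passing" {
				// Swap the failing node out of the live window and
				// restart the check scan for whatever landed in slot i.
				nodes[i], nodes[n-1] = nodes[n-1], node{}
				n--
				i--
				continue OUTER
			}
		}
	}
	return nodes[:n]
}

func main() {
	out := filterNonPassing([]node{
		{name: "a", checks: []string{"critical", "critical"}},
		{name: "b", checks: []string{"passing"}},
	})
	fmt.Println(out) // [{b [passing]}]
}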


@ -7,6 +7,7 @@ import (
"net/http"
"net/http/httptest"
"os"
"reflect"
"testing"
)
@ -182,3 +183,39 @@ func TestHealthServiceNodes_PassingFilter(t *testing.T) {
t.Fatalf("bad: %v", obj)
}
}
func TestFilterNonPassing(t *testing.T) {
nodes := structs.CheckServiceNodes{
structs.CheckServiceNode{
Checks: structs.HealthChecks{
&structs.HealthCheck{
Status: structs.HealthCritical,
},
&structs.HealthCheck{
Status: structs.HealthCritical,
},
},
},
structs.CheckServiceNode{
Checks: structs.HealthChecks{
&structs.HealthCheck{
Status: structs.HealthCritical,
},
&structs.HealthCheck{
Status: structs.HealthCritical,
},
},
},
structs.CheckServiceNode{
Checks: structs.HealthChecks{
&structs.HealthCheck{
Status: structs.HealthPassing,
},
},
},
}
expected := nodes[2]
out := filterNonPassing(nodes)
if len(out) != 1 || !reflect.DeepEqual(out[0], expected) {
t.Fatalf("bad: %v", out)
}
}


@ -220,13 +220,13 @@ func TestCatalogListNodes(t *testing.T) {
})
// Server node is auto added from Serf
if out.Nodes[0].Node != s1.config.NodeName {
if out.Nodes[1].Node != s1.config.NodeName {
t.Fatalf("bad: %v", out)
}
if out.Nodes[1].Node != "foo" {
if out.Nodes[0].Node != "foo" {
t.Fatalf("bad: %v", out)
}
if out.Nodes[1].Address != "127.0.0.1" {
if out.Nodes[0].Address != "127.0.0.1" {
t.Fatalf("bad: %v", out)
}
}


@ -88,10 +88,8 @@ func NewClient(config *Config) (*Client, error) {
// Create the tlsConfig
var tlsConfig *tls.Config
var err error
if config.VerifyOutgoing {
if tlsConfig, err = config.OutgoingTLSConfig(); err != nil {
return nil, err
}
if tlsConfig, err = config.OutgoingTLSConfig(); err != nil {
return nil, err
}
// Create a logger


@ -44,6 +44,11 @@ type Config struct {
// other nodes being present
Bootstrap bool
// BootstrapExpect mode is used to automatically bring up a collection of
// Consul servers, deferring bootstrap until the expected number of
// servers have joined.
BootstrapExpect int
// Datacenter is the datacenter this Consul server represents
Datacenter string
@ -172,16 +177,21 @@ func (c *Config) KeyPair() (*tls.Certificate, error) {
return &cert, err
}
// OutgoingTLSConfig generates a TLS configuration for outgoing requests
// OutgoingTLSConfig generates a TLS configuration for outgoing
// requests. It will return a nil config if this configuration should
// not use TLS for outgoing connections.
func (c *Config) OutgoingTLSConfig() (*tls.Config, error) {
if !c.VerifyOutgoing {
return nil, nil
}
// Create the tlsConfig
tlsConfig := &tls.Config{
ServerName: c.ServerName,
RootCAs: x509.NewCertPool(),
InsecureSkipVerify: !c.VerifyOutgoing,
InsecureSkipVerify: true,
}
if tlsConfig.ServerName == "" {
tlsConfig.ServerName = c.NodeName
if c.ServerName != "" {
tlsConfig.ServerName = c.ServerName
tlsConfig.InsecureSkipVerify = false
}
// Ensure we have a CA if VerifyOutgoing is set
@ -206,6 +216,59 @@ func (c *Config) OutgoingTLSConfig() (*tls.Config, error) {
return tlsConfig, nil
}
// Wrap a net.Conn into a client tls connection, performing any
// additional verification as needed.
//
// As of go 1.3, crypto/tls only supports either doing no certificate
// verification, or doing full verification, including the peer's
// DNS name. For consul, we want to validate that the certificate is
// signed by a known CA, but because consul doesn't use DNS names for
// node names, we don't verify the certificate DNS names. Since go 1.3
// no longer supports this mode of operation, we have to do it
// manually.
func wrapTLSClient(conn net.Conn, tlsConfig *tls.Config) (net.Conn, error) {
var err error
var tlsConn *tls.Conn
tlsConn = tls.Client(conn, tlsConfig)
// If crypto/tls is doing verification, there's no need to do
// our own.
if tlsConfig.InsecureSkipVerify == false {
return tlsConn, nil
}
if err = tlsConn.Handshake(); err != nil {
tlsConn.Close()
return nil, err
}
// The following is lightly-modified from the doFullHandshake
// method in crypto/tls's handshake_client.go.
opts := x509.VerifyOptions{
Roots: tlsConfig.RootCAs,
CurrentTime: time.Now(),
DNSName: "",
Intermediates: x509.NewCertPool(),
}
certs := tlsConn.ConnectionState().PeerCertificates
for i, cert := range certs {
if i == 0 {
continue
}
opts.Intermediates.AddCert(cert)
}
_, err = certs[0].Verify(opts)
if err != nil {
tlsConn.Close()
return nil, err
}
return tlsConn, err
}
// IncomingTLSConfig generates a TLS configuration for incoming requests
func (c *Config) IncomingTLSConfig() (*tls.Config, error) {
// Create the tlsConfig
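After this change OutgoingTLSConfig has three outcomes: nil (VerifyOutgoing off), full crypto/tls verification (ServerName set), or CA-only verification via wrapTLSClient. A hypothetical caller in this package, to show how the pieces compose (net and time are already imported here; dialTLS itself is not part of the diff):

func dialTLS(cfg *Config, addr string) (net.Conn, error) {
	conn, err := net.DialTimeout("tcp", addr, 10*time.Second)
	if err != nil {
		return nil, err
	}
	tlsConf, err := cfg.OutgoingTLSConfig()
	if err != nil {
		conn.Close()
		return nil, err
	}
	if tlsConf == nil {
		return conn, nil // outgoing TLS disabled
	}
	// wrapTLSClient performs the manual CA check when
	// InsecureSkipVerify is set; otherwise it returns the
	// plain crypto/tls client connection.
	return wrapTLSClient(conn, tlsConf)
}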


@ -3,6 +3,9 @@ package consul
import (
"crypto/tls"
"crypto/x509"
"io"
"io/ioutil"
"net"
"testing"
)
@ -78,14 +81,8 @@ func TestConfig_OutgoingTLS_OnlyCA(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
if tls == nil {
t.Fatalf("expected config")
}
if len(tls.RootCAs.Subjects()) != 1 {
t.Fatalf("expect root cert")
}
if !tls.InsecureSkipVerify {
t.Fatalf("expect to skip verification")
if tls != nil {
t.Fatalf("expected no config")
}
}
@ -104,8 +101,35 @@ func TestConfig_OutgoingTLS_VerifyOutgoing(t *testing.T) {
if len(tls.RootCAs.Subjects()) != 1 {
t.Fatalf("expect root cert")
}
if tls.ServerName != "" {
t.Fatalf("expect no server name verification")
}
if !tls.InsecureSkipVerify {
t.Fatalf("should skip built-in verification")
}
}
func TestConfig_OutgoingTLS_ServerName(t *testing.T) {
conf := &Config{
VerifyOutgoing: true,
CAFile: "../test/ca/root.cer",
ServerName: "consul.example.com",
}
tls, err := conf.OutgoingTLSConfig()
if err != nil {
t.Fatalf("err: %v", err)
}
if tls == nil {
t.Fatalf("expected config")
}
if len(tls.RootCAs.Subjects()) != 1 {
t.Fatalf("expect root cert")
}
if tls.ServerName != "consul.example.com" {
t.Fatalf("expect server name")
}
if tls.InsecureSkipVerify {
t.Fatalf("should not skip verification")
t.Fatalf("should not skip built-in verification")
}
}
@ -126,14 +150,107 @@ func TestConfig_OutgoingTLS_WithKeyPair(t *testing.T) {
if len(tls.RootCAs.Subjects()) != 1 {
t.Fatalf("expect root cert")
}
if tls.InsecureSkipVerify {
t.Fatalf("should not skip verification")
if !tls.InsecureSkipVerify {
t.Fatalf("should skip verification")
}
if len(tls.Certificates) != 1 {
t.Fatalf("expected client cert")
}
}
func startTLSServer(config *Config) (net.Conn, chan error) {
errc := make(chan error, 1)
tlsConfigServer, err := config.IncomingTLSConfig()
if err != nil {
errc <- err
return nil, errc
}
client, server := net.Pipe()
go func() {
tlsServer := tls.Server(server, tlsConfigServer)
if err := tlsServer.Handshake(); err != nil {
errc <- err
}
close(errc)
// Because net.Pipe() is unbuffered, if both sides
// Close() simultaneously, we will deadlock as they
// both send an alert and then block. So we make the
// server read any data from the client until error or
// EOF, which will allow the client to Close(), and
// *then* we Close() the server.
io.Copy(ioutil.Discard, tlsServer)
tlsServer.Close()
}()
return client, errc
}
func TestConfig_wrapTLS_OK(t *testing.T) {
config := &Config{
CAFile: "../test/ca/root.cer",
CertFile: "../test/key/ourdomain.cer",
KeyFile: "../test/key/ourdomain.key",
VerifyOutgoing: true,
}
client, errc := startTLSServer(config)
if client == nil {
t.Fatalf("startTLSServer err: %v", <-errc)
}
clientConfig, err := config.OutgoingTLSConfig()
if err != nil {
t.Fatalf("OutgoingTLSConfig err: %v", err)
}
tlsClient, err := wrapTLSClient(client, clientConfig)
if err != nil {
t.Fatalf("wrapTLS err: %v", err)
} else {
tlsClient.Close()
}
err = <-errc
if err != nil {
t.Fatalf("server: %v", err)
}
}
func TestConfig_wrapTLS_BadCert(t *testing.T) {
serverConfig := &Config{
CertFile: "../test/key/ssl-cert-snakeoil.pem",
KeyFile: "../test/key/ssl-cert-snakeoil.key",
}
client, errc := startTLSServer(serverConfig)
if client == nil {
t.Fatalf("startTLSServer err: %v", <-errc)
}
clientConfig := &Config{
CAFile: "../test/ca/root.cer",
VerifyOutgoing: true,
}
clientTLSConfig, err := clientConfig.OutgoingTLSConfig()
if err != nil {
t.Fatalf("OutgoingTLSConfig err: %v", err)
}
tlsClient, err := wrapTLSClient(client, clientTLSConfig)
if err == nil {
t.Fatalf("wrapTLS no err")
}
if tlsClient != nil {
t.Fatalf("returned a client")
}
err = <-errc
if err != nil {
t.Fatalf("server: %v", err)
}
}
func TestConfig_IncomingTLS(t *testing.T) {
conf := &Config{
VerifyIncoming: true,


@ -1,13 +1,14 @@
package consul
import (
"net"
"strconv"
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
"net"
"strconv"
"time"
)
const (


@ -45,12 +45,13 @@ type MDBTables []*MDBTable
// An Index is named, and uses a series of column values to
// map to the row-id containing the table
type MDBIndex struct {
AllowBlank bool // Can fields be blank
Unique bool // Controls if values are unique
Fields []string // Fields are used to build the index
IdxFunc IndexFunc // Can be used to provide custom indexing
Virtual bool // Virtual index does not exist, but can be used for queries
RealIndex string // Virtual indexes use a RealIndex for iteration
AllowBlank bool // Can fields be blank
Unique bool // Controls if values are unique
Fields []string // Fields are used to build the index
IdxFunc IndexFunc // Can be used to provide custom indexing
Virtual bool // Virtual index does not exist, but can be used for queries
RealIndex string // Virtual indexes use a RealIndex for iteration
CaseInsensitive bool // Controls if values are case-insensitive
table *MDBTable
name string
@ -426,6 +427,10 @@ func (t *MDBTable) getIndex(index string, parts []string) (*MDBIndex, []byte, er
return nil, nil, tooManyFields
}
if idx.CaseInsensitive {
parts = ToLowerList(parts)
}
// Construct the key
key := idx.keyFromParts(parts...)
return idx, key, nil
@ -613,6 +618,9 @@ func (i *MDBIndex) keyFromObject(obj interface{}) ([]byte, error) {
if !i.AllowBlank && val == "" {
return nil, fmt.Errorf("Field '%s' must be set: %#v", field, obj)
}
if i.CaseInsensitive {
val = strings.ToLower(val)
}
parts = append(parts, val)
}
key := i.keyFromParts(parts...)


@ -221,7 +221,11 @@ func (p *ConnPool) getNewConn(addr net.Addr, version int) (*Conn, error) {
}
// Wrap the connection in a TLS client
conn = tls.Client(conn, p.tlsConfig)
conn, err = wrapTLSClient(conn, p.tlsConfig)
if err != nil {
conn.Close()
return nil, err
}
}
// Switch the multiplexing based on version


@ -94,7 +94,10 @@ func (l *RaftLayer) Dial(address string, timeout time.Duration) (net.Conn, error
}
// Wrap the connection in a TLS client
conn = tls.Client(conn, l.tlsConfig)
conn, err = wrapTLSClient(conn, l.tlsConfig)
if err != nil {
return nil, err
}
}
// Write the Raft byte to set the mode


@ -1,8 +1,10 @@
package consul
import (
"github.com/hashicorp/serf/serf"
"net"
"strings"
"github.com/hashicorp/serf/serf"
)
const (
@ -144,14 +146,72 @@ func (s *Server) nodeJoin(me serf.MemberEvent, wan bool) {
s.remoteLock.Unlock()
// Add to the local list as well
if !wan {
if !wan && parts.Datacenter == s.config.Datacenter {
s.localLock.Lock()
s.localConsuls[parts.Addr.String()] = parts
s.localLock.Unlock()
}
// If we are still expecting to bootstrap, we may need to handle this
if s.config.BootstrapExpect != 0 {
s.maybeBootstrap()
}
}
}
// maybeBootstrap is used to handle bootstrapping when a new consul server joins
func (s *Server) maybeBootstrap() {
index, err := s.raftStore.LastIndex()
if err != nil {
s.logger.Printf("[ERR] consul: failed to read last raft index: %v", err)
return
}
// Bootstrap can only be done if there are no committed logs;
// otherwise, remove our expectation of bootstrapping
if index != 0 {
s.config.BootstrapExpect = 0
return
}
// Scan for all the known servers
members := s.serfLAN.Members()
addrs := make([]net.Addr, 0)
for _, member := range members {
valid, p := isConsulServer(member)
if !valid {
continue
}
if p.Datacenter != s.config.Datacenter {
s.logger.Printf("[ERR] consul: Member %v has a conflicting datacenter, ignoring", member)
continue
}
if p.Expect != 0 && p.Expect != s.config.BootstrapExpect {
s.logger.Printf("[ERR] consul: Member %v has a conflicting expect value. All nodes should expect the same number.", member)
return
}
if p.Bootstrap {
s.logger.Printf("[ERR] consul: Member %v has bootstrap mode. Expect disabled.", member)
return
}
addrs = append(addrs, &net.TCPAddr{IP: member.Addr, Port: p.Port})
}
// Skip if we haven't met the minimum expect count
if len(addrs) < s.config.BootstrapExpect {
return
}
// Update the peer set
s.logger.Printf("[INFO] consul: Attempting bootstrap with nodes: %v", addrs)
if err := s.raft.SetPeers(addrs).Error(); err != nil {
s.logger.Printf("[ERR] consul: failed to bootstrap peers: %v", err)
}
// Bootstrapping complete; don't enter this again
s.config.BootstrapExpect = 0
}
// nodeFailed is used to handle fail events on both the serf clusters
func (s *Server) nodeFailed(me serf.MemberEvent, wan bool) {
for _, m := range me.Members {


@ -4,9 +4,6 @@ import (
"crypto/tls"
"errors"
"fmt"
"github.com/hashicorp/raft"
"github.com/hashicorp/raft-mdb"
"github.com/hashicorp/serf/serf"
"log"
"net"
"net/rpc"
@ -17,6 +14,10 @@ import (
"strconv"
"sync"
"time"
"github.com/hashicorp/raft"
"github.com/hashicorp/raft-mdb"
"github.com/hashicorp/serf/serf"
)
// These are the protocol versions that Consul can _understand_. These are
@ -145,12 +146,9 @@ func NewServer(config *Config) (*Server, error) {
}
// Create the tlsConfig for outgoing connections
var tlsConfig *tls.Config
var err error
if config.VerifyOutgoing {
if tlsConfig, err = config.OutgoingTLSConfig(); err != nil {
return nil, err
}
tlsConfig, err := config.OutgoingTLSConfig()
if err != nil {
return nil, err
}
// Get the incoming tls config
@ -189,10 +187,6 @@ func NewServer(config *Config) (*Server, error) {
return nil, fmt.Errorf("Failed to start Raft: %v", err)
}
// Start the Serf listeners to prevent a deadlock
go s.lanEventHandler()
go s.wanEventHandler()
// Initialize the lan Serf
s.serfLAN, err = s.setupSerf(config.SerfLANConfig,
s.eventChLAN, serfLANSnapshot, false)
@ -200,6 +194,7 @@ func NewServer(config *Config) (*Server, error) {
s.Shutdown()
return nil, fmt.Errorf("Failed to start lan serf: %v", err)
}
go s.lanEventHandler()
// Initialize the wan Serf
s.serfWAN, err = s.setupSerf(config.SerfWANConfig,
@ -208,6 +203,7 @@ func NewServer(config *Config) (*Server, error) {
s.Shutdown()
return nil, fmt.Errorf("Failed to start wan serf: %v", err)
}
go s.wanEventHandler()
// Start listening for RPC requests
go s.listen()
@ -233,6 +229,9 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w
if s.config.Bootstrap {
conf.Tags["bootstrap"] = "1"
}
if s.config.BootstrapExpect != 0 {
conf.Tags["expect"] = fmt.Sprintf("%d", s.config.BootstrapExpect)
}
conf.MemberlistConfig.LogOutput = s.config.LogOutput
conf.LogOutput = s.config.LogOutput
conf.EventCh = ch


@ -3,12 +3,13 @@ package consul
import (
"errors"
"fmt"
"github.com/hashicorp/consul/testutil"
"io/ioutil"
"net"
"os"
"testing"
"time"
"github.com/hashicorp/consul/testutil"
)
var nextPort = 15000
@ -87,6 +88,19 @@ func testServerDCBootstrap(t *testing.T, dc string, bootstrap bool) (string, *Se
return dir, server
}
func testServerDCExpect(t *testing.T, dc string, expect int) (string, *Server) {
name := fmt.Sprintf("Node %d", getPort())
dir, config := testServerConfig(t, name)
config.Datacenter = dc
config.Bootstrap = false
config.BootstrapExpect = expect
server, err := NewServer(config)
if err != nil {
t.Fatalf("err: %v", err)
}
return dir, server
}
func TestServer_StartStop(t *testing.T) {
dir := tmpDir(t)
defer os.RemoveAll(dir)
@ -304,3 +318,147 @@ func TestServer_JoinLAN_TLS(t *testing.T) {
t.Fatalf("no peer established")
})
}
func TestServer_Expect(t *testing.T) {
// s1 and s2 are in expect=3 mode; s3 has expect disabled
dir1, s1 := testServerDCExpect(t, "dc1", 3)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
dir2, s2 := testServerDCExpect(t, "dc1", 3)
defer os.RemoveAll(dir2)
defer s2.Shutdown()
dir3, s3 := testServerDCExpect(t, "dc1", 0)
defer os.RemoveAll(dir3)
defer s3.Shutdown()
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
var p1 []net.Addr
var p2 []net.Addr
// should have no peers yet
testutil.WaitForResult(func() (bool, error) {
p1, _ = s1.raftPeers.Peers()
return len(p1) == 0, errors.New(fmt.Sprintf("%v", p1))
}, func(err error) {
t.Fatalf("should have 0 peers: %v", err)
})
testutil.WaitForResult(func() (bool, error) {
p2, _ = s2.raftPeers.Peers()
return len(p2) == 0, errors.New(fmt.Sprintf("%v", p2))
}, func(err error) {
t.Fatalf("should have 0 peers: %v", err)
})
// join the third node
if _, err := s3.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
var p3 []net.Addr
// should now have all three peers
testutil.WaitForResult(func() (bool, error) {
p1, _ = s1.raftPeers.Peers()
return len(p1) == 3, errors.New(fmt.Sprintf("%v", p1))
}, func(err error) {
t.Fatalf("should have 3 peers: %v", err)
})
testutil.WaitForResult(func() (bool, error) {
p2, _ = s2.raftPeers.Peers()
return len(p2) == 3, errors.New(fmt.Sprintf("%v", p2))
}, func(err error) {
t.Fatalf("should have 3 peers: %v", err)
})
testutil.WaitForResult(func() (bool, error) {
p3, _ = s3.raftPeers.Peers()
return len(p3) == 3, errors.New(fmt.Sprintf("%v", p3))
}, func(err error) {
t.Fatalf("should have 3 peers: %v", err)
})
// check if there is one leader now
testutil.WaitForLeader(t, s1.RPC, "dc1")
}
func TestServer_BadExpect(t *testing.T) {
// this one is in expect=3 mode
dir1, s1 := testServerDCExpect(t, "dc1", 3)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
// this one is in expect=2 mode
dir2, s2 := testServerDCExpect(t, "dc1", 2)
defer os.RemoveAll(dir2)
defer s2.Shutdown()
// and this one is in expect=3 mode
dir3, s3 := testServerDCExpect(t, "dc1", 3)
defer os.RemoveAll(dir3)
defer s3.Shutdown()
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
var p1 []net.Addr
var p2 []net.Addr
// should have no peers yet
testutil.WaitForResult(func() (bool, error) {
p1, _ = s1.raftPeers.Peers()
return len(p1) == 0, errors.New(fmt.Sprintf("%v", p1))
}, func(err error) {
t.Fatalf("should have 0 peers: %v", err)
})
testutil.WaitForResult(func() (bool, error) {
p2, _ = s2.raftPeers.Peers()
return len(p2) == 0, errors.New(fmt.Sprintf("%v", p2))
}, func(err error) {
t.Fatalf("should have 0 peers: %v", err)
})
// join the third node
if _, err := s3.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
var p3 []net.Addr
// should still have no peers (because s2 is in expect=2 mode)
testutil.WaitForResult(func() (bool, error) {
p1, _ = s1.raftPeers.Peers()
return len(p1) == 0, errors.New(fmt.Sprintf("%v", p1))
}, func(err error) {
t.Fatalf("should have 0 peers: %v", err)
})
testutil.WaitForResult(func() (bool, error) {
p2, _ = s2.raftPeers.Peers()
return len(p2) == 0, errors.New(fmt.Sprintf("%v", p2))
}, func(err error) {
t.Fatalf("should have 0 peers: %v", err)
})
testutil.WaitForResult(func() (bool, error) {
p3, _ = s3.raftPeers.Peers()
return len(p3) == 0, errors.New(fmt.Sprintf("%v", p3))
}, func(err error) {
t.Fatalf("should have 0 peers: %v", err)
})
}


@ -177,8 +177,9 @@ func (s *StateStore) initialize() error {
Name: dbNodes,
Indexes: map[string]*MDBIndex{
"id": &MDBIndex{
Unique: true,
Fields: []string{"Node"},
Unique: true,
Fields: []string{"Node"},
CaseInsensitive: true,
},
},
Decoder: func(buf []byte) interface{} {
@ -198,8 +199,9 @@ func (s *StateStore) initialize() error {
Fields: []string{"Node", "ServiceID"},
},
"service": &MDBIndex{
AllowBlank: true,
Fields: []string{"ServiceName"},
AllowBlank: true,
Fields: []string{"ServiceName"},
CaseInsensitive: true,
},
},
Decoder: func(buf []byte) interface{} {
@ -640,7 +642,7 @@ func serviceTagFilter(l []interface{}, tag string) []interface{} {
n := len(l)
for i := 0; i < n; i++ {
srv := l[i].(*structs.ServiceNode)
if !strContains(srv.ServiceTags, tag) {
if !strContains(ToLowerList(srv.ServiceTags), strings.ToLower(tag)) {
l[i], l[n-1] = l[n-1], nil
i--
n--
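The tag filter now lowercases both the stored tags and the query tag before comparing. A standalone equivalent of the new condition (ToLowerList is the helper added in util.go; strContains matches the existing one):

package main

import (
	"fmt"
	"strings"
)

func strContains(l []string, s string) bool {
	for _, v := range l {
		if v == s {
			return true
		}
	}
	return false
}

func ToLowerList(l []string) []string {
	var out []string
	for _, value := range l {
		out = append(out, strings.ToLower(value))
	}
	return out
}

func main() {
	tags := []string{"Master", "V1"}
	// Matches no matter how the query tag is cased.
	fmt.Println(strContains(ToLowerList(tags), strings.ToLower("mAsTeR"))) // true
}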


@ -4,12 +4,14 @@ import (
crand "crypto/rand"
"encoding/binary"
"fmt"
"github.com/hashicorp/serf/serf"
"net"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/hashicorp/serf/serf"
)
/*
@ -26,6 +28,7 @@ type serverParts struct {
Datacenter string
Port int
Bootstrap bool
Expect int
Version int
Addr net.Addr
}
@ -66,6 +69,14 @@ func strContains(l []string, s string) bool {
return false
}
func ToLowerList(l []string) []string {
var out []string
for _, value := range l {
out = append(out, strings.ToLower(value))
}
return out
}
// ensurePath is used to make sure a path exists
func ensurePath(path string, dir bool) error {
if !dir {
@ -84,6 +95,16 @@ func isConsulServer(m serf.Member) (bool, *serverParts) {
datacenter := m.Tags["dc"]
_, bootstrap := m.Tags["bootstrap"]
expect := 0
expect_str, ok := m.Tags["expect"]
var err error
if ok {
expect, err = strconv.Atoi(expect_str)
if err != nil {
return false, nil
}
}
port_str := m.Tags["port"]
port, err := strconv.Atoi(port_str)
if err != nil {
@ -103,6 +124,7 @@ func isConsulServer(m serf.Member) (bool, *serverParts) {
Datacenter: datacenter,
Port: port,
Bootstrap: bootstrap,
Expect: expect,
Addr: addr,
Version: vsn,
}


@ -1,10 +1,11 @@
package consul
import (
"github.com/hashicorp/serf/serf"
"net"
"regexp"
"testing"
"github.com/hashicorp/serf/serf"
)
func TestStrContains(t *testing.T) {
@ -17,6 +18,15 @@ func TestStrContains(t *testing.T) {
}
}
func TestToLowerList(t *testing.T) {
l := []string{"ABC", "Abc", "abc"}
for _, value := range ToLowerList(l) {
if value != "abc" {
t.Fatalf("failed lowercasing")
}
}
}
func TestIsPrivateIP(t *testing.T) {
if !isPrivateIP("192.168.1.1") {
t.Fatalf("bad")
@ -56,6 +66,9 @@ func TestIsConsulServer(t *testing.T) {
if parts.Bootstrap {
t.Fatalf("unexpected bootstrap")
}
if parts.Expect != 0 {
t.Fatalf("bad: %v", parts.Expect)
}
m.Tags["bootstrap"] = "1"
valid, parts = isConsulServer(m)
if !valid || !parts.Bootstrap {
@ -67,6 +80,12 @@ func TestIsConsulServer(t *testing.T) {
if parts.Version != 1 {
t.Fatalf("bad: %v", parts)
}
m.Tags["expect"] = "3"
delete(m.Tags, "bootstrap")
valid, parts = isConsulServer(m)
if !valid || parts.Expect != 3 {
t.Fatalf("bad: %v", parts.Expect)
}
}
func TestIsConsulNode(t *testing.T) {


@ -7,7 +7,7 @@ sudo apt-get install -y unzip
echo Fetching Consul...
cd /tmp/
wget https://dl.bintray.com/mitchellh/consul/0.3.0_linux_amd64.zip -O consul.zip
wget https://dl.bintray.com/mitchellh/consul/0.3.1_linux_amd64.zip -O consul.zip
echo Installing Consul...
unzip consul.zip

deps/v0-3-1.json (vendored, new file)

@ -0,0 +1,72 @@
{
"ImportPath": "github.com/hashicorp/consul",
"GoVersion": "go1.3",
"Deps": [
{
"ImportPath": "github.com/armon/circbuf",
"Rev": "f092b4f207b6e5cce0569056fba9e1a2735cb6cf"
},
{
"ImportPath": "github.com/armon/go-metrics",
"Rev": "02567bbc4f518a43853d262b651a3c8257c3f141"
},
{
"ImportPath": "github.com/armon/gomdb",
"Rev": "a8e036c4dabe7437014ecf9dbc03c6f6f0766ef8"
},
{
"ImportPath": "github.com/hashicorp/go-syslog",
"Rev": "ac3963b72ac367e48b1e68a831e62b93fb69091c"
},
{
"ImportPath": "github.com/hashicorp/logutils",
"Rev": "8e0820fe7ac5eb2b01626b1d99df47c5449eb2d8"
},
{
"ImportPath": "github.com/hashicorp/memberlist",
"Rev": "e6a282556f0e8f15e9a53dcb0d14912a3c2fb141"
},
{
"ImportPath": "github.com/hashicorp/raft",
"Rev": "35f5fa082f5a064595d84715b0cf8821f002e9ac"
},
{
"ImportPath": "github.com/hashicorp/raft-mdb",
"Rev": "9076b4b956c1c4c8a47117608b612bda2cb5f481"
},
{
"ImportPath": "github.com/hashicorp/serf/serf",
"Comment": "v0.6.3-1-g7f260e7",
"Rev": "7f260e70a89739bd38c1f0bf3b74c0e1c1ee617f"
},
{
"ImportPath": "github.com/hashicorp/yamux",
"Rev": "35417c7dfab4085d7c921b33e4d5ea6cf9ceef65"
},
{
"ImportPath": "github.com/inconshreveable/muxado",
"Rev": "f693c7e88ba316d1a0ae3e205e22a01aa3ec2848"
},
{
"ImportPath": "github.com/miekg/dns",
"Rev": "9af5c1f8a8a71bc5c8539d16cdc40b4a47ee7024"
},
{
"ImportPath": "github.com/mitchellh/cli",
"Rev": "eaf0e415fc517a431dca53c7b2e7559d42238ebe"
},
{
"ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "6fb2c832bcac61d01212ab1d172f7a14a8585b07"
},
{
"ImportPath": "github.com/ryanuber/columnize",
"Comment": "v2.0.1",
"Rev": "785d943a7b6886e0bb2f139a60487b823dd8d9de"
},
{
"ImportPath": "github.com/ugorji/go/codec",
"Rev": "71c2886f5a673a35f909803f38ece5810165097b"
}
]
}


@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDYVw5skn/3Ka72
32ZaCrKtRVoQzan3tghq41KpQe3yZxIZbKy7sbwfdXnXVSwTAbq/3BYi9rya2t/v
W95yZh6JgfrLBvWl9Jo1EttZIxDhzCXGP+MPWm2KdNtHr84JznJbdxRpR0Jb4ykK
2d9dXbLJvCw8eEDFgOVGrj60USMir46sZFRvGWlMi+yHSOE+WQXaU40Dr0ZJqNvd
RNO9BtqpLaecZQaYTvlkyVdhjUE3+gQ0zEAQqpLcWi+zB5/IyR2+KwxDT3vAJumd
G7rIaGatPE8k0Ahb+zMKFFGYCoQ3sjbAbrQmrVtH4SU6ggl+CxpVdxshrK1W05Ms
WAiPw81/AgMBAAECggEAKjDIKlpjxGMHsTOeNV8yu2H0D6TcSefhOl885q9p5UU+
nWC5Sx19b7EsYtdEcix7LCGS25y86YJX+8kx16OcvvpvW5ru2z+Zt1IHHxocl7yF
fWVGNd9Pz5m8jf12NClj2fyeKW3xPhROE8Srr/yu+nLNObnF//6EOEWRCv9r176C
+dzYvYVNPP48Ug7NpjQB94CBprtJyqvuoXvBPtpARXazVniYEhnzG1Gaj1TiCII5
+emaMjKcWIEJ5stbBb3lUtqgm8bRNb/qcxoFfqTzHP+hbum9hbRz0KEIlAkm7uAv
S0TlyLuaj+gPQ+LwNX8EhGKUdlK/VM5bj2kq/tg3AQKBgQD/+A8ruHNa5nKGKNzP
dp+hXiL2sSzefMjDa2+sRJ0yftIMqYRfCJwzYumjfyycfCsu1LHainlQjSO6Kkgc
c0xVxnahWyPCQiqZuo9lLx4EVXCdRqWRg+pbyQhTSz90hfWEKD7XWsI8uRkOEnW8
36FiyovGDFxl0esaKrFNSFdmgQKBgQDYXcSIRJk41f7vL6FVmchpUnVYoD75k9YT
FqEplNMw6gXcqbC2aNH5wj7EJlRboyVpjXV4N0d2Cz6AwREJpr/rYpq68AixXmVs
kTKwevoHm/tln7CN+CyIEy6KXdLp4KoWLFfSG6tHWRwIGFxWEGrrIZS6Eznu4GPe
V2yOnMkz/wKBgC6nXtSALP5PbGZJgl2J6HR3/PVru5rdsZX0ugjzBJfUh6JpL0hH
AHlZOO5k2pO3CgPiHnyPqqbk4rMmy7frx+kGYE7ulqjseGlGmKY/nT/69qij3L+W
BJwwGwVbfLhXRjWNRE7qKub4cbmf4bfIJtkjw7AYRqsERM6jI2fLnKqBAoGAUBzY
CkSsHxlNXa7bI+DfDfBUNs6OwsZ0e3jjj4vlbrUYGo5SOhgxtzKvHt26Wnvb/Gs+
VZbSROkA6ZeTAWnWogdOl20NKu9yynIwvJusPGkK+qPYMZj0lCXWE7GNyL9A+xjM
I6XPE4nxESZD+jH2BL3YXdWEm+hF0iu4rE1tSm0CgYEAxssvvX7qcfTmxsp1YSHJ
H5j9ifkakci5W2VbCbdMtdOlgIlCFr2JYguaL98jx7WIJ4iH54ue/fbOdlkPCOsz
YGU4TceSRHeEJ7F6c67NOXm8j2TquAW2uYH87w07g2PIUwl/pp439qoDiThA6jEX
2ztyXgNUi7poqehPUoQuvC0=
-----END PRIVATE KEY-----


@ -0,0 +1,17 @@
-----BEGIN CERTIFICATE-----
MIICsjCCAZqgAwIBAgIJAMi7aUCplU3VMA0GCSqGSIb3DQEBBQUAMBExDzANBgNV
BAMTBnVidW50dTAeFw0xMjEyMDIwNDQ3MzBaFw0yMjExMzAwNDQ3MzBaMBExDzAN
BgNVBAMTBnVidW50dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANhX
DmySf/cprvbfZloKsq1FWhDNqfe2CGrjUqlB7fJnEhlsrLuxvB91eddVLBMBur/c
FiL2vJra3+9b3nJmHomB+ssG9aX0mjUS21kjEOHMJcY/4w9abYp020evzgnOclt3
FGlHQlvjKQrZ311dssm8LDx4QMWA5UauPrRRIyKvjqxkVG8ZaUyL7IdI4T5ZBdpT
jQOvRkmo291E070G2qktp5xlBphO+WTJV2GNQTf6BDTMQBCqktxaL7MHn8jJHb4r
DENPe8Am6Z0bushoZq08TyTQCFv7MwoUUZgKhDeyNsButCatW0fhJTqCCX4LGlV3
GyGsrVbTkyxYCI/DzX8CAwEAAaMNMAswCQYDVR0TBAIwADANBgkqhkiG9w0BAQUF
AAOCAQEAQaS5yAih5NBV2edX1wkIQfAUElqmzoXvxsozDYy+P+S5tJeFXDSqzTAy
qkd/6qjkBdaARfKUJZeT/jRjqxoNtE9SR4PMOnD4zrqD26ujgZRVtPImbmVxCnMI
1B9LwvhpDHZuPGN5bPp3o+iDYea8zkS3Y31Ic889KSwKBDb1LlNogOdved+2DGd1
yCxEErImbl4B0+QPrRk2bWbDfKhDfJ2FV+9kWIoEuCQBpr2tj1E5zvTadOVm5P2M
u7kjGl4w0GIAONiMC9l2TwMmPuG1jpM/WjQkG0sTKOCl7xQKgXBNJ78Wm2bfGtgb
shr/PNbS/EyISlUa07+zJtiRnr/EiQ==
-----END CERTIFICATE-----


@ -44,6 +44,8 @@ An example of this command, from inside the `ui/` directory, would be:
consul agent -bootstrap -server -data-dir /tmp/ -ui-dir .
Basic tests can be run by adding the `?test` query parameter to the
application.
### Releasing


@ -7,6 +7,7 @@
<title>Consul</title>
<link rel="stylesheet" href="static/bootstrap.min.css">
<link rel="stylesheet" href="static/base.css">
<link rel="shortcut icon" href="static/favicon.png">
</head>
<body>
@ -247,9 +248,9 @@
{{#link-to 'nodes.show' model.session.Node tagName="div" href=false class="list-group-item list-condensed-link" }}
<div class="bg-light-gray list-bar-horizontal"></div>
<div class="name">
{{session.Node}}
{{ sessionName session }}
<small class="pull-right">
{{session.ID}}
{{session.Node}}
</small>
</div>
{{/link-to}}

ui/static/favicon.png (new binary file, stored with Git LFS; not shown)


@ -16,6 +16,8 @@
padding-left: 0px;
color: $gray;
font-size: 13px;
overflow: scroll;
height: 30px;
}
.list-group-item-heading {


@ -4,7 +4,7 @@ package main
var GitCommit string
// The main version number that is being run at the moment.
const Version = "0.3.0"
const Version = "0.3.1"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release


@ -1,4 +1,4 @@
This file doesn't do anything, but we periodically update the number
below just to force being able to deploy the website again.
1
2


@ -57,8 +57,7 @@ There are several important components that `consul agent` outputs:
* **Server**: This shows if the agent is running in the server or client mode.
Server nodes have the extra burden of participating in the consensus quorum,
storing cluster state, and handling queries. Additionally, a server may be
in "bootstrap" mode. The first server must be in this mode to allow additional
servers to join the cluster. Multiple servers cannot be in bootstrap mode,
in "bootstrap" mode. Multiple servers cannot be in bootstrap mode,
otherwise the cluster state will be inconsistent.
* **Client Addr**: This is the address used for client interfaces to the agent.
@ -66,9 +65,9 @@ There are several important components that `consul agent` outputs:
address is used for other `consul` commands. Other Consul commands such
as `consul members` connect to a running agent and use RPC to query and
control the agent. By default, this binds only to localhost. If you
change this address or port, you'll have to specify an `-rpc-addr` to commands
such as `consul members` so they know how to talk to the agent. This is also
the address other applications can use over [RPC to control Consul](/docs/agent/rpc.html).
change this address or port, you'll have to specify an `-rpc-addr` whenever
you run commands such as `consul members` so they know how to talk to the
agent. This is also the address other applications can use over [RPC to control Consul](/docs/agent/rpc.html).
* **Cluster Addr**: This is the address and ports used for communication between
Consul agents in a cluster. Every Consul agent in a cluster does not have to


@ -20,7 +20,8 @@ with no failing health checks. It's that simple!
There are a number of [configuration options](/docs/agent/options.html) that
are important for the DNS interface. They are `client_addr`, `ports.dns`, `recursor`,
`domain`, and `dns_config`. By default Consul will listen on 127.0.0.1:8600 for DNS queries
in the "consul." domain, without support for DNS recursion.
in the "consul." domain, without support for DNS recursion. All queries are case-insensitive, a
name lookup for `PostgreSQL.node.dc1.consul` will find all nodes named `postgresql`, no matter of case.
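For example, a mixed-case query returns the same records as its lowercase form. A quick check against a local agent using github.com/miekg/dns (the library the test suite uses), assuming DNS on the default 127.0.0.1:8600:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	// Mixed case on purpose; answered like postgresql.node.dc1.consul.
	m.SetQuestion("PostgreSQL.node.dc1.consul.", dns.TypeANY)
	c := new(dns.Client)
	in, _, err := c.Exchange(m, "127.0.0.1:8600")
	if err != nil {
		panic(err)
	}
	fmt.Println(in.Answer)
}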
There are a few ways to use the DNS interface. One option is to use a custom
DNS resolver library and point it at Consul. Another option is to set Consul


@ -28,7 +28,10 @@ With that key, you can enable encryption on the agent. You can verify
encryption is enabled because the output will include "Encrypted: true".
```
$ consul agent -data=/tmp/consul -encrypt=cg8StVXbQJ0gPvMd9o7yrg==
$ cat encrypt.json
{"encrypt": "cg8StVXbQJ0gPvMd9o7yrg=="}
$ consul agent -data-dir=/tmp/consul -config-file encrypt.json
==> Starting Consul agent...
==> Starting Consul agent RPC...
==> Consul agent running!
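The key itself is just 16 random bytes, base64-encoded; `consul keygen` produces exactly this shape. A rough Go equivalent, for illustration only:

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	key := make([]byte, 16) // gossip keys are 16 bytes
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	fmt.Println(base64.StdEncoding.EncodeToString(key))
}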


@ -35,11 +35,16 @@ The options below are all specified on the command-line.
as other nodes will treat the non-routability as a failure.
* `-bootstrap` - This flag is used to control if a server is in "bootstrap" mode. It is important that
no more than one server *per* datacenter be running in this mode. The initial server **must** be in bootstrap
mode. Technically, a server in bootstrap mode is allowed to self-elect as the Raft leader. It is important
that only a single node is in this mode, because otherwise consistency cannot be guaranteed if multiple
nodes are able to self-elect. Once there are multiple servers in a datacenter, it is generally a good idea
to disable bootstrap mode on all of them.
no more than one server *per* datacenter be running in this mode. Technically, a server in bootstrap mode
is allowed to self-elect as the Raft leader. It is important that only a single node is in this mode,
because otherwise consistency cannot be guaranteed if multiple nodes are able to self-elect.
It is not recommended to use this flag after a cluster has been bootstrapped.
* `-bootstrap-expect` - This flag provides the number of expected servers in the datacenter.
Either this value should not be provided, or the value must agree with other servers in
the cluster. When provided, Consul waits until the specified number of servers are
available, and then bootstraps the cluster. This allows an initial leader to be elected
automatically. This cannot be used in conjunction with the `-bootstrap` flag.
* `-bind` - The address that should be bound to for internal cluster communications.
This is an IP address that should be reachable by all other nodes in the cluster.
@ -148,6 +153,8 @@ definitions support being updated during a reload.
* `bootstrap` - Equivalent to the `-bootstrap` command-line flag.
* `bootstrap_expect` - Equivalent to the `-bootstrap-expect` command-line flag.
* `bind_addr` - Equivalent to the `-bind` command-line flag.
* `client_addr` - Equivalent to the `-client` command-line flag.


@ -199,7 +199,7 @@ There is no request body, or special response body.
### stats
The stats command is used to provide operator information for debugginer.
The stats command is used to provide operator information for debugging.
There is no request body, the response body looks like:
```


@ -18,9 +18,10 @@ information to the stderr of the agent.
In general, the telemetry information is used for debugging or otherwise
getting a better view into what Consul is doing.
Additionally, if the `-statsite` [option](/docs/agent/options.html) is provided,
then the telemetry information will be streamed to a [statsite](http://github.com/armon/statsite)
server where it can be aggregate and flushed to Graphite or any other metrics store.
Additionally, if the `statsite_addr` [configuration option](/docs/agent/options.html)
is provided, then the telemetry information will be streamed to a
[statsite](http://github.com/armon/statsite) server where it can be
aggregated and flushed to Graphite or any other metrics store.
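The streaming is done through the github.com/armon/go-metrics statsite sink already vendored by the agent. A minimal sketch of the same wiring, with an assumed local statsite address:

package main

import (
	"time"

	"github.com/armon/go-metrics"
)

func main() {
	// Roughly what the agent sets up when statsite_addr is configured.
	sink, err := metrics.NewStatsiteSink("127.0.0.1:8125")
	if err != nil {
		panic(err)
	}
	metrics.NewGlobal(metrics.DefaultConfig("consul"), sink)
	metrics.IncrCounter([]string{"consul", "example"}, 1)
	time.Sleep(2 * time.Second) // give the sink time to flush
}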
Below is an example output:


@ -6,74 +6,62 @@ sidebar_current: "docs-guides-bootstrapping"
# Bootstrapping a Datacenter
When deploying Consul to a datacenter for the first time, there is an initial bootstrapping that
must be done. Generally, the first nodes that are started are the server nodes. Remember that an
agent can run in both client and server mode. Server nodes are responsible for running
Before a Consul cluster can begin to service requests, it is necessary for a server node to
be elected leader. For this reason, the first nodes that are started are generally the server nodes.
Remember that an agent can run in both client and server mode. Server nodes are responsible for running
the [consensus protocol](/docs/internals/consensus.html), and storing the cluster state.
The client nodes are mostly stateless and rely on the server nodes, so they can be started easily.
The first server that is deployed in a new datacenter must provide the `-bootstrap` [configuration
option](/docs/agent/options.html). This option allows the server to assert leadership of the cluster
without agreement from any other server. This is necessary because at this point, there are no other
servers running in the datacenter! Lets call this first server `Node A`. When starting `Node A` something
like the following will be logged:
The recommended way to bootstrap is to use the `-bootstrap-expect` [configuration
option](/docs/agent/options.html). This option informs Consul of the expected number of
server nodes, and automatically bootstraps when that many servers are available. To prevent
inconsistencies and split-brain situations, all servers should specify the same value for `-bootstrap-expect`
or specify no value at all. Any server that does not specify a value will not attempt to
bootstrap the cluster.
2014/02/22 19:23:32 [INFO] consul: cluster leadership acquired
There is a [deployment table](/docs/internals/consensus.html#toc_3) that covers various options,
but it is recommended to have 3 or 5 total servers per data center. A single server deployment is _**highly**_
discouraged as data loss is inevitable in a failure scenario.
Once `Node A` is running, we can start the next set of servers. There is a [deployment table](/docs/internals/consensus.html#toc_3)
that covers various options, but it is recommended to have 3 or 5 total servers per data center.
A single server deployment is _**highly**_ discouraged as data loss is inevitable in a failure scenario.
We start the next servers **without** specifying `-bootstrap`. This is critical, since only one server
should ever be running in bootstrap mode*. Once `Node B` and `Node C` are started, you should see a
message to the effect of:
Suppose we are starting a 3 server cluster; we can start `Node A`, `Node B` and `Node C`, providing
the `-bootstrap-expect 3` flag. Once the nodes are started, you should see a message to the effect of:
[WARN] raft: EnableSingleNode disabled, and no known peers. Aborting election.
This indicates that the node is not in bootstrap mode, and it will not elect itself as leader.
We can now join these machines together. Since a join operation is symmetric it does not matter
which node initiates it. From `Node B` and `Node C` you can do the following:
This indicates that the nodes are expecting 2 peers, but none are known yet. The servers will not elect
themselves leader to prevent a split-brain. We can now join these machines together. Since a join operation
is symmetric it does not matter which node initiates it. From any node you can do the following:
$ consul join <Node A Address>
Successfully joined cluster by contacting 1 nodes.
$ consul join <Node A Address> <Node B Address> <Node C Address>
Successfully joined cluster by contacting 3 nodes.
Alternatively, from `Node A` you can do the following:
Once the join is successful, one of the nodes will output something like:
$ consul join <Node B Address> <Node C Address>
Successfully joined cluster by contacting 2 nodes.
[INFO] consul: adding server foo (Addr: 127.0.0.2:8300) (DC: dc1)
[INFO] consul: adding server bar (Addr: 127.0.0.1:8300) (DC: dc1)
[INFO] consul: Attempting bootstrap with nodes: [127.0.0.3:8300 127.0.0.2:8300 127.0.0.1:8300]
...
[INFO] consul: cluster leadership acquired
Once the join is successful, `Node A` should output something like:
[INFO] raft: Added peer 127.0.0.2:8300, starting replication
....
[INFO] raft: Added peer 127.0.0.3:8300, starting replication
Another good check is to run the `consul info` command. When run on `Node A`, you can
As a sanity check, the `consul info` command is a useful tool. It can be used to
verify `raft.num_peers` is now 2, and you can view the latest log index under `raft.last_log_index`.
When running `consul info` on `Node B` and `Node C` you should see `raft.last_log_index`
When running `consul info` on the followers, you should see `raft.last_log_index`
converge to the same value as the leader begins replication. That value represents the last
log entry that has been stored on disk.
This indicates that `Node B` and `Node C` have been added as peers. At this point,
all three nodes see each other as peers, `Node A` is the leader, and replication
should be working.
The final step is to remove the `-bootstrap` flag. This is important since we don't
want the node to be able to make unilateral decisions in the case of a failure of the
other two nodes. To do this, we send a `SIGINT` to `Node A` to allow it to perform
a graceful leave. Then we remove the `-bootstrap` flag and restart the node. The node
will need to rejoin the cluster, since the graceful exit leaves the cluster. Any transactions
that took place while `Node A` was offline will be replicated and the node will catch up.
Now that the servers are all started and replicating to each other, all the remaining
clients can be joined. Clients are much easier, as they can be started and perform
a `join` against any existing node. All nodes participate in a gossip protocol to
perform basic discovery, so clients will automatically find the servers and register
themselves.
<div class="alert alert-block alert-info">
* If you accidentally start another server with the flag set, do not fret.
Shutdown the node, and remove the `raft/` folder from the data directory. This will
remove the bad state caused by being in `-bootstrap` mode. Then restart the
node and join the cluster normally.
</div>
It should be noted that it is not strictly necessary to start the server nodes
before the clients; however, most operations will fail until the servers are available.
## Manual Bootstrapping
In versions of Consul previous to 0.4, bootstrapping was a more manual process.
For a guide on using the `-bootstrap` flag directly, see the [manual bootstrapping guide](/docs/guides/manual-bootstrap.html).
This is not recommended, as it is more error prone than automatic bootstrapping.


@ -41,7 +41,7 @@ of the leader.
## TTL Values
TTL values can be set to allow DNS results to be cached upstream
TTL values can be set to allow DNS results to be cached downstream
of Consul which can be used to reduce the number of lookups and to amortize
the latency of doing a DNS lookup. By default, all TTLs are zero,
preventing any caching.


@ -0,0 +1,83 @@
---
layout: "docs"
page_title: "Manual Bootstrapping"
sidebar_current: "docs-guides-bootstrapping"
---
# Manually Bootstrapping a Datacenter
When deploying Consul to a datacenter for the first time, there is an initial bootstrapping that
must be done. As of Consul 0.4, [automatic bootstrapping](/docs/guides/bootstrapping.html) is
available and is the recommended approach. However, older versions only support a manual bootstrap
that is documented here.
Generally, the first nodes that are started are the server nodes. Remember that an
agent can run in both client and server mode. Server nodes are responsible for running
the [consensus protocol](/docs/internals/consensus.html), and storing the cluster state.
The client nodes are mostly stateless and rely on the server nodes, so they can be started easily.
Manual bootstrapping requires that the first server deployed in a new datacenter be started with
the `-bootstrap` [configuration option](/docs/agent/options.html). This option allows the server to
assert leadership of the cluster without agreement from any other server. This is necessary because
at this point, there are no other servers running in the datacenter! Let's call this first server `Node A`.
When starting `Node A` something like the following will be logged:
2014/02/22 19:23:32 [INFO] consul: cluster leadership acquired
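For reference, a minimal sketch of starting `Node A` (the data directory is hypothetical):

```
$ consul agent -server -bootstrap -data-dir /tmp/consul
```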
Once `Node A` is running, we can start the next set of servers. There is a [deployment table](/docs/internals/consensus.html#toc_3)
that covers various options, but it is recommended to have 3 or 5 total servers per datacenter.
A single-server deployment is _**highly**_ discouraged, as data loss is inevitable in a failure scenario.
We start the next servers **without** specifying `-bootstrap`. This is critical, since only one server
should ever be running in bootstrap mode*. Once `Node B` and `Node C` are started, you should see a
message to the effect of:
[WARN] raft: EnableSingleNode disabled, and no known peers. Aborting election.
This indicates that the node is not in bootstrap mode, and it will not elect itself as leader.
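For reference, the additional servers might be started with something like the following (the node name and data directory are hypothetical):

```
$ consul agent -server -data-dir /tmp/consul -node=node-b
```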
We can now join these machines together. Since a join operation is symmetric, it does not matter
which node initiates it. From `Node B` and `Node C`, you can run the following:
$ consul join <Node A Address>
Successfully joined cluster by contacting 1 nodes.
Alternatively, from `Node A` you can do the following:
$ consul join <Node B Address> <Node C Address>
Successfully joined cluster by contacting 2 nodes.
Once the join is successful, `Node A` should output something like:
[INFO] raft: Added peer 127.0.0.2:8300, starting replication
....
[INFO] raft: Added peer 127.0.0.3:8300, starting replication
As a sanity check, the `consul info` command is a useful tool. It can be used to
verify `raft.num_peers` is now 2, and you can view the latest log index under `raft.last_log_index`.
When running `consul info` on the followers, you should see `raft.last_log_index`
converge to the same value as the leader begins replication. That value represents the last
log entry that has been stored on disk.
This indicates that `Node B` and `Node C` have been added as peers. At this point,
all three nodes see each other as peers, `Node A` is the leader, and replication
should be working.
The final step is to remove the `-bootstrap` flag. This is important since we don't
want the node to be able to make unilateral decisions in the case of a failure of the
other two nodes. To do this, we send a `SIGINT` to `Node A` so that it performs
a graceful leave. We then remove the `-bootstrap` flag and restart the node. The node
will need to rejoin the cluster, since the graceful leave removed it. Any transactions
that took place while `Node A` was offline will be replicated, and the node will catch up.
Now that the servers are all started and replicating to each other, all the remaining
clients can be joined. Clients are much easier to add, as they can simply be started and perform
a `join` against any existing node. All nodes participate in a gossip protocol to
perform basic discovery, so clients will automatically find the servers and register
themselves.
<div class="alert alert-block alert-info">
* If you accidentally start another server with the flag set, do not fret.
Shut down the node and remove the `raft/` folder from the data directory. This will
remove the bad state caused by being in `-bootstrap` mode. Then restart the
node and join the cluster normally.
</div>
@ -18,7 +18,7 @@ add or remove a server <a href="/docs/guides/servers.html">see this page</a>.
</div>
If you had only a single server and it has failed, simply restart it.
Note that a single server configuration requires the `-bootstrap` flag.
Note that a single server configuration requires the `-bootstrap` or `-bootstrap-expect 1` flag.
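A sketch of such a restart, reusing the flags shown above with a hypothetical data directory:

```
$ consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul
```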
If that server cannot be recovered, you need to bring up a new server.
See the [bootstrapping guide](/docs/guides/bootstrapping.html). Data loss
is inevitable, since data was not replicated to any other servers. This
@ -18,8 +18,7 @@ to first add the new nodes and then remove the old nodes.
## Adding New Servers
Adding new servers is generally straightforward. After the initial server, no further
servers should ever be started with the `-bootstrap` flag. Instead, simply start the new
Adding new servers is generally straightforward. Simply start the new
server with the `-server` flag. At this point, the server will not be a member of
any cluster, and should emit something like:
@ -20,7 +20,8 @@ will be part of the cluster.
For simplicity, we'll run a single Consul agent in server mode right now:
```
$ consul agent -server -bootstrap -data-dir /tmp/consul
$ consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul
==> WARNING: BootstrapExpect Mode is specified as 1; this is the same as Bootstrap mode.
==> WARNING: Bootstrap mode enabled! Do not enable unless necessary
==> WARNING: It is highly recommended to set GOMAXPROCS higher than 1
==> Starting Consul agent...
@ -67,15 +68,13 @@ joining clusters in the next section.
```
$ consul members
Armons-MacBook-Air 10.1.10.38:8301 alive role=consul,dc=dc1,vsn=1,vsn_min=1,vsn_max=1,port=8300,bootstrap=1
Node Address Status Type Build Protocol
Armons-MacBook-Air 10.1.10.38:8301 alive server 0.3.0 2
```
The output shows our own node, the address it is running on, its
health state, and some metadata associated with the node. Some important
metadata keys to recognize are the `role` and `dc` keys. These tell you
the service name and the datacenter that member is within. These can be
used to lookup nodes and services using the DNS interface, which is covered
shortly.
health state, its role in the cluster, as well as some versioning information.
Additional metadata can be viewed by providing the `-detailed` flag.
The output from the `members` command is generated based on the
[gossip protocol](/docs/internals/gossip.html) and is eventually consistent.
@ -34,7 +34,7 @@ will act as our server in this cluster. We're still not making a cluster
of servers.
```
$ consul agent -server -bootstrap -data-dir /tmp/consul \
$ consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul \
-node=agent-one -bind=172.20.20.10
...
```
@ -70,9 +70,10 @@ run `consul members` against each agent, you'll see that both agents now
know about each other:
```
$ consul members
agent-one 172.20.20.10:8301 alive role=consul,dc=dc1,vsn=1,vsn_min=1,vsn_max=1,port=8300,bootstrap=1
agent-two 172.20.20.11:8301 alive role=node,dc=dc1,vsn=1,vsn_min=1,vsn_max=1
$ consul members -detailed
Node Address Status Tags
agent-one 172.20.20.10:8301 alive role=consul,dc=dc1,vsn=2,vsn_min=1,vsn_max=2,port=8300,bootstrap=1
agent-two 172.20.20.11:8301 alive role=node,dc=dc1,vsn=2,vsn_min=1,vsn_max=2
```
<div class="alert alert-block alert-info">
@ -43,7 +43,7 @@ $ echo '{"service": {"name": "web", "tags": ["rails"], "port": 80}}' \
Now, restart the agent we're running, providing the configuration directory:
```
$ consul agent -server -bootstrap -data-dir /tmp/consul -config-dir /etc/consul.d
$ consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul -config-dir /etc/consul.d
==> Starting Consul agent...
...
[INFO] agent: Synced service 'web'