Update consul autopilot dependency

This commit is contained in:
parent e9af35346c
commit 3dcc65d58d
@@ -96,6 +96,11 @@ func (d *AutopilotDelegate) Raft() *raft.Raft {
 	return d.server.raft
 }
 
-func (d *AutopilotDelegate) Serf() *serf.Serf {
+func (d *AutopilotDelegate) SerfLAN() *serf.Serf {
 	return d.server.serf
 }
+
+func (d *AutopilotDelegate) SerfWAN() *serf.Serf {
+	// serf WAN isn't supported in nomad yet
+	return nil
+}
@@ -4,7 +4,6 @@ package nomad
 
 import (
 	"github.com/hashicorp/consul/agent/consul/autopilot"
-	log "github.com/hashicorp/go-hclog"
 )
 
 type EnterpriseState struct{}
@@ -12,7 +11,7 @@ type EnterpriseState struct{}
 func (s *Server) setupEnterprise(config *Config) error {
 	// Set up the OSS version of autopilot
 	apDelegate := &AutopilotDelegate{s}
-	s.autopilot = autopilot.NewAutopilot(s.logger.StandardLoggerIntercept(&log.StandardLoggerOptions{InferLevels: true}), apDelegate, config.AutopilotInterval, config.ServerHealthInterval)
+	s.autopilot = autopilot.NewAutopilot(s.logger, apDelegate, config.AutopilotInterval, config.ServerHealthInterval)
 
 	return nil
 }
128 changes: vendor/github.com/hashicorp/consul/agent/consul/autopilot/autopilot.go (generated, vendored)
@@ -3,15 +3,15 @@ package autopilot
 import (
 	"context"
 	"fmt"
-	"log"
 	"net"
 	"strconv"
 	"sync"
 	"time"
 
+	"github.com/hashicorp/consul/logging"
+	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-version"
 	"github.com/hashicorp/raft"
 	"github.com/hashicorp/serf/serf"
 )
 
 // Delegate is the interface for the Autopilot mechanism
@@ -22,14 +22,15 @@ type Delegate interface {
 	NotifyHealth(OperatorHealthReply)
 	PromoteNonVoters(*Config, OperatorHealthReply) ([]raft.Server, error)
 	Raft() *raft.Raft
-	Serf() *serf.Serf
+	SerfLAN() *serf.Serf
+	SerfWAN() *serf.Serf
 }
 
 // Autopilot is a mechanism for automatically managing the Raft
 // quorum using server health information along with updates from Serf gossip.
 // For more information, see https://www.consul.io/docs/guides/autopilot.html
 type Autopilot struct {
-	logger   *log.Logger
+	logger   hclog.Logger
 	delegate Delegate
 
 	interval time.Duration
@@ -53,9 +54,9 @@ type ServerInfo struct {
 	Status serf.MemberStatus
 }
 
-func NewAutopilot(logger *log.Logger, delegate Delegate, interval, healthInterval time.Duration) *Autopilot {
+func NewAutopilot(logger hclog.Logger, delegate Delegate, interval, healthInterval time.Duration) *Autopilot {
 	return &Autopilot{
-		logger:         logger,
+		logger:         logger.Named(logging.Autopilot),
 		delegate:       delegate,
 		interval:       interval,
 		healthInterval: healthInterval,
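The constructor change above is the heart of the logging migration: autopilot now takes an hclog.Logger and derives a named sub-logger from it. A minimal standalone sketch (using only the public hclog API) of what logger.Named(logging.Autopilot) produces:

package main

import "github.com/hashicorp/go-hclog"

func main() {
	// Named returns a sub-logger whose name is appended to the parent's,
	// so component tags like "[ERR] autopilot:" no longer need to be
	// hand-written into every format string.
	root := hclog.New(&hclog.LoggerOptions{Name: "consul", Level: hclog.Debug})
	ap := root.Named("autopilot") // logging.Autopilot is the string "autopilot"
	ap.Error("Error promoting servers", "error", "boom")
	// emits something like: [ERROR] consul.autopilot: Error promoting servers: error=boom
}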
@@ -110,15 +111,15 @@ func (a *Autopilot) run() {
 			return
 		case <-ticker.C:
 			if err := a.promoteServers(); err != nil {
-				a.logger.Printf("[ERR] autopilot: Error promoting servers: %v", err)
+				a.logger.Error("Error promoting servers", "error", err)
 			}
 
 			if err := a.pruneDeadServers(); err != nil {
-				a.logger.Printf("[ERR] autopilot: Error checking for dead servers to remove: %s", err)
+				a.logger.Error("Error checking for dead servers to remove", "error", err)
 			}
 		case <-a.removeDeadCh:
 			if err := a.pruneDeadServers(); err != nil {
-				a.logger.Printf("[ERR] autopilot: Error checking for dead servers to remove: %s", err)
+				a.logger.Error("Error checking for dead servers to remove", "error", err)
 			}
 		}
 	}
@@ -173,6 +174,20 @@ func (a *Autopilot) RemoveDeadServers() {
 	}
 }
 
+func canRemoveServers(peers, minQuorum, deadServers int) (bool, string) {
+	if peers-deadServers < minQuorum {
+		return false, fmt.Sprintf("denied, because removing %d/%d servers would leave less than the minimal allowed quorum of %d servers", deadServers, peers, minQuorum)
+	}
+
+	// Only do removals if a minority of servers will be affected.
+	// For failure tolerance of F we need n = 2F+1 servers.
+	// This means we can safely remove up to (n-1)/2 servers.
+	if deadServers > (peers-1)/2 {
+		return false, fmt.Sprintf("denied, because removing the majority of servers %d/%d is not safe", deadServers, peers)
+	}
+	return true, fmt.Sprintf("allowed, because removing %d/%d servers leaves a majority of servers above the minimal allowed quorum %d", deadServers, peers, minQuorum)
+}
+
 // pruneDeadServers removes up to numPeers/2 failed servers
 func (a *Autopilot) pruneDeadServers() error {
 	conf := a.delegate.AutopilotConfig()
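A quick sanity check of the two guards in canRemoveServers, written as a same-package test sketch (not part of the vendored code): with 5 peers and MinQuorum 3, removing 2 dead servers passes both checks; removing 3 trips the quorum floor; and removing 2 of 4 trips the (n-1)/2 majority rule.

package autopilot

import "testing"

func TestCanRemoveServers(t *testing.T) {
	cases := []struct {
		peers, minQuorum, dead int
		want                   bool
	}{
		{5, 3, 2, true},  // 5-2=3 servers remain, >= MinQuorum; 2 <= (5-1)/2
		{5, 3, 3, false}, // 5-3=2 servers remain, below MinQuorum of 3
		{4, 0, 2, false}, // 2 dead > (4-1)/2 = 1, majority-safety check fails
	}
	for _, c := range cases {
		ok, msg := canRemoveServers(c.peers, c.minQuorum, c.dead)
		if ok != c.want {
			t.Fatalf("peers=%d minQuorum=%d dead=%d: got %v (%s)", c.peers, c.minQuorum, c.dead, ok, msg)
		}
	}
}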
@@ -182,7 +197,7 @@ func (a *Autopilot) pruneDeadServers() error {
 
 	// Failed servers are known to Serf and marked failed, and stale servers
 	// are known to Raft but not Serf.
-	var failed []string
+	var failed []serf.Member
 	staleRaftServers := make(map[string]raft.Server)
 	raftNode := a.delegate.Raft()
 	future := raftNode.GetConfiguration()
@@ -194,58 +209,73 @@ func (a *Autopilot) pruneDeadServers() error {
 	for _, server := range raftConfig.Servers {
 		staleRaftServers[string(server.Address)] = server
 	}
 
-	serfLAN := a.delegate.Serf()
+	serfWAN := a.delegate.SerfWAN()
+	serfLAN := a.delegate.SerfLAN()
 	for _, member := range serfLAN.Members() {
 		server, err := a.delegate.IsServer(member)
 		if err != nil {
-			a.logger.Printf("[INFO] autopilot: Error parsing server info for %q: %s", member.Name, err)
+			a.logger.Warn("Error parsing server info", "name", member.Name, "error", err)
 			continue
 		}
 		if server != nil {
 			// todo(kyhavlov): change this to index by UUID
-			if _, ok := staleRaftServers[server.Addr.String()]; ok {
+			s, found := staleRaftServers[server.Addr.String()]
+			if found {
 				delete(staleRaftServers, server.Addr.String())
 			}
 
 			if member.Status == serf.StatusFailed {
-				failed = append(failed, member.Name)
+				// If the node is a nonvoter, we can remove it immediately.
+				if found && s.Suffrage == raft.Nonvoter {
+					a.logger.Info("Attempting removal of failed server node", "name", member.Name)
+					go serfLAN.RemoveFailedNode(member.Name)
+					if serfWAN != nil {
+						go serfWAN.RemoveFailedNode(member.Name)
+					}
+				} else {
+					failed = append(failed, member)
+				}
 			}
 		}
 	}
 
-	// We can bail early if there's nothing to do.
-	removalCount := len(failed) + len(staleRaftServers)
-	if removalCount == 0 {
+	deadServers := len(failed) + len(staleRaftServers)
+
+	// nothing to do
+	if deadServers == 0 {
 		return nil
 	}
 
-	// Only do removals if a minority of servers will be affected.
-	peers := NumPeers(raftConfig)
-	if removalCount < peers/2 {
-		for _, node := range failed {
-			a.logger.Printf("[INFO] autopilot: Attempting removal of failed server node %q", node)
-			go serfLAN.RemoveFailedNode(node)
-		}
-
-		minRaftProtocol, err := a.MinRaftProtocol()
-		if err != nil {
-			return err
-		}
-		for _, raftServer := range staleRaftServers {
-			a.logger.Printf("[INFO] autopilot: Attempting removal of stale %s", fmtServer(raftServer))
-			var future raft.Future
-			if minRaftProtocol >= 2 {
-				future = raftNode.RemoveServer(raftServer.ID, 0, 0)
-			} else {
-				future = raftNode.RemovePeer(raftServer.Address)
-			}
-			if err := future.Error(); err != nil {
-				return err
-			}
-		}
-	} else {
-		a.logger.Printf("[DEBUG] autopilot: Failed to remove dead servers: too many dead servers: %d/%d", removalCount, peers)
+	if ok, msg := canRemoveServers(NumPeers(raftConfig), int(conf.MinQuorum), deadServers); !ok {
+		a.logger.Debug("Failed to remove dead servers", "error", msg)
+		return nil
+	}
+
+	for _, node := range failed {
+		a.logger.Info("Attempting removal of failed server node", "name", node.Name)
+		go serfLAN.RemoveFailedNode(node.Name)
+		if serfWAN != nil {
+			go serfWAN.RemoveFailedNode(fmt.Sprintf("%s.%s", node.Name, node.Tags["dc"]))
+		}
+	}
+
+	minRaftProtocol, err := a.MinRaftProtocol()
+	if err != nil {
+		return err
+	}
+	for _, raftServer := range staleRaftServers {
+		a.logger.Info("Attempting removal of stale server", "server", fmtServer(raftServer))
+		var future raft.Future
+		if minRaftProtocol >= 2 {
+			future = raftNode.RemoveServer(raftServer.ID, 0, 0)
+		} else {
+			future = raftNode.RemovePeer(raftServer.Address)
+		}
+		if err := future.Error(); err != nil {
+			return err
+		}
 	}
 
 	return nil
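One detail worth noting in the removal loops above: LAN members are known by bare node name, while serf WAN members are registered as node.datacenter, hence the fmt.Sprintf on the WAN call. A trivial standalone illustration:

package main

import "fmt"

func main() {
	// Serf WAN pools qualify node names with the datacenter, so the same
	// server appears as "server1" on the LAN and "server1.dc1" on the WAN.
	node, dc := "server1", "dc1"
	fmt.Println(node)                           // LAN name: server1
	fmt.Println(fmt.Sprintf("%s.%s", node, dc)) // WAN name: server1.dc1
}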
@@ -253,7 +283,7 @@ func (a *Autopilot) pruneDeadServers() error {
 
 // MinRaftProtocol returns the lowest supported Raft protocol among alive servers
 func (a *Autopilot) MinRaftProtocol() (int, error) {
-	return minRaftProtocol(a.delegate.Serf().Members(), a.delegate.IsServer)
+	return minRaftProtocol(a.delegate.SerfLAN().Members(), a.delegate.IsServer)
 }
 
 func minRaftProtocol(members []serf.Member, serverFunc func(serf.Member) (*ServerInfo, error)) (int, error) {
@@ -304,7 +334,7 @@ func (a *Autopilot) handlePromotions(promotions []raft.Server) error {
 	// to promote early than remove early, so by promoting as soon as
 	// possible we have chosen that as the solution here.
 	for _, server := range promotions {
-		a.logger.Printf("[INFO] autopilot: Promoting %s to voter", fmtServer(server))
+		a.logger.Info("Promoting server to voter", "server", fmtServer(server))
 		addFuture := a.delegate.Raft().AddVoter(server.ID, server.Address, 0, 0)
 		if err := addFuture.Error(); err != nil {
 			return fmt.Errorf("failed to add raft peer: %v", err)
@@ -335,7 +365,7 @@ func (a *Autopilot) serverHealthLoop() {
 			return
 		case <-ticker.C:
 			if err := a.updateClusterHealth(); err != nil {
-				a.logger.Printf("[ERR] autopilot: Error updating cluster health: %s", err)
+				a.logger.Error("Error updating cluster health", "error", err)
 			}
 		}
 	}
@@ -362,14 +392,14 @@ func (a *Autopilot) updateClusterHealth() error {
 	// Get the serf members which are Consul servers
 	var serverMembers []serf.Member
 	serverMap := make(map[string]*ServerInfo)
-	for _, member := range a.delegate.Serf().Members() {
+	for _, member := range a.delegate.SerfLAN().Members() {
 		if member.Status == serf.StatusLeft {
 			continue
 		}
 
 		server, err := a.delegate.IsServer(member)
 		if err != nil {
-			a.logger.Printf("[INFO] autopilot: Error parsing server info for %q: %s", member.Name, err)
+			a.logger.Warn("Error parsing server info", "name", member.Name, "error", err)
 			continue
 		}
 		if server != nil {
@@ -422,7 +452,7 @@ func (a *Autopilot) updateClusterHealth() error {
 		health.Version = parts.Build.String()
 		if stats, ok := fetchedStats[string(server.ID)]; ok {
 			if err := a.updateServerHealth(&health, parts, stats, autopilotConf, targetLastIndex); err != nil {
-				a.logger.Printf("[WARN] autopilot: Error updating server %s health: %s", fmtServer(server), err)
+				a.logger.Warn("Error updating server health", "server", fmtServer(server), "error", err)
 			}
 		}
 	} else {
4 changes: vendor/github.com/hashicorp/consul/agent/consul/autopilot/structs.go (generated, vendored)
@@ -20,6 +20,10 @@ type Config struct {
 	// be behind before being considered unhealthy.
 	MaxTrailingLogs uint64
 
+	// MinQuorum sets the minimum number of servers required in a cluster
+	// before autopilot can prune dead servers.
+	MinQuorum uint
+
 	// ServerStabilizationTime is the minimum amount of time a server must be
 	// in a stable, healthy state before it can be added to the cluster. Only
 	// applicable with Raft protocol version 3 or higher.
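For context, a minimal sketch of setting the new field (import path as vendored here; CleanupDeadServers and MaxTrailingLogs are pre-existing Config fields):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/consul/autopilot"
)

func main() {
	// With MinQuorum set, pruneDeadServers refuses any removal that would
	// leave fewer than this many servers, regardless of how many are dead.
	conf := &autopilot.Config{
		CleanupDeadServers: true,
		MaxTrailingLogs:    250,
		MinQuorum:          3,
	}
	fmt.Println(conf.MinQuorum)
}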
56 changes: vendor/github.com/hashicorp/consul/logging/gated_writer.go (generated, vendored, new file)
@@ -0,0 +1,56 @@
package logging

import (
	"io"
	"sync"
)

// GatedWriter is an io.Writer implementation that buffers all of its
// data into an internal buffer until it is told to let data through.
type GatedWriter struct {
	Writer io.Writer

	buf   [][]byte
	flush bool
	lock  sync.RWMutex
}

// Flush tells the GatedWriter to flush any buffered data and to stop
// buffering.
func (w *GatedWriter) Flush() {
	w.lock.Lock()
	w.flush = true
	w.lock.Unlock()

	for _, p := range w.buf {
		w.Write(p)
	}
	w.buf = nil
}

func (w *GatedWriter) Write(p []byte) (n int, err error) {
	// Once we flush we no longer synchronize writers since there's
	// no use of the internal buffer. This is the happy path.
	w.lock.RLock()
	if w.flush {
		w.lock.RUnlock()
		return w.Writer.Write(p)
	}
	w.lock.RUnlock()

	// Now take the write lock.
	w.lock.Lock()
	defer w.lock.Unlock()

	// Things could have changed between the locking operations, so we
	// have to check one more time.
	if w.flush {
		return w.Writer.Write(p)
	}

	// Buffer up the written data.
	p2 := make([]byte, len(p))
	copy(p2, p)
	w.buf = append(w.buf, p2)
	return len(p), nil
}
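Usage of GatedWriter follows a simple lifecycle: buffer during startup, then open the gate. A small sketch:

package main

import (
	"os"

	"github.com/hashicorp/consul/logging"
)

func main() {
	gw := &logging.GatedWriter{Writer: os.Stdout}
	gw.Write([]byte("held until flush\n")) // buffered in memory
	gw.Flush()                             // replays the buffer, then writes pass through
	gw.Write([]byte("written directly\n")) // goes straight to os.Stdout
}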
102 changes: vendor/github.com/hashicorp/consul/logging/grpc.go (generated, vendored, new file)
@@ -0,0 +1,102 @@
package logging

import (
	"fmt"

	"github.com/hashicorp/go-hclog"
)

// GRPCLogger wraps a hclog.Logger and implements the grpclog.LoggerV2 interface
// allowing gRPC servers to log to the standard Consul logger.
type GRPCLogger struct {
	level  string
	logger hclog.Logger
}

// NewGRPCLogger creates a grpclog.LoggerV2 that will output to the supplied
// logger with Severity/Verbosity level appropriate for the given config.
//
// Note that grpclog has Info, Warning, Error, Fatal severity levels AND integer
// verbosity levels for additional info. Verbose logs in glog are always INFO
// severity so we map Info,V0 to INFO, Info,V1 to DEBUG, and Info,V>1 to TRACE.
func NewGRPCLogger(config *Config, logger hclog.Logger) *GRPCLogger {
	return &GRPCLogger{
		level:  config.LogLevel,
		logger: logger,
	}
}

// Info implements grpclog.LoggerV2
func (g *GRPCLogger) Info(args ...interface{}) {
	g.logger.Info(fmt.Sprint(args...))
}

// Infoln implements grpclog.LoggerV2
func (g *GRPCLogger) Infoln(args ...interface{}) {
	g.Info(fmt.Sprint(args...))
}

// Infof implements grpclog.LoggerV2
func (g *GRPCLogger) Infof(format string, args ...interface{}) {
	g.Info(fmt.Sprintf(format, args...))
}

// Warning implements grpclog.LoggerV2
func (g *GRPCLogger) Warning(args ...interface{}) {
	g.logger.Warn(fmt.Sprint(args...))
}

// Warningln implements grpclog.LoggerV2
func (g *GRPCLogger) Warningln(args ...interface{}) {
	g.Warning(fmt.Sprint(args...))
}

// Warningf implements grpclog.LoggerV2
func (g *GRPCLogger) Warningf(format string, args ...interface{}) {
	g.Warning(fmt.Sprintf(format, args...))
}

// Error implements grpclog.LoggerV2
func (g *GRPCLogger) Error(args ...interface{}) {
	g.logger.Error(fmt.Sprint(args...))
}

// Errorln implements grpclog.LoggerV2
func (g *GRPCLogger) Errorln(args ...interface{}) {
	g.Error(fmt.Sprint(args...))
}

// Errorf implements grpclog.LoggerV2
func (g *GRPCLogger) Errorf(format string, args ...interface{}) {
	g.Error(fmt.Sprintf(format, args...))
}

// Fatal implements grpclog.LoggerV2
func (g *GRPCLogger) Fatal(args ...interface{}) {
	g.logger.Error(fmt.Sprint(args...))
}

// Fatalln implements grpclog.LoggerV2
func (g *GRPCLogger) Fatalln(args ...interface{}) {
	g.Fatal(fmt.Sprint(args...))
}

// Fatalf implements grpclog.LoggerV2
func (g *GRPCLogger) Fatalf(format string, args ...interface{}) {
	g.Fatal(fmt.Sprintf(format, args...))
}

// V implements grpclog.LoggerV2
func (g *GRPCLogger) V(l int) bool {
	switch g.level {
	case "TRACE":
		// Enable ALL the verbosity!
		return true
	case "DEBUG":
		return l < 2
	case "INFO":
		return l < 1
	default:
		return false
	}
}
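A sketch of how this adapter is intended to be wired up, assuming the standard grpclog.SetLoggerV2 hook from grpc-go (it must run before any gRPC activity):

package main

import (
	"github.com/hashicorp/consul/logging"
	"github.com/hashicorp/go-hclog"
	"google.golang.org/grpc/grpclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "grpc", Level: hclog.Debug})
	// LogLevel "DEBUG" makes V(l) admit verbosity levels 0 and 1.
	grpclog.SetLoggerV2(logging.NewGRPCLogger(&logging.Config{LogLevel: "DEBUG"}, logger))
}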
36 changes: vendor/github.com/hashicorp/consul/logging/log_levels.go (generated, vendored, new file)
@@ -0,0 +1,36 @@
package logging

import (
	"strings"

	"github.com/hashicorp/go-hclog"
)

var (
	allowedLogLevels = []string{"TRACE", "DEBUG", "INFO", "WARN", "ERR", "ERROR"}
)

func AllowedLogLevels() []string {
	c := make([]string, len(allowedLogLevels))
	copy(c, allowedLogLevels)
	return c
}

// ValidateLogLevel verifies that a new log level is valid
func ValidateLogLevel(minLevel string) bool {
	newLevel := strings.ToUpper(minLevel)
	for _, level := range allowedLogLevels {
		if level == newLevel {
			return true
		}
	}
	return false
}

// LevelFromString maps a level string to an hclog.Level, keeping backwards
// compatibility with the former ERR log level.
func LevelFromString(level string) hclog.Level {
	if strings.ToUpper(level) == "ERR" {
		level = "ERROR"
	}
	return hclog.LevelFromString(level)
}
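A small sketch of the compatibility behavior: "ERR" validates and maps to the same hclog level as "ERROR", while unknown strings fall through to hclog.NoLevel:

package main

import (
	"fmt"

	"github.com/hashicorp/consul/logging"
	"github.com/hashicorp/go-hclog"
)

func main() {
	fmt.Println(logging.ValidateLogLevel("err"))                   // true (case-insensitive)
	fmt.Println(logging.LevelFromString("ERR") == hclog.Error)     // true
	fmt.Println(logging.LevelFromString("bogus") == hclog.NoLevel) // true: unknown strings yield NoLevel
}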
135 changes: vendor/github.com/hashicorp/consul/logging/logfile.go (generated, vendored, new file)
@@ -0,0 +1,135 @@
package logging

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"
)

var (
	now = time.Now
)

// LogFile is used to set up a file-based logger that also performs log rotation.
type LogFile struct {
	// Name of the log file
	fileName string

	// Path to the log file
	logPath string

	// Duration between each file rotation operation
	duration time.Duration

	// LastCreated represents the creation time of the latest log
	LastCreated time.Time

	// FileInfo is the pointer to the current file being written to
	FileInfo *os.File

	// MaxBytes is the maximum number of desired bytes for a log file
	MaxBytes int

	// BytesWritten is the number of bytes written in the current log file
	BytesWritten int64

	// Max rotated files to keep before removing them.
	MaxFiles int

	// acquire is the mutex utilized to ensure we have no concurrency issues
	acquire sync.Mutex
}

func (l *LogFile) fileNamePattern() string {
	// Extract the file extension
	fileExt := filepath.Ext(l.fileName)
	// If we have no file extension we append .log
	if fileExt == "" {
		fileExt = ".log"
	}
	// Remove the file extension from the filename
	return strings.TrimSuffix(l.fileName, fileExt) + "-%s" + fileExt
}

func (l *LogFile) openNew() error {
	fileNamePattern := l.fileNamePattern()

	createTime := now()
	newfileName := fmt.Sprintf(fileNamePattern, strconv.FormatInt(createTime.UnixNano(), 10))
	newfilePath := filepath.Join(l.logPath, newfileName)

	// Try creating a file. We truncate the file because we are the only authority to write the logs
	filePointer, err := os.OpenFile(newfilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0640)
	if err != nil {
		return err
	}

	l.FileInfo = filePointer
	// New file, new bytes tracker, new creation time :)
	l.LastCreated = createTime
	l.BytesWritten = 0
	return nil
}

func (l *LogFile) rotate() error {
	// Get the time from the last point of contact
	timeElapsed := time.Since(l.LastCreated)
	// Rotate if we hit the byte file limit or the time limit
	if (l.BytesWritten >= int64(l.MaxBytes) && (l.MaxBytes > 0)) || timeElapsed >= l.duration {
		l.FileInfo.Close()
		if err := l.pruneFiles(); err != nil {
			return err
		}
		return l.openNew()
	}
	return nil
}

func (l *LogFile) pruneFiles() error {
	if l.MaxFiles == 0 {
		return nil
	}
	pattern := l.fileNamePattern()
	// Get all the files that match the log file pattern
	globExpression := filepath.Join(l.logPath, fmt.Sprintf(pattern, "*"))
	matches, err := filepath.Glob(globExpression)
	if err != nil {
		return err
	}
	var stale int
	if l.MaxFiles <= -1 {
		// Prune everything
		stale = len(matches)
	} else {
		// Prune if there are more files stored than the configured max
		stale = len(matches) - l.MaxFiles
	}
	for i := 0; i < stale; i++ {
		if err := os.Remove(matches[i]); err != nil {
			return err
		}
	}
	return nil
}

// Write is used to implement io.Writer
func (l *LogFile) Write(b []byte) (n int, err error) {
	l.acquire.Lock()
	defer l.acquire.Unlock()
	// Create a new file if we have no file to write to
	if l.FileInfo == nil {
		if err := l.openNew(); err != nil {
			return 0, err
		}
	}
	// Check for the last contact and rotate if necessary
	if err := l.rotate(); err != nil {
		return 0, err
	}
	l.BytesWritten += int64(len(b))
	return l.FileInfo.Write(b)
}
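The rotated-file naming is the only subtle part of this file. A standalone re-statement of the fileNamePattern/openNew convention, using a hypothetical rotatedName helper that mirrors the logic above:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
	"time"
)

// rotatedName mirrors fileNamePattern + openNew: "<base>-<unix-nanos><ext>",
// defaulting the extension to ".log" when the name has none.
func rotatedName(fileName string, t time.Time) string {
	ext := filepath.Ext(fileName)
	if ext == "" {
		ext = ".log"
	}
	base := strings.TrimSuffix(fileName, ext)
	return fmt.Sprintf("%s-%d%s", base, t.UnixNano(), ext)
}

func main() {
	fmt.Println(rotatedName("consul.log", time.Unix(1581615327, 0)))
	// consul-1581615327000000000.log
}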
146 changes: vendor/github.com/hashicorp/consul/logging/logger.go (generated, vendored, new file)
@@ -0,0 +1,146 @@
package logging

import (
	"fmt"
	"io"
	"path/filepath"
	"time"

	"github.com/hashicorp/go-hclog"
	gsyslog "github.com/hashicorp/go-syslog"
	"github.com/mitchellh/cli"
)

// Config is used to set up logging.
type Config struct {
	// LogLevel is the minimum level to be logged.
	LogLevel string

	// LogJSON controls outputting logs in a JSON format.
	LogJSON bool

	// Name is the name the returned logger will use to prefix log lines.
	Name string

	// EnableSyslog controls forwarding to syslog.
	EnableSyslog bool

	// SyslogFacility is the destination for syslog forwarding.
	SyslogFacility string

	// LogFilePath is the path to write the logs to the user specified file.
	LogFilePath string

	// LogRotateDuration is the user specified time to rotate logs
	LogRotateDuration time.Duration

	// LogRotateBytes is the user specified byte limit to rotate logs
	LogRotateBytes int

	// LogRotateMaxFiles is the maximum number of past archived log files to keep
	LogRotateMaxFiles int
}

const (
	// defaultRotateDuration is the default time taken by the agent to rotate logs
	defaultRotateDuration = 24 * time.Hour
)

var (
	logRotateDuration time.Duration
	logRotateBytes    int
)

// Setup is used to perform setup of several logging objects:
//
// * A hclog.Logger is used to perform filtering by log level and write to io.Writer.
// * A GatedWriter is used to buffer logs until startup UI operations are
//   complete. After this is flushed then logs flow directly to output
//   destinations.
// * An io.Writer is provided as the sink for all logs to flow to.
//
// The provided ui object will get any log messages related to setting up
// logging itself, and will also be hooked up to the gated logger. The final bool
// parameter indicates if logging was set up successfully.
func Setup(config *Config, ui cli.Ui) (hclog.InterceptLogger, *GatedWriter, io.Writer, bool) {
	// The gated writer buffers logs at startup and holds until it's flushed.
	logGate := &GatedWriter{
		Writer: &cli.UiWriter{Ui: ui},
	}

	if !ValidateLogLevel(config.LogLevel) {
		ui.Error(fmt.Sprintf(
			"Invalid log level: %s. Valid log levels are: %v",
			config.LogLevel, allowedLogLevels))
		return nil, nil, nil, false
	}

	// Set up syslog if it's enabled.
	var syslog io.Writer
	if config.EnableSyslog {
		retries := 12
		delay := 5 * time.Second
		for i := 0; i <= retries; i++ {
			l, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, config.SyslogFacility, "consul")
			if err == nil {
				syslog = &SyslogWrapper{l}
				break
			}

			ui.Error(fmt.Sprintf("Syslog setup error: %v", err))
			if i == retries {
				timeout := time.Duration(retries) * delay
				ui.Error(fmt.Sprintf("Syslog setup did not succeed within timeout (%s).", timeout.String()))
				return nil, nil, nil, false
			}

			ui.Error(fmt.Sprintf("Retrying syslog setup in %s...", delay.String()))
			time.Sleep(delay)
		}
	}
	writers := []io.Writer{logGate}

	var logOutput io.Writer
	if syslog != nil {
		writers = append(writers, syslog)
	}

	// Create a file logger if the user has specified the path to the log file
	if config.LogFilePath != "" {
		dir, fileName := filepath.Split(config.LogFilePath)
		// If a path is provided but has no fileName a default is provided.
		if fileName == "" {
			fileName = "consul.log"
		}
		// Try to enter the user specified log rotation duration first
		if config.LogRotateDuration != 0 {
			logRotateDuration = config.LogRotateDuration
		} else {
			// Default to 24 hrs if no rotation period is specified
			logRotateDuration = defaultRotateDuration
		}
		// User specified byte limit for log rotation if one is provided
		if config.LogRotateBytes != 0 {
			logRotateBytes = config.LogRotateBytes
		}
		logFile := &LogFile{
			fileName: fileName,
			logPath:  dir,
			duration: logRotateDuration,
			MaxBytes: logRotateBytes,
			MaxFiles: config.LogRotateMaxFiles,
		}
		writers = append(writers, logFile)
	}

	logOutput = io.MultiWriter(writers...)

	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Level:      LevelFromString(config.LogLevel),
		Name:       config.Name,
		Output:     logOutput,
		JSONFormat: config.LogJSON,
	})

	return logger, logGate, logOutput, true
}
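A sketch of calling Setup from an agent, assuming only the signatures above; the gate is flushed once startup UI output is done:

package main

import (
	"os"

	"github.com/hashicorp/consul/logging"
	"github.com/mitchellh/cli"
)

func main() {
	ui := &cli.BasicUi{Writer: os.Stdout, ErrorWriter: os.Stderr}
	logger, gate, _, ok := logging.Setup(&logging.Config{
		LogLevel: "INFO",
		Name:     "agent",
	}, ui)
	if !ok {
		os.Exit(1) // Setup already reported the problem through the UI
	}
	logger.Info("agent starting") // buffered by the gated writer...
	gate.Flush()                  // ...until the startup banner is out
}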
50 changes: vendor/github.com/hashicorp/consul/logging/names.go (generated, vendored, new file)
@@ -0,0 +1,50 @@
package logging

const (
	ACL           string = "acl"
	Agent         string = "agent"
	AntiEntropy   string = "anti_entropy"
	AutoEncrypt   string = "auto_encrypt"
	Autopilot     string = "autopilot"
	AWS           string = "aws"
	Azure         string = "azure"
	CA            string = "ca"
	CentralConfig string = "central_config"
	ConfigEntry   string = "config_entry"
	Connect       string = "connect"
	Consul        string = "consul"
	ConsulClient  string = "client"
	ConsulServer  string = "server"
	Coordinate    string = "coordinate"
	DNS           string = "dns"
	Envoy         string = "envoy"
	FSM           string = "fsm"
	HTTP          string = "http"
	Intentions    string = "intentions"
	Internal      string = "internal"
	KV            string = "kvs"
	LAN           string = "lan"
	Leader        string = "leader"
	Legacy        string = "legacy"
	License       string = "license"
	Manager       string = "manager"
	Memberlist    string = "memberlist"
	MeshGateway   string = "mesh_gateway"
	Namespace     string = "namespace"
	Operator      string = "operator"
	PreparedQuery string = "prepared_query"
	Proxy         string = "proxy"
	ProxyConfig   string = "proxycfg"
	Raft          string = "raft"
	Replication   string = "replication"
	Router        string = "router"
	RPC           string = "rpc"
	Serf          string = "serf"
	Session       string = "session"
	Sentinel      string = "sentinel"
	Snapshot      string = "snapshot"
	TLSUtil       string = "tlsutil"
	Transaction   string = "txn"
	WAN           string = "wan"
	Watch         string = "watch"
)
50 changes: vendor/github.com/hashicorp/consul/logging/syslog.go (generated, vendored, new file)
@@ -0,0 +1,50 @@
package logging

import (
	"bytes"

	"github.com/hashicorp/go-syslog"
)

// levelPriority is used to map a log level to a
// syslog priority level
var levelPriority = map[string]gsyslog.Priority{
	"TRACE": gsyslog.LOG_DEBUG,
	"DEBUG": gsyslog.LOG_INFO,
	"INFO":  gsyslog.LOG_NOTICE,
	"WARN":  gsyslog.LOG_WARNING,
	"ERROR": gsyslog.LOG_ERR,
	"CRIT":  gsyslog.LOG_CRIT,
}

// SyslogWrapper is used to cleanup log messages before
// writing them to a Syslogger. Implements the io.Writer
// interface.
type SyslogWrapper struct {
	l gsyslog.Syslogger
}

// Write is used to implement io.Writer
func (s *SyslogWrapper) Write(p []byte) (int, error) {
	// Extract log level
	var level string
	afterLevel := p
	x := bytes.IndexByte(p, '[')
	if x >= 0 {
		y := bytes.IndexByte(p[x:], ']')
		if y >= 0 {
			level = string(p[x+1 : x+y])
			afterLevel = p[x+y+2:]
		}
	}

	// Each log level will be handled by a specific syslog priority
	priority, ok := levelPriority[level]
	if !ok {
		priority = gsyslog.LOG_NOTICE
	}

	// Attempt the write
	err := s.l.WriteLevel(priority, afterLevel)
	return len(p), err
}
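The level extraction above assumes the classic "[LEVEL] " prefix of stdlib-formatted Consul logs. A standalone demo of the same slicing:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	p := []byte("2020/02/13 12:00:00 [WARN] agent: low disk space")
	x := bytes.IndexByte(p, '[')
	y := bytes.IndexByte(p[x:], ']')
	level := string(p[x+1 : x+y]) // "WARN" -> gsyslog.LOG_WARNING
	after := p[x+y+2:]            // skips "] ", leaving the message body
	fmt.Printf("level=%q rest=%q\n", level, after)
}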
3 changes: vendor/vendor.json (vendored)
@@ -205,11 +205,12 @@
 	{"path":"github.com/hashicorp/consul-template/template","checksumSHA1":"/AjvyyxEZXksXgxm1gmdJdJoXkw=","revision":"f04989c64e9bd4c49a7217ac4635732dd8e0bb26","revisionTime":"2019-11-08T20:12:44Z","version":"v0.22.1","versionExact":"v0.22.1"},
 	{"path":"github.com/hashicorp/consul-template/version","checksumSHA1":"CqEejkuDiTgPVrLg0xrMmAWvNwY=","revision":"f04989c64e9bd4c49a7217ac4635732dd8e0bb26","revisionTime":"2019-11-08T20:12:44Z","version":"v0.22.1","versionExact":"v0.22.1"},
 	{"path":"github.com/hashicorp/consul-template/watch","checksumSHA1":"cBIJewG416sFREUenIUK9v3zrUk=","revision":"f04989c64e9bd4c49a7217ac4635732dd8e0bb26","revisionTime":"2019-11-08T20:12:44Z","version":"v0.22.1","versionExact":"v0.22.1"},
-	{"path":"github.com/hashicorp/consul/agent/consul/autopilot","checksumSHA1":"+I7fgoQlrnTUGW5krqNLadWwtjg=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
+	{"path":"github.com/hashicorp/consul/agent/consul/autopilot","checksumSHA1":"lu2pzUDqU0jwul48T6IkTZK9Gxc=","revision":"b137060630b463d7ad5360f0d8f32f9347ae3b7d","revisionTime":"2020-02-13T19:55:27Z"},
 	{"path":"github.com/hashicorp/consul/api","checksumSHA1":"7JPBtnIgLkdcJ0ldXMTEnVjNEjA=","revision":"40cec98468b829e5cdaacb0629b3e23a028db688","revisionTime":"2019-05-22T20:19:12Z"},
 	{"path":"github.com/hashicorp/consul/command/flags","checksumSHA1":"soNN4xaHTbeXFgNkZ7cX0gbFXQk=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
 	{"path":"github.com/hashicorp/consul/lib","checksumSHA1":"Nrh9BhiivRyJiuPzttstmq9xl/w=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
 	{"path":"github.com/hashicorp/consul/lib/freeport","checksumSHA1":"E28E4zR1FN2v1Xiq4FUER7KVN9M=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
+	{"path":"github.com/hashicorp/consul/logging","checksumSHA1":"xddCR1b2SU/XPi3UrLGz7Ns5HiQ=","revision":"b137060630b463d7ad5360f0d8f32f9347ae3b7d","revisionTime":"2020-02-13T19:55:27Z"},
 	{"path":"github.com/hashicorp/consul/test/porter","checksumSHA1":"5XjgqE4UIfwXvkq5VssGNc7uPhQ=","revision":"ad9425ca6353b8afcfebd19130a8cf768f7eac30","revisionTime":"2017-10-21T00:05:25Z"},
 	{"path":"github.com/hashicorp/consul/testutil","checksumSHA1":"T4CeQD+QRsjf1BJ1n7FSojS5zDQ=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
 	{"path":"github.com/hashicorp/consul/testutil/retry","checksumSHA1":"SCb2b91UYiB/23+SNDBlU5OZfFA=","revision":"fb848fc48818f58690db09d14640513aa6bf3c02","revisionTime":"2018-04-13T17:05:42Z"},
|
Loading…
Reference in a new issue