Merge Consul OSS branch 'master' at commit ef257b084d2e2a474889518440515e360d0cd990

hashicorp-ci 2019-07-20 02:00:29 +00:00
commit 8b109e5f9f
19 changed files with 191 additions and 127 deletions

View File

@@ -67,7 +67,7 @@ func (probe *GrpcHealthProbe) Check() error {
if err != nil {
return err
}
- if response == nil || (response != nil && response.Status != hv1.HealthCheckResponse_SERVING) {
+ if response == nil || response.Status != hv1.HealthCheckResponse_SERVING {
return ErrGRPCUnhealthy
}
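A note on why this simplification is safe: Go's `||` short-circuits, so the right-hand operand only runs once `response` is known to be non-nil, which makes the inner nil guard dead code. A minimal, self-contained illustration — the stand-in type and constant below are invented for the example, not taken from the diff:

```go
package main

import "fmt"

// healthCheckResponse stands in for hv1.HealthCheckResponse; both it and
// the serving constant are assumptions made for this sketch.
type healthCheckResponse struct{ Status int32 }

const serving int32 = 1

func main() {
	cases := []*healthCheckResponse{nil, {Status: serving}, {Status: 0}}
	for _, response := range cases {
		// Old form: the inner nil check can never be false when evaluated,
		// because || already short-circuited on the response == nil case.
		oldCond := response == nil || (response != nil && response.Status != serving)
		// New form: identical truth table, one clause shorter.
		newCond := response == nil || response.Status != serving
		fmt.Println(oldCond == newCond) // prints true for every case
	}
}
```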

View File

@@ -846,6 +846,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
LogFile: b.stringVal(c.LogFile),
LogRotateBytes: b.intVal(c.LogRotateBytes),
LogRotateDuration: b.durationVal("log_rotate_duration", c.LogRotateDuration),
+ LogRotateMaxFiles: b.intVal(c.LogRotateMaxFiles),
NodeID: types.NodeID(b.stringVal(c.NodeID)),
NodeMeta: c.NodeMeta,
NodeName: b.nodeName(c.NodeName),

View File

@@ -228,6 +228,7 @@ type Config struct {
LogFile *string `json:"log_file,omitempty" hcl:"log_file" mapstructure:"log_file"`
LogRotateDuration *string `json:"log_rotate_duration,omitempty" hcl:"log_rotate_duration" mapstructure:"log_rotate_duration"`
LogRotateBytes *int `json:"log_rotate_bytes,omitempty" hcl:"log_rotate_bytes" mapstructure:"log_rotate_bytes"`
+ LogRotateMaxFiles *int `json:"log_rotate_max_files,omitempty" hcl:"log_rotate_max_files" mapstructure:"log_rotate_max_files"`
NodeID *string `json:"node_id,omitempty" hcl:"node_id" mapstructure:"node_id"`
NodeMeta map[string]string `json:"node_meta,omitempty" hcl:"node_meta" mapstructure:"node_meta"`
NodeName *string `json:"node_name,omitempty" hcl:"node_name" mapstructure:"node_name"`

View File

@@ -84,6 +84,7 @@ func AddFlags(fs *flag.FlagSet, f *Flags) {
add(&f.Config.LogFile, "log-file", "Path to the file the logs get written to")
add(&f.Config.LogRotateBytes, "log-rotate-bytes", "Maximum number of bytes that should be written to a log file")
add(&f.Config.LogRotateDuration, "log-rotate-duration", "Time after which log rotation needs to be performed")
+ add(&f.Config.LogRotateMaxFiles, "log-rotate-max-files", "Maximum number of log file archives to keep")
add(&f.Config.NodeName, "node", "Name of this node. Must be unique in the cluster.")
add(&f.Config.NodeID, "node-id", "A unique ID for this node across space and time. Defaults to a randomly-generated ID that persists in the data-dir.")
add(&f.Config.NodeMeta, "node-meta", "An arbitrary metadata key/value pair for this node, of the format `key:value`. Can be specified multiple times.")

View File

@@ -865,6 +865,12 @@ type RuntimeConfig struct {
// flags: -log-rotate-bytes int
LogRotateBytes int
+ // LogRotateMaxFiles is the maximum number of log file archives to keep
+ //
+ // hcl: log_rotate_max_files = int
+ // flags: -log-rotate-max-files int
+ LogRotateMaxFiles int
// Node ID is a unique ID for this node across space and time. Defaults
// to a randomly-generated ID that persists in the data-dir.
//

View File

@@ -514,6 +514,19 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
rt.DataDir = dataDir
},
},
+ {
+ desc: "-log-rotate-max-files",
+ args: []string{
+ `-log-rotate-max-files=2`,
+ `-data-dir=` + dataDir,
+ },
+ json: []string{`{ "log_rotate_max_files": 2 }`},
+ hcl: []string{`log_rotate_max_files = 2`},
+ patch: func(rt *RuntimeConfig) {
+ rt.LogRotateMaxFiles = 2
+ rt.DataDir = dataDir
+ },
+ },
{
desc: "-node",
args: []string{
@@ -5860,6 +5873,7 @@ func TestSanitize(t *testing.T) {
"LogFile": "",
"LogRotateBytes": 0,
"LogRotateDuration": "0s",
"LogRotateMaxFiles": 0,
"NodeID": "",
"NodeMeta": {},
"NodeName": "",
@@ -6239,92 +6253,6 @@ func TestRuntime_ToTLSUtilConfig(t *testing.T) {
require.Equal(t, []uint16{tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305}, r.CipherSuites)
}
- func TestReadPath(t *testing.T) {
- dataDir := testutil.TempDir(t, "consul")
- defer os.RemoveAll(dataDir)
- tt := []struct {
- name string
- pre func()
- args []string
- expect int
- }{
- {
- name: "dir skip non json or hcl if config-format not set",
- pre: func() {
- writeFile(filepath.Join(dataDir, "conf.d/conf.json"), []byte(`{}`))
- writeFile(filepath.Join(dataDir, "conf.d/conf.foobar"), []byte(`{}`))
- },
- args: []string{
- `-config-dir`, filepath.Join(dataDir, "conf.d"),
- },
- expect: 1,
- },
- {
- name: "dir read non json or hcl if config-format set",
- pre: func() {
- writeFile(filepath.Join(dataDir, "conf.d/conf.json"), []byte(`{}`))
- writeFile(filepath.Join(dataDir, "conf.d/conf.foobar"), []byte(`{}`))
- },
- args: []string{
- `-config-dir`, filepath.Join(dataDir, "conf.d"),
- `-config-format`, "json",
- },
- expect: 2,
- },
- {
- name: "file skip non json or hcl if config-format not set",
- pre: func() {
- writeFile(filepath.Join(dataDir, "conf.d/conf.foobar"), []byte(`{}`))
- },
- args: []string{
- `-config-file`, filepath.Join(dataDir, "conf.d"),
- },
- expect: 0,
- },
- {
- name: "file read non json or hcl if config-format set",
- pre: func() {
- writeFile(filepath.Join(dataDir, "conf.d/conf.foobar"), []byte(`{}`))
- },
- args: []string{
- `-config-file`, filepath.Join(dataDir, "conf.d"),
- `-config-format`, "json",
- },
- expect: 1,
- },
- }
- for _, tc := range tt {
- cleanDir(dataDir)
- t.Run(tc.name, func(t *testing.T) {
- flags := Flags{}
- fs := flag.NewFlagSet("", flag.ContinueOnError)
- AddFlags(fs, &flags)
- err := fs.Parse(tc.args)
- if err != nil {
- t.Fatalf("ParseFlags failed: %s", err)
- }
- flags.Args = fs.Args()
- // write cfg files
- tc.pre()
- // Then create a builder with the flags.
- b, err := NewBuilder(flags)
- if err != nil {
- t.Fatal("NewBuilder", err)
- }
- got := len(b.Sources)
- if tc.expect != got {
- t.Fatalf("expected %d sources, got %d", tc.expect, got)
- }
- })
- }
- }
func Test_UIPathBuilder(t *testing.T) {
cases := []struct {
name string

View File

@@ -602,7 +602,7 @@ func (a *ACL) tokenSetInternal(args *structs.ACLTokenSetRequest, reply *structs.
}
// Don't check expiration times here as it doesn't really matter.
- if _, updatedToken, err := a.srv.fsm.State().ACLTokenGetByAccessor(nil, token.AccessorID); err == nil && token != nil {
+ if _, updatedToken, err := a.srv.fsm.State().ACLTokenGetByAccessor(nil, token.AccessorID); err == nil && updatedToken != nil {
*reply = *updatedToken
} else {
return fmt.Errorf("Failed to retrieve the token after insertion")

View File

@@ -90,7 +90,7 @@ func (r *aclTokenReplicator) DeleteLocalBatch(srv *Server, batch []string) error
if err != nil {
return err
}
- if respErr, ok := resp.(error); ok && err != nil {
+ if respErr, ok := resp.(error); ok {
return respErr
}
return nil
@@ -119,7 +119,7 @@ func (r *aclTokenReplicator) UpdateLocalBatch(ctx context.Context, srv *Server,
if err != nil {
return err
}
- if respErr, ok := resp.(error); ok && err != nil {
+ if respErr, ok := resp.(error); ok {
return respErr
}
@@ -202,7 +202,7 @@ func (r *aclPolicyReplicator) DeleteLocalBatch(srv *Server, batch []string) erro
if err != nil {
return err
}
- if respErr, ok := resp.(error); ok && err != nil {
+ if respErr, ok := resp.(error); ok {
return respErr
}
return nil
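Context for these three identical fixes: the call above returns both a transport-level `err` and a response value that may itself carry an FSM-level error. Since a non-nil `err` has already returned earlier, the old `ok && err != nil` guard could never fire, silently dropping FSM errors. A hedged, self-contained sketch of the corrected pattern — the helper name `checkRaftResult` is an assumption, not something shown in the diff:

```go
package main

import (
	"errors"
	"fmt"
)

// checkRaftResult mirrors the corrected pattern: surface a transport-level
// err first, then any error value embedded in the response. The old guard
// `ok && err != nil` was unreachable because err is always nil by the time
// the type assertion runs.
func checkRaftResult(resp interface{}, err error) error {
	if err != nil {
		return err // raft/transport failure
	}
	if respErr, ok := resp.(error); ok {
		return respErr // FSM rejected the operation
	}
	return nil
}

func main() {
	fmt.Println(checkRaftResult("ok", nil))                        // <nil>
	fmt.Println(checkRaftResult(errors.New("acl not found"), nil)) // acl not found
}
```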

View File

@@ -2728,7 +2728,7 @@ func TestStateStore_ACLRole_Delete(t *testing.T) {
t.Parallel()
s := testACLStateStore(t)
- // deletion of non-existant roles is not an error
+ // deletion of non-existent roles is not an error
require.NoError(t, s.ACLRoleDeleteByName(3, "not-found"))
require.NoError(t, s.ACLRoleDeleteByID(3, testRoleID_A))
})
@@ -3021,7 +3021,7 @@ func TestStateStore_ACLAuthMethod_Delete(t *testing.T) {
t.Parallel()
s := testACLStateStore(t)
- // deletion of non-existant methods is not an error
+ // deletion of non-existent methods is not an error
require.NoError(t, s.ACLAuthMethodDeleteByName(3, "not-found"))
})
}
@@ -3453,7 +3453,7 @@ func TestStateStore_ACLBindingRule_Delete(t *testing.T) {
t.Parallel()
s := testACLStateStore(t)
- // deletion of non-existant rules is not an error
+ // deletion of non-existent rules is not an error
require.NoError(t, s.ACLBindingRuleDeleteByID(3, "ed3ce1b8-3a16-4e2f-b82e-f92e3b92410d"))
})
}

View File

@@ -3884,7 +3884,7 @@ func TestStateStore_ensureServiceCASTxn(t *testing.T) {
require.NoError(t, err)
tx.Commit()
- // ensure no update happend
+ // ensure no update happened
tx = s.db.Txn(false)
_, nsRead, err = s.NodeService("node1", "foo")
require.NoError(t, err)

View File

@@ -135,7 +135,7 @@ func (s *HTTPServer) convertOps(resp http.ResponseWriter, req *http.Request) (st
}
netKVSize += size
- verb := api.KVOp(in.KV.Verb)
+ verb := in.KV.Verb
if isWrite(verb) {
writes++
}

View File

@@ -141,31 +141,29 @@ func (s *Server) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Cluste
return makeClusterFromUserConfig(cfg.LocalClusterJSON)
}
if c == nil {
- addr := cfgSnap.Proxy.LocalServiceAddress
- if addr == "" {
- addr = "127.0.0.1"
- }
- c = &envoy.Cluster{
- Name: LocalAppClusterName,
- ConnectTimeout: time.Duration(cfg.LocalConnectTimeoutMs) * time.Millisecond,
- ClusterDiscoveryType: &envoy.Cluster_Type{Type: envoy.Cluster_STATIC},
- LoadAssignment: &envoy.ClusterLoadAssignment{
- ClusterName: LocalAppClusterName,
- Endpoints: []envoyendpoint.LocalityLbEndpoints{
- {
- LbEndpoints: []envoyendpoint.LbEndpoint{
- makeEndpoint(LocalAppClusterName,
- addr,
- cfgSnap.Proxy.LocalServicePort),
- },
+ addr := cfgSnap.Proxy.LocalServiceAddress
+ if addr == "" {
+ addr = "127.0.0.1"
+ }
+ c = &envoy.Cluster{
+ Name: LocalAppClusterName,
+ ConnectTimeout: time.Duration(cfg.LocalConnectTimeoutMs) * time.Millisecond,
+ ClusterDiscoveryType: &envoy.Cluster_Type{Type: envoy.Cluster_STATIC},
+ LoadAssignment: &envoy.ClusterLoadAssignment{
+ ClusterName: LocalAppClusterName,
+ Endpoints: []envoyendpoint.LocalityLbEndpoints{
+ {
+ LbEndpoints: []envoyendpoint.LbEndpoint{
+ makeEndpoint(LocalAppClusterName,
+ addr,
+ cfgSnap.Proxy.LocalServicePort),
+ },
+ },
+ },
+ }
- if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
- c.Http2ProtocolOptions = &envoycore.Http2ProtocolOptions{}
- }
+ },
+ }
+ if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
+ c.Http2ProtocolOptions = &envoycore.Http2ProtocolOptions{}
+ }
return c, err

View File

@@ -194,6 +194,7 @@ func (c *cmd) run(args []string) int {
LogFilePath: config.LogFile,
LogRotateDuration: config.LogRotateDuration,
LogRotateBytes: config.LogRotateBytes,
+ LogRotateMaxFiles: config.LogRotateMaxFiles,
}
logFilter, logGate, logWriter, logOutput, ok := logger.Setup(logConfig, c.UI)
if !ok {
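For orientation, this is where the new option crosses from agent configuration into the logger package. A hedged, compilable sketch of the same mapping — the struct here is a local stand-in mirroring the `logger.Config` fields visible in this diff, and the values are illustrative, not Consul defaults (except the 24h duration, which the docs below name as the default rotation interval):

```go
package main

import (
	"fmt"
	"time"
)

// Config is a local stand-in for logger.Config so the example compiles on
// its own; field names match the fields shown in this diff.
type Config struct {
	LogFilePath       string
	LogRotateDuration time.Duration
	LogRotateBytes    int
	LogRotateMaxFiles int
}

func main() {
	logConfig := &Config{
		LogFilePath:       "/var/log/consul/consul.log",
		LogRotateDuration: 24 * time.Hour,
		LogRotateBytes:    0, // 0 disables byte-based rotation
		LogRotateMaxFiles: 5, // keep the five newest archives; 0 keeps all
	}
	fmt.Printf("%+v\n", logConfig)
}
```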

View File

@@ -165,7 +165,7 @@ func TestKVPutCommand_Base64(t *testing.T) {
t.Fatal(err)
}
- if !bytes.Equal(data.Value, []byte(expected)) {
+ if !bytes.Equal(data.Value, expected) {
t.Errorf("bad: %#v, %s", data.Value, data.Value)
}
}

View File

@@ -1,6 +1,7 @@
package logger
import (
"fmt"
"os"
"path/filepath"
"strconv"
@@ -41,11 +42,14 @@ type LogFile struct {
//BytesWritten is the number of bytes written in the current log file
BytesWritten int64
+ // Max rotated files to keep before removing them.
+ MaxFiles int
//acquire is the mutex utilized to ensure we have no concurrency issues
acquire sync.Mutex
}
- func (l *LogFile) openNew() error {
+ func (l *LogFile) fileNamePattern() string {
// Extract the file extension
fileExt := filepath.Ext(l.fileName)
// If we have no file extension we append .log
@@ -53,16 +57,21 @@ func (l *LogFile) openNew() error {
fileExt = ".log"
}
// Remove the file extension from the filename
- fileName := strings.TrimSuffix(l.fileName, fileExt)
+ return strings.TrimSuffix(l.fileName, fileExt) + "-%s" + fileExt
+ }
+ func (l *LogFile) openNew() error {
+ fileNamePattern := l.fileNamePattern()
// New file name has the format : filename-timestamp.extension
createTime := now()
- newfileName := fileName + "-" + strconv.FormatInt(createTime.UnixNano(), 10) + fileExt
+ newfileName := fmt.Sprintf(fileNamePattern, strconv.FormatInt(createTime.UnixNano(), 10))
newfilePath := filepath.Join(l.logPath, newfileName)
// Try creating a file. We truncate the file because we are the only authority to write the logs
filePointer, err := os.OpenFile(newfilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0640)
if err != nil {
return err
}
l.FileInfo = filePointer
// New file, new bytes tracker, new creation time :)
l.LastCreated = createTime
@@ -76,11 +85,35 @@ func (l *LogFile) rotate() error {
// Rotate if we hit the byte file limit or the time limit
if (l.BytesWritten >= int64(l.MaxBytes) && (l.MaxBytes > 0)) || timeElapsed >= l.duration {
l.FileInfo.Close()
+ if err := l.pruneFiles(); err != nil {
+ return err
+ }
return l.openNew()
}
return nil
}
+ func (l *LogFile) pruneFiles() error {
+ if l.MaxFiles == 0 {
+ return nil
+ }
+ pattern := l.fileNamePattern()
+ //get all the files that match the log file pattern
+ globExpression := filepath.Join(l.logPath, fmt.Sprintf(pattern, "*"))
+ matches, err := filepath.Glob(globExpression)
+ if err != nil {
+ return err
+ }
+ // Prune if there are more files stored than the configured max
+ stale := len(matches) - l.MaxFiles
+ for i := 0; i < stale; i++ {
+ if err := os.Remove(matches[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
// Write is used to implement io.Writer
func (l *LogFile) Write(b []byte) (n int, err error) {
// Filter out log entries that do not match log level criteria
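Two properties make the pruning scheme above work: archive names embed a fixed-width nanosecond timestamp, and `filepath.Glob` returns its matches in sorted (lexical) order, so `matches` comes back oldest-first and the loop deletes exactly the `len(matches) - MaxFiles` oldest archives; the loop body never runs when that difference is zero or negative. A self-contained sketch of the same arithmetic — the function name and call site are stand-ins, not the package's API:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// pruneOldest keeps the newest maxFiles archives matching pattern
// (a fmt pattern like "consul-%s.log", as built by fileNamePattern).
func pruneOldest(logPath, pattern string, maxFiles int) error {
	if maxFiles == 0 {
		return nil // 0 means archives are never deleted
	}
	// Nanosecond timestamps of equal digit length sort the same lexically
	// and chronologically, and filepath.Glob returns sorted paths, so the
	// slice is ordered oldest-first.
	matches, err := filepath.Glob(filepath.Join(logPath, fmt.Sprintf(pattern, "*")))
	if err != nil {
		return err
	}
	stale := len(matches) - maxFiles
	for i := 0; i < stale; i++ { // never iterates when stale <= 0
		if err := os.Remove(matches[i]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// Illustrative call; the directory and pattern are hypothetical.
	if err := pruneOldest(os.TempDir(), "consul-%s.log", 5); err != nil {
		fmt.Println("prune failed:", err)
	}
}
```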

View File

@@ -3,6 +3,7 @@ package logger
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
@@ -21,7 +22,12 @@ func TestLogFile_timeRotation(t *testing.T) {
tempDir := testutil.TempDir(t, "LogWriterTime")
defer os.Remove(tempDir)
filt := LevelFilter()
- logFile := LogFile{logFilter: filt, fileName: testFileName, logPath: tempDir, duration: testDuration}
+ logFile := LogFile{
+ logFilter: filt,
+ fileName: testFileName,
+ logPath: tempDir,
+ duration: testDuration,
+ }
logFile.Write([]byte("Hello World"))
time.Sleep(2 * time.Second)
logFile.Write([]byte("Second File"))
@@ -51,7 +57,13 @@ func TestLogFile_byteRotation(t *testing.T) {
defer os.Remove(tempDir)
filt := LevelFilter()
filt.MinLevel = logutils.LogLevel("INFO")
- logFile := LogFile{logFilter: filt, fileName: testFileName, logPath: tempDir, MaxBytes: testBytes, duration: 24 * time.Hour}
+ logFile := LogFile{
+ logFilter: filt,
+ fileName: testFileName,
+ logPath: tempDir,
+ MaxBytes: testBytes,
+ duration: 24 * time.Hour,
+ }
logFile.Write([]byte("Hello World"))
logFile.Write([]byte("Second File"))
want := 2
@@ -66,7 +78,13 @@ func TestLogFile_logLevelFiltering(t *testing.T) {
tempDir := testutil.TempDir(t, "LogWriterTime")
defer os.Remove(tempDir)
filt := LevelFilter()
- logFile := LogFile{logFilter: filt, fileName: testFileName, logPath: tempDir, MaxBytes: testBytes, duration: testDuration}
+ logFile := LogFile{
+ logFilter: filt,
+ fileName: testFileName,
+ logPath: tempDir,
+ MaxBytes: testBytes,
+ duration: testDuration,
+ }
logFile.Write([]byte("[INFO] This is an info message"))
logFile.Write([]byte("[DEBUG] This is a debug message"))
logFile.Write([]byte("[ERR] This is an error message"))
@@ -75,3 +93,68 @@
t.Errorf("Expected %d files, got %v file(s)", want, len(got))
}
}
+ func TestLogFile_deleteArchives(t *testing.T) {
+ t.Parallel()
+ tempDir := testutil.TempDir(t, "LogWriteDeleteArchives")
+ defer os.Remove(tempDir)
+ filt := LevelFilter()
+ filt.MinLevel = logutils.LogLevel("INFO")
+ logFile := LogFile{
+ logFilter: filt,
+ fileName: testFileName,
+ logPath: tempDir,
+ MaxBytes: testBytes,
+ duration: 24 * time.Hour,
+ MaxFiles: 1,
+ }
+ logFile.Write([]byte("[INFO] Hello World"))
+ logFile.Write([]byte("[INFO] Second File"))
+ logFile.Write([]byte("[INFO] Third File"))
+ want := 2
+ tempFiles, _ := ioutil.ReadDir(tempDir)
+ if got := tempFiles; len(got) != want {
+ t.Errorf("Expected %d files, got %v file(s)", want, len(got))
+ return
+ }
+ for _, tempFile := range tempFiles {
+ var bytes []byte
+ var err error
+ path := filepath.Join(tempDir, tempFile.Name())
+ if bytes, err = ioutil.ReadFile(path); err != nil {
+ t.Errorf(err.Error())
+ return
+ }
+ contents := string(bytes)
+ if contents == "[INFO] Hello World" {
+ t.Errorf("Should have deleted the eldest log file")
+ return
+ }
+ }
+ }
+ func TestLogFile_deleteArchivesDisabled(t *testing.T) {
+ t.Parallel()
+ tempDir := testutil.TempDir(t, "LogWriteDeleteArchivesDisabled")
+ defer os.Remove(tempDir)
+ filt := LevelFilter()
+ filt.MinLevel = logutils.LogLevel("INFO")
+ logFile := LogFile{
+ logFilter: filt,
+ fileName: testFileName,
+ logPath: tempDir,
+ MaxBytes: testBytes,
+ duration: 24 * time.Hour,
+ MaxFiles: 0,
+ }
+ logFile.Write([]byte("[INFO] Hello World"))
+ logFile.Write([]byte("[INFO] Second File"))
+ logFile.Write([]byte("[INFO] Third File"))
+ want := 3
+ tempFiles, _ := ioutil.ReadDir(tempDir)
+ if got := tempFiles; len(got) != want {
+ t.Errorf("Expected %d files, got %v file(s)", want, len(got))
+ return
+ }
+ }

View File

@@ -31,6 +31,9 @@ type Config struct {
//LogRotateBytes is the user specified byte limit to rotate logs
LogRotateBytes int
+ //LogRotateMaxFiles is the maximum number of past archived log files to keep
+ LogRotateMaxFiles int
}
const (
@@ -123,7 +126,14 @@ func Setup(config *Config, ui cli.Ui) (*logutils.LevelFilter, *GatedWriter, *Log
if config.LogRotateBytes != 0 {
logRotateBytes = config.LogRotateBytes
}
logFile := &LogFile{logFilter: logFilter, fileName: fileName, logPath: dir, duration: logRotateDuration, MaxBytes: logRotateBytes}
logFile := &LogFile{
logFilter: logFilter,
fileName: fileName,
logPath: dir,
duration: logRotateDuration,
MaxBytes: logRotateBytes,
MaxFiles: config.LogRotateMaxFiles,
}
writers = append(writers, logFile)
}

View File

@@ -273,6 +273,8 @@ will exit with an error at startup.
* <a name="_log_rotate_duration"></a><a href="#_log_rotate_duration">`-log-rotate-duration`</a> - to specify the maximum duration a log should be written to before it needs to be rotated. Must be a duration value such as 30s. Defaults to 24h.
+ * <a name="_log_rotate_max_files"></a><a href="#_log_rotate_max_files">`-log-rotate-max-files`</a> - to specify the maximum number of older log file archives to keep. Defaults to 0 (no files are ever deleted). Set to -1 to discard old log files when a new one is created.
* <a name="_join"></a><a href="#_join">`-join`</a> - Address of another agent
to join upon starting up. This can be
specified multiple times to specify multiple agents to join. If Consul is
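Returning to the log-rotation flags above: each also has a configuration-file equivalent, using the HCL key names that appear earlier in this diff. A hedged example with illustrative values:

```hcl
log_file             = "/var/log/consul/consul.log"
log_rotate_duration  = "24h"      # rotate at least daily (the documented default)
log_rotate_bytes     = 104857600  # also rotate after ~100 MiB
log_rotate_max_files = 5          # keep only the five newest archives
```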

View File

@@ -12,7 +12,7 @@ A Connect-aware proxy enables unmodified applications to use Connect. A
per-service proxy sidecar transparently handles inbound and outbound service
connections, automatically wrapping and verifying TLS connections. Consul
includes its own built-in L4 proxy and has first class support for Envoy. You
- can chose other proxies to plug in as well. This section describes how to
+ can choose other proxies to plug in as well. This section describes how to
configure Envoy or the built-in proxy using Connect, and how to integrate the
proxy of your choice.