log rotation: limit count of rotated log files (#5831)

javicrespo 2019-07-19 23:36:34 +02:00 committed by Freddy
parent 46542b2ff7
commit d4f3eebf9d
10 changed files with 159 additions and 93 deletions

View File

@@ -846,6 +846,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
LogFile: b.stringVal(c.LogFile),
LogRotateBytes: b.intVal(c.LogRotateBytes),
LogRotateDuration: b.durationVal("log_rotate_duration", c.LogRotateDuration),
LogRotateMaxFiles: b.intVal(c.LogRotateMaxFiles),
NodeID: types.NodeID(b.stringVal(c.NodeID)),
NodeMeta: c.NodeMeta,
NodeName: b.nodeName(c.NodeName),

View File

@@ -226,6 +226,7 @@ type Config struct {
LogFile *string `json:"log_file,omitempty" hcl:"log_file" mapstructure:"log_file"`
LogRotateDuration *string `json:"log_rotate_duration,omitempty" hcl:"log_rotate_duration" mapstructure:"log_rotate_duration"`
LogRotateBytes *int `json:"log_rotate_bytes,omitempty" hcl:"log_rotate_bytes" mapstructure:"log_rotate_bytes"`
LogRotateMaxFiles *int `json:"log_rotate_max_files,omitempty" hcl:"log_rotate_max_files" mapstructure:"log_rotate_max_files"`
NodeID *string `json:"node_id,omitempty" hcl:"node_id" mapstructure:"node_id"`
NodeMeta map[string]string `json:"node_meta,omitempty" hcl:"node_meta" mapstructure:"node_meta"`
NodeName *string `json:"node_name,omitempty" hcl:"node_name" mapstructure:"node_name"`
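The struct tags above map the new option onto the `log_rotate_max_files` key in agent configuration files. As a rough sketch of how it might be set alongside the existing log-rotation settings (the file name and the other values here are illustrative, not part of this change):

```hcl
# logging.hcl -- hypothetical agent configuration fragment
log_file             = "/var/log/consul/consul.log"
log_rotate_bytes     = 26214400   # rotate once the active file reaches ~25 MiB
log_rotate_duration  = "24h"      # or after a day, whichever happens first
log_rotate_max_files = 5          # keep at most the 5 newest rotated archives
```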

View File

@@ -84,6 +84,7 @@ func AddFlags(fs *flag.FlagSet, f *Flags) {
add(&f.Config.LogFile, "log-file", "Path to the file the logs get written to")
add(&f.Config.LogRotateBytes, "log-rotate-bytes", "Maximum number of bytes that should be written to a log file")
add(&f.Config.LogRotateDuration, "log-rotate-duration", "Time after which log rotation needs to be performed")
add(&f.Config.LogRotateMaxFiles, "log-rotate-max-files", "Maximum number of log file archives to keep")
add(&f.Config.NodeName, "node", "Name of this node. Must be unique in the cluster.")
add(&f.Config.NodeID, "node-id", "A unique ID for this node across space and time. Defaults to a randomly-generated ID that persists in the data-dir.")
add(&f.Config.NodeMeta, "node-meta", "An arbitrary metadata key/value pair for this node, of the format `key:value`. Can be specified multiple times.")
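For reference, a hypothetical agent invocation combining the new flag with the existing log-file flags; the paths and sizes below are illustrative only:

```sh
consul agent -data-dir=/opt/consul \
  -log-file=/var/log/consul/ \
  -log-rotate-bytes=26214400 \
  -log-rotate-max-files=2
```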

View File

@@ -865,6 +865,12 @@ type RuntimeConfig struct {
// flags: -log-rotate-bytes int
LogRotateBytes int
// LogRotateMaxFiles is the maximum number of log file archives to keep
//
// hcl: log_rotate_max_files = int
// flags: -log-rotate-max-files int
LogRotateMaxFiles int
// Node ID is a unique ID for this node across space and time. Defaults
// to a randomly-generated ID that persists in the data-dir.
//

View File

@@ -514,6 +514,19 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
rt.DataDir = dataDir
},
},
{
desc: "-log-rotate-max-files",
args: []string{
`-log-rotate-max-files=2`,
`-data-dir=` + dataDir,
},
json: []string{`{ "log_rotate_max_files": 2 }`},
hcl: []string{`log_rotate_max_files = 2`},
patch: func(rt *RuntimeConfig) {
rt.LogRotateMaxFiles = 2
rt.DataDir = dataDir
},
},
{
desc: "-node",
args: []string{
@@ -5324,6 +5337,7 @@ func TestSanitize(t *testing.T) {
"LogFile": "",
"LogRotateBytes": 0,
"LogRotateDuration": "0s",
"LogRotateMaxFiles": 0,
"NodeID": "",
"NodeMeta": {},
"NodeName": "",
@@ -5702,92 +5716,6 @@ func TestRuntime_ToTLSUtilConfig(t *testing.T) {
require.Equal(t, []uint16{tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305}, r.CipherSuites)
}
func TestReadPath(t *testing.T) {
dataDir := testutil.TempDir(t, "consul")
defer os.RemoveAll(dataDir)
tt := []struct {
name string
pre func()
args []string
expect int
}{
{
name: "dir skip non json or hcl if config-format not set",
pre: func() {
writeFile(filepath.Join(dataDir, "conf.d/conf.json"), []byte(`{}`))
writeFile(filepath.Join(dataDir, "conf.d/conf.foobar"), []byte(`{}`))
},
args: []string{
`-config-dir`, filepath.Join(dataDir, "conf.d"),
},
expect: 1,
},
{
name: "dir read non json or hcl if config-format set",
pre: func() {
writeFile(filepath.Join(dataDir, "conf.d/conf.json"), []byte(`{}`))
writeFile(filepath.Join(dataDir, "conf.d/conf.foobar"), []byte(`{}`))
},
args: []string{
`-config-dir`, filepath.Join(dataDir, "conf.d"),
`-config-format`, "json",
},
expect: 2,
},
{
name: "file skip non json or hcl if config-format not set",
pre: func() {
writeFile(filepath.Join(dataDir, "conf.d/conf.foobar"), []byte(`{}`))
},
args: []string{
`-config-file`, filepath.Join(dataDir, "conf.d"),
},
expect: 0,
},
{
name: "file read non json or hcl if config-format set",
pre: func() {
writeFile(filepath.Join(dataDir, "conf.d/conf.foobar"), []byte(`{}`))
},
args: []string{
`-config-file`, filepath.Join(dataDir, "conf.d"),
`-config-format`, "json",
},
expect: 1,
},
}
for _, tc := range tt {
cleanDir(dataDir)
t.Run(tc.name, func(t *testing.T) {
flags := Flags{}
fs := flag.NewFlagSet("", flag.ContinueOnError)
AddFlags(fs, &flags)
err := fs.Parse(tc.args)
if err != nil {
t.Fatalf("ParseFlags failed: %s", err)
}
flags.Args = fs.Args()
// write cfg files
tc.pre()
// Then create a builder with the flags.
b, err := NewBuilder(flags)
if err != nil {
t.Fatal("NewBuilder", err)
}
got := len(b.Sources)
if tc.expect != got {
t.Fatalf("expected %d sources, got %d", tc.expect, got)
}
})
}
}
func Test_UIPathBuilder(t *testing.T) {
cases := []struct {
name string

View File

@@ -194,6 +194,7 @@ func (c *cmd) run(args []string) int {
LogFilePath: config.LogFile,
LogRotateDuration: config.LogRotateDuration,
LogRotateBytes: config.LogRotateBytes,
LogRotateMaxFiles: config.LogRotateMaxFiles,
}
logFilter, logGate, logWriter, logOutput, ok := logger.Setup(logConfig, c.UI)
if !ok {

View File

@@ -1,6 +1,7 @@
package logger
import (
"fmt"
"os"
"path/filepath"
"strconv"
@@ -41,11 +42,14 @@ type LogFile struct {
//BytesWritten is the number of bytes written in the current log file
BytesWritten int64
// Max rotated files to keep before removing them.
MaxFiles int
//acquire is the mutex utilized to ensure we have no concurrency issues
acquire sync.Mutex
}
func (l *LogFile) openNew() error {
func (l *LogFile) fileNamePattern() string {
// Extract the file extension
fileExt := filepath.Ext(l.fileName)
// If we have no file extension we append .log
@@ -53,16 +57,21 @@ func (l *LogFile) openNew() error {
fileExt = ".log"
}
// Remove the file extension from the filename
fileName := strings.TrimSuffix(l.fileName, fileExt)
return strings.TrimSuffix(l.fileName, fileExt) + "-%s" + fileExt
}
func (l *LogFile) openNew() error {
fileNamePattern := l.fileNamePattern()
// New file name has the format : filename-timestamp.extension
createTime := now()
newfileName := fileName + "-" + strconv.FormatInt(createTime.UnixNano(), 10) + fileExt
newfileName := fmt.Sprintf(fileNamePattern, strconv.FormatInt(createTime.UnixNano(), 10))
newfilePath := filepath.Join(l.logPath, newfileName)
// Try creating a file. We truncate the file because we are the only authority to write the logs
filePointer, err := os.OpenFile(newfilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0640)
if err != nil {
return err
}
l.FileInfo = filePointer
// New file, new bytes tracker, new creation time :)
l.LastCreated = createTime
@@ -76,11 +85,35 @@ func (l *LogFile) rotate() error {
// Rotate if we hit the byte file limit or the time limit
if (l.BytesWritten >= int64(l.MaxBytes) && (l.MaxBytes > 0)) || timeElapsed >= l.duration {
l.FileInfo.Close()
if err := l.pruneFiles(); err != nil {
return err
}
return l.openNew()
}
return nil
}
func (l *LogFile) pruneFiles() error {
if l.MaxFiles == 0 {
return nil
}
pattern := l.fileNamePattern()
//get all the files that match the log file pattern
globExpression := filepath.Join(l.logPath, fmt.Sprintf(pattern, "*"))
matches, err := filepath.Glob(globExpression)
if err != nil {
return err
}
// Prune if there are more files stored than the configured max
stale := len(matches) - l.MaxFiles
for i := 0; i < stale; i++ {
if err := os.Remove(matches[i]); err != nil {
return err
}
}
return nil
}
// Write is used to implement io.Writer
func (l *LogFile) Write(b []byte) (n int, err error) {
// Filter out log entries that do not match log level criteria
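A note on ordering: `pruneFiles` deletes `matches[0 .. stale)`, which removes the oldest archives only because the rotated file names embed a nanosecond Unix timestamp and `filepath.Glob` returns matches in lexically sorted order (an implementation detail of the standard library rather than a documented guarantee). The standalone sketch below, which is not part of this change, illustrates that assumption:

```go
// prune_demo.go -- standalone illustration of the ordering pruneFiles relies on.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"time"
)

func main() {
	dir, err := ioutil.TempDir("", "prune-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Simulate three rotated archives; later files get larger timestamps.
	for i := 0; i < 3; i++ {
		name := "consul-" + strconv.FormatInt(time.Now().UnixNano(), 10) + ".log"
		if err := ioutil.WriteFile(filepath.Join(dir, name), []byte("log data"), 0640); err != nil {
			panic(err)
		}
		time.Sleep(time.Millisecond) // guarantee distinct, increasing timestamps
	}

	// Glob returns names in sorted order, so for timestamps of equal digit
	// width the slice runs from oldest to newest.
	matches, err := filepath.Glob(filepath.Join(dir, "consul-*.log"))
	if err != nil {
		panic(err)
	}

	maxFiles := 1
	stale := len(matches) - maxFiles
	for i := 0; i < stale; i++ {
		fmt.Println("would prune:", filepath.Base(matches[i])) // the two oldest archives
	}
	fmt.Println("would keep: ", filepath.Base(matches[len(matches)-1]))
}
```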

View File

@@ -3,6 +3,7 @@ package logger
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
@@ -21,7 +22,12 @@ func TestLogFile_timeRotation(t *testing.T) {
tempDir := testutil.TempDir(t, "LogWriterTime")
defer os.Remove(tempDir)
filt := LevelFilter()
logFile := LogFile{logFilter: filt, fileName: testFileName, logPath: tempDir, duration: testDuration}
logFile := LogFile{
logFilter: filt,
fileName: testFileName,
logPath: tempDir,
duration: testDuration,
}
logFile.Write([]byte("Hello World"))
time.Sleep(2 * time.Second)
logFile.Write([]byte("Second File"))
@@ -51,7 +57,13 @@ func TestLogFile_byteRotation(t *testing.T) {
defer os.Remove(tempDir)
filt := LevelFilter()
filt.MinLevel = logutils.LogLevel("INFO")
logFile := LogFile{logFilter: filt, fileName: testFileName, logPath: tempDir, MaxBytes: testBytes, duration: 24 * time.Hour}
logFile := LogFile{
logFilter: filt,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
}
logFile.Write([]byte("Hello World"))
logFile.Write([]byte("Second File"))
want := 2
@@ -66,7 +78,13 @@ func TestLogFile_logLevelFiltering(t *testing.T) {
tempDir := testutil.TempDir(t, "LogWriterTime")
defer os.Remove(tempDir)
filt := LevelFilter()
logFile := LogFile{logFilter: filt, fileName: testFileName, logPath: tempDir, MaxBytes: testBytes, duration: testDuration}
logFile := LogFile{
logFilter: filt,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: testDuration,
}
logFile.Write([]byte("[INFO] This is an info message"))
logFile.Write([]byte("[DEBUG] This is a debug message"))
logFile.Write([]byte("[ERR] This is an error message"))
@@ -75,3 +93,68 @@ func TestLogFile_logLevelFiltering(t *testing.T) {
t.Errorf("Expected %d files, got %v file(s)", want, len(got))
}
}
func TestLogFile_deleteArchives(t *testing.T) {
t.Parallel()
tempDir := testutil.TempDir(t, "LogWriteDeleteArchives")
defer os.Remove(tempDir)
filt := LevelFilter()
filt.MinLevel = logutils.LogLevel("INFO")
logFile := LogFile{
logFilter: filt,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
MaxFiles: 1,
}
logFile.Write([]byte("[INFO] Hello World"))
logFile.Write([]byte("[INFO] Second File"))
logFile.Write([]byte("[INFO] Third File"))
want := 2
tempFiles, _ := ioutil.ReadDir(tempDir)
if got := tempFiles; len(got) != want {
t.Errorf("Expected %d files, got %v file(s)", want, len(got))
return
}
for _, tempFile := range tempFiles {
var bytes []byte
var err error
path := filepath.Join(tempDir, tempFile.Name())
if bytes, err = ioutil.ReadFile(path); err != nil {
t.Errorf(err.Error())
return
}
contents := string(bytes)
if contents == "[INFO] Hello World" {
t.Errorf("Should have deleted the eldest log file")
return
}
}
}
func TestLogFile_deleteArchivesDisabled(t *testing.T) {
t.Parallel()
tempDir := testutil.TempDir(t, "LogWriteDeleteArchivesDisabled")
defer os.Remove(tempDir)
filt := LevelFilter()
filt.MinLevel = logutils.LogLevel("INFO")
logFile := LogFile{
logFilter: filt,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
MaxFiles: 0,
}
logFile.Write([]byte("[INFO] Hello World"))
logFile.Write([]byte("[INFO] Second File"))
logFile.Write([]byte("[INFO] Third File"))
want := 3
tempFiles, _ := ioutil.ReadDir(tempDir)
if got := tempFiles; len(got) != want {
t.Errorf("Expected %d files, got %v file(s)", want, len(got))
return
}
}
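Assuming the tests live in the `logger` package at the root of the repository (as the imports above suggest), the two new cases can be run on their own with something like:

```sh
go test ./logger -run 'TestLogFile_deleteArchives' -v
```

The pattern matches both `TestLogFile_deleteArchives` and `TestLogFile_deleteArchivesDisabled`.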

View File

@@ -31,6 +31,9 @@ type Config struct {
//LogRotateBytes is the user specified byte limit to rotate logs
LogRotateBytes int
//LogRotateMaxFiles is the maximum number of past archived log files to keep
LogRotateMaxFiles int
}
const (
@@ -123,7 +126,14 @@ func Setup(config *Config, ui cli.Ui) (*logutils.LevelFilter, *GatedWriter, *Log
if config.LogRotateBytes != 0 {
logRotateBytes = config.LogRotateBytes
}
logFile := &LogFile{logFilter: logFilter, fileName: fileName, logPath: dir, duration: logRotateDuration, MaxBytes: logRotateBytes}
logFile := &LogFile{
logFilter: logFilter,
fileName: fileName,
logPath: dir,
duration: logRotateDuration,
MaxBytes: logRotateBytes,
MaxFiles: config.LogRotateMaxFiles,
}
writers = append(writers, logFile)
}
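For an embedding program, wiring the new limit through `logger.Setup` follows the same call shape as the agent command shown earlier. In the sketch below the import path, the `cli.BasicUi`, the `LogLevel` field, and all literal values are assumptions made for illustration; only the four rotation-related fields appear in this change:

```go
package main

import (
	"os"
	"time"

	"github.com/hashicorp/consul/logger" // assumed import path for this package
	"github.com/mitchellh/cli"
)

func main() {
	ui := &cli.BasicUi{Writer: os.Stdout, ErrorWriter: os.Stderr}

	logConfig := &logger.Config{
		LogLevel:          "INFO",             // assumed pre-existing field
		LogFilePath:       "/var/log/consul/", // hypothetical log directory
		LogRotateDuration: 24 * time.Hour,
		LogRotateBytes:    26214400,
		LogRotateMaxFiles: 7, // prune all but the 7 newest archives on rotation
	}

	logFilter, logGate, logWriter, logOutput, ok := logger.Setup(logConfig, ui)
	if !ok {
		os.Exit(1)
	}

	// The returned writers behave as before; MaxFiles only affects what
	// happens to old archive files when the active log file rotates.
	_, _, _, _ = logFilter, logGate, logWriter, logOutput
}
```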

View File

@@ -273,6 +273,8 @@ will exit with an error at startup.
* <a name="_log_rotate_duration"></a><a href="#_log_rotate_duration">`-log-rotate-duration`</a> - to specify the maximum duration a log should be written to before it needs to be rotated. Must be a duration value such as 30s. Defaults to 24h.
* <a name="_log_rotate_max_files"></a><a href="#_log_rotate_max_files">`-log-rotate-max-files`</a> - to specify the maximum number of older log file archives to keep. Defaults to 0 (no files are ever deleted). Set to -1 to disable rotation and discard all log files.
* <a name="_join"></a><a href="#_join">`-join`</a> - Address of another agent
to join upon starting up. This can be
specified multiple times to specify multiple agents to join. If Consul is