package logrotator

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
)

const (
	// bufSize is the maximum number of bytes read from the reader into the
	// buffer in a single read
	bufSize = 32 * 1024
)

// LogRotator ingests data from a reader and writes it out to a rotated set of
// files, moving to a new file once the current one reaches the configured
// maximum size
type LogRotator struct {
	MaxFiles int   // maximum number of rotated files retained by the log rotator
	FileSize int64 // maximum size, in bytes, of a rotated file

	path     string // path where the rotated files are created
	fileName string // base file name of the rotated files

	logFileIdx       int // index of the file currently being written to
	oldestLogFileIdx int // index of the oldest rotated file still on disk

	logger  *log.Logger
	purgeCh chan struct{} // signals the purge goroutine that old files may need removing
}
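
// For illustration only (the base name below is an assumption): with a
// fileName of "redis.stdout" and a MaxFiles of 3, the target directory would
// eventually hold only the newest three indexes, e.g. redis.stdout.7,
// redis.stdout.8 and redis.stdout.9, since older indexes are purged as new
// files are created.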

// NewLogRotator configures and returns a new LogRotator. It scans the target
// directory for previously rotated files so that writing resumes at the
// highest existing index, and starts the goroutine that purges old files.
func NewLogRotator(path string, fileName string, maxFiles int, fileSize int64, logger *log.Logger) (*LogRotator, error) {
	files, err := ioutil.ReadDir(path)
	if err != nil {
		return nil, err
	}

	// Find the rotated log file with the largest index
	logFileIdx := 0
	prefix := fmt.Sprintf("%s.", fileName)
	for _, f := range files {
		if strings.HasPrefix(f.Name(), prefix) {
			fileIdx := strings.TrimPrefix(f.Name(), prefix)
			n, err := strconv.Atoi(fileIdx)
			if err != nil {
				continue
			}
			if n > logFileIdx {
				logFileIdx = n
			}
		}
	}

	lr := &LogRotator{
		MaxFiles:   maxFiles,
		FileSize:   fileSize,
		path:       path,
		fileName:   fileName,
		logFileIdx: logFileIdx,
		logger:     logger,
		purgeCh:    make(chan struct{}, 1),
	}
	go lr.PurgeOldFiles()

	return lr, nil
}
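
// exampleUsage is a hedged usage sketch and is not part of the original
// package API: it shows one plausible way to wire a LogRotator to an io.Pipe
// so that anything written to the pipe ends up in rotated files. The
// directory, base file name, and size limits below are illustrative
// assumptions only.
func exampleUsage() error {
	logger := log.New(os.Stderr, "", log.LstdFlags)

	// Keep at most 5 rotated files of up to 1 MiB each under /tmp/logs,
	// named example.stdout.0, example.stdout.1, ... (the directory is
	// assumed to already exist).
	lr, err := NewLogRotator("/tmp/logs", "example.stdout", 5, 1<<20, logger)
	if err != nil {
		return err
	}

	// Start blocks until its reader fails or is exhausted, so it is
	// typically run in its own goroutine.
	pr, pw := io.Pipe()
	go lr.Start(pr)

	// Everything written to the pipe is now appended to the current
	// example.stdout.<index> file.
	_, err = pw.Write([]byte("hello log rotator\n"))
	return err
}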

// Start reads data from the reader and writes it to the current log file,
// rotating to a new file once the current one reaches the configured maximum
// size. It blocks until a read, write, or file operation fails and returns
// that error (an exhausted reader surfaces as io.EOF).
func (l *LogRotator) Start(r io.Reader) error {
	buf := make([]byte, bufSize)
	for {
		logFileName := filepath.Join(l.path, fmt.Sprintf("%s.%d", l.fileName, l.logFileIdx))
		var fileSize int64
		if f, err := os.Stat(logFileName); err == nil {
			// Skip the current index if it happens to be a directory
			if f.IsDir() {
				l.logFileIdx++
				continue
			}
			// Record the existing size so the remaining capacity of the log
			// file can be calculated
			fileSize = f.Size()
		}
		f, err := os.OpenFile(logFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		if err != nil {
			return err
		}
		l.logger.Printf("[DEBUG] client.logrotator: opened a new file: %s", logFileName)

		// Close the current log file if it doesn't have any more capacity
		if fileSize >= l.FileSize {
			l.logFileIdx++
			f.Close()
			continue
		}

		// Read from the reader and write into the current log file as long as
		// the file has capacity and the reader keeps producing data
		totalWritten := 0
		for {
			// Rotate once the file is full
			if l.FileSize-(fileSize+int64(totalWritten)) < 1 {
				f.Close()
				break
			}

			// Never read more than what fits into the remaining capacity of
			// the current file
			var nr int
			var err error
			remainingSize := l.FileSize - (int64(totalWritten) + fileSize)
			if remainingSize < bufSize {
				nr, err = r.Read(buf[0:remainingSize])
			} else {
				nr, err = r.Read(buf)
			}
			if err != nil {
				f.Close()
				return err
			}
			nw, err := f.Write(buf[:nr])
			if err != nil {
				f.Close()
				return err
			}
			if nr != nw {
				f.Close()
				return fmt.Errorf("failed to write data read from the reader into file, R: %d W: %d", nr, nw)
			}
			totalWritten += nr
		}

		l.logFileIdx++

		// Signal the purge goroutine if we have accumulated more files than
		// MaxFiles; the send is non-blocking since the channel is buffered
		if l.logFileIdx-l.oldestLogFileIdx >= l.MaxFiles {
			select {
			case l.purgeCh <- struct{}{}:
			default:
			}
		}
	}
}

// PurgeOldFiles removes older rotated files so that at most MaxFiles files are
// kept for a given base file name. It runs in its own goroutine and performs a
// purge every time it is signalled via purgeCh.
func (l *LogRotator) PurgeOldFiles() {
	for {
		select {
		case <-l.purgeCh:
			var fIndexes []int
			files, err := ioutil.ReadDir(l.path)
			if err != nil {
				return
			}

			// Collect the indexes of all the rotated files
			for _, f := range files {
				if strings.HasPrefix(f.Name(), l.fileName) {
					fileIdx := strings.TrimPrefix(f.Name(), fmt.Sprintf("%s.", l.fileName))
					n, err := strconv.Atoi(fileIdx)
					if err != nil {
						continue
					}
					fIndexes = append(fIndexes, n)
				}
			}

			// Nothing to purge if we don't have more files than the user
			// configured; this also guards the slice expression below against
			// going out of bounds
			if len(fIndexes) <= l.MaxFiles {
				continue
			}

			// Sort the file indexes so that we can purge the older files and
			// keep only the number of files configured by the user
			sort.Sort(sort.IntSlice(fIndexes))
			toDelete := fIndexes[0 : len(fIndexes)-l.MaxFiles]
			for _, fIndex := range toDelete {
				fname := filepath.Join(l.path, fmt.Sprintf("%s.%d", l.fileName, fIndex))
				os.RemoveAll(fname)
			}

			// The oldest file left on disk is the first index we did not delete
			l.oldestLogFileIdx = fIndexes[len(fIndexes)-l.MaxFiles]
		}
	}
}
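
// examplePurgeSelection is a hedged, illustrative sketch (not part of the
// original API) of the selection rule PurgeOldFiles applies: with the indexes
// sorted in ascending order, everything except the newest MaxFiles entries is
// removed. The index values below are made up for the example.
func examplePurgeSelection() {
	maxFiles := 3
	fIndexes := []int{4, 1, 2, 5, 3}

	sort.Ints(fIndexes)
	if len(fIndexes) <= maxFiles {
		return // nothing to purge
	}
	toDelete := fIndexes[0 : len(fIndexes)-maxFiles]
	fmt.Printf("would delete indexes %v, keep %v\n", toDelete, fIndexes[len(fIndexes)-maxFiles:])
	// prints: would delete indexes [1 2], keep [3 4 5]
}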