|
@@ -0,0 +1,514 @@
|
|
|
+// Derived from https://github.com/natefinch/lumberjack
|
|
|
+// Adapt filesystem to github.com/spf13/afero
|
|
|
+
|
|
|
+package lumberjack_afero
|
|
|
+
|
|
|
+import (
|
|
|
+ "compress/gzip"
|
|
|
+ "errors"
|
|
|
+ "fmt"
|
|
|
+ "github.com/spf13/afero"
|
|
|
+ "io"
|
|
|
+ "io/fs"
|
|
|
+ "path/filepath"
|
|
|
+ "sort"
|
|
|
+ "strings"
|
|
|
+ "sync"
|
|
|
+ "time"
|
|
|
+)
|
|
|
+
|
|
|
const (
	// backupTimeFormat is the time.Time layout embedded in backup file names.
	backupTimeFormat = "2006-01-02T15-04-05.000"
	// compressSuffix is appended to backups that have been gzip-compressed.
	compressSuffix = ".gz"
	// defaultMaxSize is the rotation threshold in megabytes used when
	// Logger.MaxSize is zero.
	defaultMaxSize = 100
)
|
|
|
+
|
|
|
// Compile-time assertion that *Logger satisfies io.WriteCloser.
var _ io.WriteCloser = (*Logger)(nil)
|
|
|
+
|
|
|
+// Logger is an io.WriteCloser that writes to the specified filename.
|
|
|
+//
|
|
|
+// Logger opens or creates the logfile on first Write. If the file exists and
|
|
|
+// is less than MaxSize megabytes, lumberjack will open and append to that file.
|
|
|
+// If the file exists and its size is >= MaxSize megabytes, the file is renamed
|
|
|
+// by putting the current time in a timestamp in the name immediately before the
|
|
|
+// file's extension (or the end of the filename if there's no extension). A new
|
|
|
+// log file is then created using original filename.
|
|
|
+//
|
|
|
+// Whenever a write would cause the current log file exceed MaxSize megabytes,
|
|
|
+// the current file is closed, renamed, and a new log file created with the
|
|
|
+// original name. Thus, the filename you give Logger is always the "current" log
|
|
|
+// file.
|
|
|
+//
|
|
|
+// Backups use the log file name given to Logger, in the form
|
|
|
+// `name-timestamp.ext` where name is the filename without the extension,
|
|
|
+// timestamp is the time at which the log was rotated formatted with the
|
|
|
+// time.Time format of `2006-01-02T15-04-05.000` and the extension is the
|
|
|
+// original extension. For example, if your Logger.Filename is
|
|
|
+// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 11 2016 would
|
|
|
+// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log`
|
|
|
+//
|
|
|
+// # Cleaning Up Old Log Files
|
|
|
+//
|
|
|
+// Whenever a new logfile gets created, old log files may be deleted. The most
|
|
|
+// recent files according to the encoded timestamp will be retained, up to a
|
|
|
+// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
|
|
|
+// with an encoded timestamp older than MaxAge days are deleted, regardless of
|
|
|
+// MaxBackups. Note that the time encoded in the timestamp is the rotation
|
|
|
+// time, which may differ from the last time that file was written to.
|
|
|
+//
|
|
|
+// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
|
|
|
type Logger struct {
	// Filesystem is an afero.Fs Object to hold the logs.
	Filesystem afero.Fs
	// Filename is the file to write logs to. Backup log files will be retained
	// in the same directory. If not specified, will use `logs/<processname>.log`
	Filename string `json:"filename" yaml:"filename"`

	// MaxSize is the maximum size in megabytes of the log file before it gets
	// rotated. It defaults to 100 megabytes.
	MaxSize int `json:"maxsize" yaml:"maxsize"`

	// MaxAge is the maximum number of days to retain old log files based on the
	// timestamp encoded in their filename. Note that a day is defined as 24
	// hours and may not exactly correspond to calendar days due to daylight
	// savings, leap seconds, etc. The default is not to remove old log files
	// based on age.
	MaxAge int `json:"maxage" yaml:"maxage"`

	// MaxBackups is the maximum number of old log files to retain. The default
	// is to retain all old log files (though MaxAge may still cause them to get
	// deleted.)
	MaxBackups int `json:"maxbackups" yaml:"maxbackups"`

	// LocalTime determines if the time used for formatting the timestamps in
	// backup files is the computer's local time. The default is to use UTC
	// time.
	LocalTime bool `json:"localtime" yaml:"localtime"`

	// Compress determines if the rotated log files should be compressed
	// using gzip. The default is not to perform compression.
	Compress bool `json:"compress" yaml:"compress"`

	// size is the number of bytes written to the currently open log file.
	size int64
	// file is the currently open log file; nil until the first Write or
	// after Close.
	file afero.File
	// mu serializes Write/Rotate/Close and guards size and file.
	mu sync.Mutex

	// millCh signals the mill goroutine to run compression/removal.
	millCh chan bool
	// startMill ensures the mill goroutine is started at most once.
	startMill sync.Once
}
|
|
|
+
|
|
|
var (
	// currentTime exists so it can be mocked out by tests.
	currentTime = time.Now

	// megabyte is the conversion factor between MaxSize and bytes. It is a
	// variable so tests can mock it out and not need to write megabytes of data
	// to disk.
	megabyte = 1024 * 1024
)
|
|
|
+
|
|
|
+// Write implements io.Writer. If a write would cause the log file to be larger
|
|
|
+// than MaxSize, the file is closed, renamed to include a timestamp of the
|
|
|
+// current time, and a new log file is created using the original log file name.
|
|
|
+// If the length of the write is greater than MaxSize, an error is returned.
|
|
|
+func (l *Logger) Write(p []byte) (n int, err error) {
|
|
|
+ l.mu.Lock()
|
|
|
+ defer l.mu.Unlock()
|
|
|
+
|
|
|
+ writeLen := int64(len(p))
|
|
|
+ if writeLen > l.max() {
|
|
|
+ return 0, fmt.Errorf(
|
|
|
+ "write length %d exceeds maximum file size %d", writeLen, l.max(),
|
|
|
+ )
|
|
|
+ }
|
|
|
+
|
|
|
+ if l.file == nil {
|
|
|
+ if err = l.openExistingOrNew(len(p)); err != nil {
|
|
|
+ return 0, err
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ if l.size+writeLen > l.max() {
|
|
|
+ if err := l.rotate(); err != nil {
|
|
|
+ return 0, err
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ n, err = l.file.Write(p)
|
|
|
+ l.size += int64(n)
|
|
|
+
|
|
|
+ return n, err
|
|
|
+}
|
|
|
+
|
|
|
+// Close implements io.Closer, and closes the current logfile.
|
|
|
+func (l *Logger) Close() error {
|
|
|
+ l.mu.Lock()
|
|
|
+ defer l.mu.Unlock()
|
|
|
+ return l.close()
|
|
|
+}
|
|
|
+
|
|
|
+// close closes the file if it is open.
|
|
|
+func (l *Logger) close() error {
|
|
|
+ if l.file == nil {
|
|
|
+ return nil
|
|
|
+ }
|
|
|
+ err := l.file.Close()
|
|
|
+ l.file = nil
|
|
|
+ return err
|
|
|
+}
|
|
|
+
|
|
|
+// Rotate causes Logger to close the existing log file and immediately create a
|
|
|
+// new one. This is a helper function for applications that want to initiate
|
|
|
+// rotations outside of the normal rotation rules, such as in response to
|
|
|
+// SIGHUP. After rotating, this initiates compression and removal of old log
|
|
|
+// files according to the configuration.
|
|
|
+func (l *Logger) Rotate() error {
|
|
|
+ l.mu.Lock()
|
|
|
+ defer l.mu.Unlock()
|
|
|
+ return l.rotate()
|
|
|
+}
|
|
|
+
|
|
|
+// rotate closes the current file, moves it aside with a timestamp in the name,
|
|
|
+// (if it exists), opens a new file with the original filename, and then runs
|
|
|
+// post-rotation processing and removal.
|
|
|
+func (l *Logger) rotate() error {
|
|
|
+ if err := l.close(); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ if err := l.openNew(); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ l.mill()
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+// openNew opens a new log file for writing, moving any old log file out of the
|
|
|
+// way. This methods assumes the file has already been closed.
|
|
|
+func (l *Logger) openNew() error {
|
|
|
+ err := l.Filesystem.MkdirAll(l.dir(), 0755)
|
|
|
+ if err != nil {
|
|
|
+ return fmt.Errorf("can't make directories for new logfile: %s", err)
|
|
|
+ }
|
|
|
+
|
|
|
+ name := l.filename()
|
|
|
+ mode := fs.FileMode(0600)
|
|
|
+ info, err := l.Filesystem.Stat(name)
|
|
|
+ if err == nil {
|
|
|
+ // Copy the mode off the old logfile.
|
|
|
+ mode = info.Mode()
|
|
|
+ // move the existing file
|
|
|
+ newname := backupName(name, l.LocalTime)
|
|
|
+ if err := l.Filesystem.Rename(name, newname); err != nil {
|
|
|
+ return fmt.Errorf("can't rename log file: %s", err)
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ // we use truncate here because this should only get called when we've moved
|
|
|
+ // the file ourselves. if someone else creates the file in the meantime,
|
|
|
+ // just wipe out the contents.
|
|
|
+ f, err := l.Filesystem.OpenFile(name, O_CREATE|O_WRONLY|O_TRUNC, mode)
|
|
|
+ if err != nil {
|
|
|
+ return fmt.Errorf("can't open new logfile: %s", err)
|
|
|
+ }
|
|
|
+ l.file = f
|
|
|
+ l.size = 0
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+// backupName creates a new filename from the given name, inserting a timestamp
|
|
|
+// between the filename and the extension, using the local time if requested
|
|
|
+// (otherwise UTC).
|
|
|
+func backupName(name string, local bool) string {
|
|
|
+ dir := filepath.Dir(name)
|
|
|
+ filename := filepath.Base(name)
|
|
|
+ ext := filepath.Ext(filename)
|
|
|
+ prefix := filename[:len(filename)-len(ext)]
|
|
|
+ t := currentTime()
|
|
|
+ if !local {
|
|
|
+ t = t.UTC()
|
|
|
+ }
|
|
|
+
|
|
|
+ timestamp := t.Format(backupTimeFormat)
|
|
|
+ return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
|
|
|
+}
|
|
|
+
|
|
|
+// openExistingOrNew opens the logfile if it exists and if the current write
|
|
|
+// would not put it over MaxSize. If there is no such file or the write would
|
|
|
+// put it over the MaxSize, a new file is created.
|
|
|
+func (l *Logger) openExistingOrNew(writeLen int) error {
|
|
|
+ l.mill()
|
|
|
+
|
|
|
+ filename := l.filename()
|
|
|
+ info, err := l.Filesystem.Stat(filename)
|
|
|
+ if IsNotExist(err) {
|
|
|
+ return l.openNew()
|
|
|
+ }
|
|
|
+ if err != nil {
|
|
|
+ return fmt.Errorf("error getting log file info: %s", err)
|
|
|
+ }
|
|
|
+
|
|
|
+ if info.Size()+int64(writeLen) >= l.max() {
|
|
|
+ return l.rotate()
|
|
|
+ }
|
|
|
+
|
|
|
+ file, err := l.Filesystem.OpenFile(filename, O_APPEND|O_WRONLY, 0644)
|
|
|
+ if err != nil {
|
|
|
+ // if we fail to open the old log file for some reason, just ignore
|
|
|
+ // it and open a new log file.
|
|
|
+ return l.openNew()
|
|
|
+ }
|
|
|
+ l.file = file
|
|
|
+ l.size = info.Size()
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+// filename generates the name of the logfile from the current time.
|
|
|
+func (l *Logger) filename() string {
|
|
|
+ if l.Filename != "" {
|
|
|
+ return l.Filename
|
|
|
+ }
|
|
|
+ name := getProcessName() + ".log"
|
|
|
+ return filepath.Join("logs", name)
|
|
|
+}
|
|
|
+
|
|
|
+// millRunOnce performs compression and removal of stale log files.
|
|
|
+// Log files are compressed if enabled via configuration and old log
|
|
|
+// files are removed, keeping at most l.MaxBackups files, as long as
|
|
|
+// none of them are older than MaxAge.
|
|
|
// millRunOnce performs compression and removal of stale log files.
// Log files are compressed if enabled via configuration and old log
// files are removed, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *Logger) millRunOnce() error {
	// Nothing to do if no retention limits and no compression are configured.
	if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
		return nil
	}

	// files is sorted newest-first by the timestamp encoded in the name
	// (see oldLogFiles), so earlier entries are the ones to keep.
	files, err := l.oldLogFiles()
	if err != nil {
		return err
	}

	var compress, remove []logInfo

	if l.MaxBackups > 0 && l.MaxBackups < len(files) {
		// preserved tracks distinct backup names (with the .gz suffix
		// stripped) so a compressed file and its uncompressed twin
		// count as a single backup against MaxBackups.
		preserved := make(map[string]bool)
		var remaining []logInfo
		for _, f := range files {
			// Only count the uncompressed log file or the
			// compressed log file, not both.
			fn := f.Name()
			if strings.HasSuffix(fn, compressSuffix) {
				fn = fn[:len(fn)-len(compressSuffix)]
			}
			preserved[fn] = true

			if len(preserved) > l.MaxBackups {
				remove = append(remove, f)
			} else {
				remaining = append(remaining, f)
			}
		}
		files = remaining
	}
	if l.MaxAge > 0 {
		// Anything whose encoded rotation time is older than MaxAge
		// days is removed regardless of MaxBackups.
		diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
		cutoff := currentTime().Add(-1 * diff)

		var remaining []logInfo
		for _, f := range files {
			if f.timestamp.Before(cutoff) {
				remove = append(remove, f)
			} else {
				remaining = append(remaining, f)
			}
		}
		files = remaining
	}

	if l.Compress {
		// Compress every surviving backup that isn't already gzipped.
		for _, f := range files {
			if !strings.HasSuffix(f.Name(), compressSuffix) {
				compress = append(compress, f)
			}
		}
	}

	// Perform removals and compressions, remembering only the FIRST error
	// encountered while still attempting every operation.
	for _, f := range remove {
		errRemove := l.Filesystem.Remove(filepath.Join(l.dir(), f.Name()))
		if err == nil && errRemove != nil {
			err = errRemove
		}
	}
	for _, f := range compress {
		fn := filepath.Join(l.dir(), f.Name())
		errCompress := l.compressLogFile(fn, fn+compressSuffix)
		if err == nil && errCompress != nil {
			err = errCompress
		}
	}

	return err
}
|
|
|
+
|
|
|
+// millRun runs in a goroutine to manage post-rotation compression and removal
|
|
|
+// of old log files.
|
|
|
+func (l *Logger) millRun() {
|
|
|
+ for range l.millCh {
|
|
|
+ // what am I going to do, log this?
|
|
|
+ _ = l.millRunOnce()
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+// mill performs post-rotation compression and removal of stale log files,
|
|
|
+// starting the mill goroutine if necessary.
|
|
|
+func (l *Logger) mill() {
|
|
|
+ l.startMill.Do(func() {
|
|
|
+ l.millCh = make(chan bool, 1)
|
|
|
+ go l.millRun()
|
|
|
+ })
|
|
|
+ select {
|
|
|
+ case l.millCh <- true:
|
|
|
+ default:
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+// oldLogFiles returns the list of backup log files stored in the same
|
|
|
+// directory as the current log file, sorted by ModTime
|
|
|
+func (l *Logger) oldLogFiles() ([]logInfo, error) {
|
|
|
+ files, err := afero.ReadDir(l.Filesystem, l.dir())
|
|
|
+ if err != nil {
|
|
|
+ return nil, fmt.Errorf("can't read log file directory: %s", err)
|
|
|
+ }
|
|
|
+ logFiles := []logInfo{}
|
|
|
+
|
|
|
+ prefix, ext := l.prefixAndExt()
|
|
|
+
|
|
|
+ for _, f := range files {
|
|
|
+ if f.IsDir() {
|
|
|
+ continue
|
|
|
+ }
|
|
|
+ if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
|
|
|
+ logFiles = append(logFiles, logInfo{t, f})
|
|
|
+ continue
|
|
|
+ }
|
|
|
+ if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
|
|
|
+ logFiles = append(logFiles, logInfo{t, f})
|
|
|
+ continue
|
|
|
+ }
|
|
|
+ // error parsing means that the suffix at the end was not generated
|
|
|
+ // by lumberjack, and therefore it's not a backup file.
|
|
|
+ }
|
|
|
+
|
|
|
+ sort.Sort(byFormatTime(logFiles))
|
|
|
+
|
|
|
+ return logFiles, nil
|
|
|
+}
|
|
|
+
|
|
|
+// timeFromName extracts the formatted time from the filename by stripping off
|
|
|
+// the filename's prefix and extension. This prevents someone's filename from
|
|
|
+// confusing time.parse.
|
|
|
+func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
|
|
|
+ if !strings.HasPrefix(filename, prefix) {
|
|
|
+ return time.Time{}, errors.New("mismatched prefix")
|
|
|
+ }
|
|
|
+ if !strings.HasSuffix(filename, ext) {
|
|
|
+ return time.Time{}, errors.New("mismatched extension")
|
|
|
+ }
|
|
|
+ ts := filename[len(prefix) : len(filename)-len(ext)]
|
|
|
+ return time.Parse(backupTimeFormat, ts)
|
|
|
+}
|
|
|
+
|
|
|
+// max returns the maximum size in bytes of log files before rolling.
|
|
|
+func (l *Logger) max() int64 {
|
|
|
+ if l.MaxSize == 0 {
|
|
|
+ return int64(defaultMaxSize * megabyte)
|
|
|
+ }
|
|
|
+ return int64(l.MaxSize) * int64(megabyte)
|
|
|
+}
|
|
|
+
|
|
|
+// dir returns the directory for the current filename.
|
|
|
+func (l *Logger) dir() string {
|
|
|
+ return filepath.Dir(l.filename())
|
|
|
+}
|
|
|
+
|
|
|
+// prefixAndExt returns the filename part and extension part from the Logger's
|
|
|
+// filename.
|
|
|
+func (l *Logger) prefixAndExt() (prefix, ext string) {
|
|
|
+ filename := filepath.Base(l.filename())
|
|
|
+ ext = filepath.Ext(filename)
|
|
|
+ prefix = filename[:len(filename)-len(ext)] + "-"
|
|
|
+ return prefix, ext
|
|
|
+}
|
|
|
+
|
|
|
+// compressLogFile compresses the given log file, removing the
|
|
|
+// uncompressed log file if successful.
|
|
|
+func (l *Logger) compressLogFile(src, dst string) (err error) {
|
|
|
+ f, err := l.Filesystem.Open(src)
|
|
|
+ if err != nil {
|
|
|
+ return fmt.Errorf("failed to open log file: %v", err)
|
|
|
+ }
|
|
|
+ // noinspection GoUnhandledErrorResult
|
|
|
+ defer f.Close()
|
|
|
+
|
|
|
+ fi, err := l.Filesystem.Stat(src)
|
|
|
+ if err != nil {
|
|
|
+ return fmt.Errorf("failed to stat log file: %v", err)
|
|
|
+ }
|
|
|
+
|
|
|
+ // If this file already exists, we presume it was created by
|
|
|
+ // a previous attempt to compress the log file.
|
|
|
+ gzf, err := l.Filesystem.OpenFile(dst, O_CREATE|O_TRUNC|O_WRONLY, fi.Mode())
|
|
|
+ if err != nil {
|
|
|
+ return fmt.Errorf("failed to open compressed log file: %v", err)
|
|
|
+ }
|
|
|
+ // noinspection GoUnhandledErrorResult
|
|
|
+ defer gzf.Close()
|
|
|
+
|
|
|
+ gz := gzip.NewWriter(gzf)
|
|
|
+
|
|
|
+ defer func() {
|
|
|
+ if err != nil {
|
|
|
+ _ = l.Filesystem.Remove(dst)
|
|
|
+ err = fmt.Errorf("failed to compress log file: %v", err)
|
|
|
+ }
|
|
|
+ }()
|
|
|
+
|
|
|
+ if _, err := io.Copy(gz, f); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ if err := gz.Close(); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ if err := gzf.Close(); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+
|
|
|
+ if err := f.Close(); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+ if err := l.Filesystem.Remove(src); err != nil {
|
|
|
+ return err
|
|
|
+ }
|
|
|
+
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+// logInfo is a convenience struct to return the filename and its embedded
|
|
|
+// timestamp.
|
|
|
+type logInfo struct {
|
|
|
+ timestamp time.Time
|
|
|
+ fs.FileInfo
|
|
|
+}
|
|
|
+
|
|
|
+// byFormatTime sorts by newest time formatted in the name.
|
|
|
+type byFormatTime []logInfo
|
|
|
+
|
|
|
+func (b byFormatTime) Less(i, j int) bool {
|
|
|
+ return b[i].timestamp.After(b[j].timestamp)
|
|
|
+}
|
|
|
+
|
|
|
+func (b byFormatTime) Swap(i, j int) {
|
|
|
+ b[i], b[j] = b[j], b[i]
|
|
|
+}
|
|
|
+
|
|
|
+func (b byFormatTime) Len() int {
|
|
|
+ return len(b)
|
|
|
+}
|