github.com/v2pro/plz@v0.0.0-20221028024117-e5f9aec5b631/countlog/output/lumberjack/lumberjack.go

// Package lumberjack provides a rolling logger.
//
// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
// thusly:
//
//   import "gopkg.in/natefinch/lumberjack.v2"
//
// The package name remains simply lumberjack, and the code resides at
// https://github.com/natefinch/lumberjack under the v2.0 branch.
//
// Lumberjack is intended to be one part of a logging infrastructure.
// It is not an all-in-one solution, but instead is a pluggable
// component at the bottom of the logging stack that simply controls the files
// to which logs are written.
//
// Lumberjack plays well with any logging package that can write to an
// io.Writer, including the standard library's log package.
//
// Lumberjack assumes that only one process is writing to the output files.
// Using the same lumberjack configuration from multiple processes on the same
// machine will result in improper behavior.
package lumberjack

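// A minimal usage sketch (not part of the original file), assuming the import
// path shown in the package doc above and the standard library's log package;
// the filename and limits are illustrative assumptions only:
//
//	log.SetOutput(&lumberjack.Logger{
//		Filename:   "/var/log/myapp/app.log", // hypothetical path
//		MaxSize:    10,                       // megabytes
//		MaxBackups: 3,
//		MaxAge:     28, // days
//		Compress:   true,
//	})
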
import (
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"
)

const (
	defaultBackupTimeFormat = "2006-01-02T15-04-05.000"
	compressSuffix          = ".gz"
	defaultMaxSize          = 100
)

// ensure we always implement io.WriteCloser
var _ io.WriteCloser = (*Logger)(nil)

// Logger is an io.WriteCloser that writes to the specified filename.
//
// Logger opens or creates the logfile on first Write.  If the file exists and
// is less than MaxSize megabytes, lumberjack will open and append to that file.
// If the file exists and its size is >= MaxSize megabytes, the file is renamed
// by putting the current time in a timestamp in the name immediately before the
// file's extension (or the end of the filename if there's no extension). A new
// log file is then created using the original filename.
//
// Whenever a write would cause the current log file to exceed MaxSize megabytes,
// the current file is closed, renamed, and a new log file is created with the
// original name. Thus, the filename you give Logger is always the "current" log
// file.
//
// Backups use the log file name given to Logger, in the form
// `name-timestamp.ext` where name is the filename without the extension,
// timestamp is the time at which the log was rotated, formatted with the
// time.Time layout given by BackupTimeFormat (by default
// `2006-01-02T15-04-05.000`), and the extension is the original extension.
// For example, if your Logger.Filename is `/var/log/foo/server.log`, a backup
// created at 6:30pm UTC on Nov 11 2016 would use the filename
// `/var/log/foo/server-2016-11-11T18-30-00.000.log`.  A configuration sketch
// follows the struct definition below.
//
// Cleaning Up Old Log Files
//
// Whenever a new logfile gets created, old log files may be deleted.  The most
// recent files according to the encoded timestamp will be retained, up to a
// number equal to MaxBackups (or all of them if MaxBackups is 0).  Any files
// with an encoded timestamp older than MaxAge days are deleted, regardless of
// MaxBackups.  Note that the time encoded in the timestamp is the rotation
// time, which may differ from the last time that file was written to.
//
// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
type Logger struct {
	// Filename is the file to write logs to.  Backup log files will be retained
	// in the same directory.  It uses <processname>-lumberjack.log in
	// os.TempDir() if empty.
	Filename string `json:"filename" yaml:"filename"`

	// MaxSize is the maximum size in megabytes of the log file before it gets
	// rotated. It defaults to 100 megabytes.
	MaxSize int `json:"maxsize" yaml:"maxsize"`

	// MaxAge is the maximum number of days to retain old log files based on the
	// timestamp encoded in their filename.  Note that a day is defined as 24
	// hours and may not exactly correspond to calendar days due to daylight
	// saving time, leap seconds, etc. The default is not to remove old log files
	// based on age.
	MaxAge int `json:"maxage" yaml:"maxage"`

	// MaxBackups is the maximum number of old log files to retain.  The default
	// is to retain all old log files (though MaxAge may still cause them to get
	// deleted).
	MaxBackups int `json:"maxbackups" yaml:"maxbackups"`

	// LocalTime determines if the time used for formatting the timestamps in
	// backup files is the computer's local time.  The default is to use UTC
	// time.
	LocalTime bool `json:"localtime" yaml:"localtime"`

	// Compress determines if the rotated log files should be compressed
	// using gzip. The default is not to perform compression.
	Compress bool `json:"compress" yaml:"compress"`

	// BackupTimeFormat is the time layout used for the timestamp in backup
	// file names. It defaults to `2006-01-02T15-04-05.000` if empty.
	BackupTimeFormat string

	size int64
	file *os.File
	mu   sync.Mutex

	millCh    chan bool
	startMill sync.Once
}
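
// The following sketch (not part of the original file) illustrates how the
// fields above combine; the path and limits are illustrative assumptions:
//
//	l := &Logger{
//		Filename:   "/var/log/foo/server.log", // hypothetical path
//		MaxSize:    50,                        // rotate after ~50 MB
//		MaxBackups: 5,                         // keep at most 5 backups
//		MaxAge:     7,                         // and none older than 7 days
//		Compress:   true,                      // gzip rotated files
//	}
//
// With this configuration, a rotation at 6:30pm UTC on Nov 11 2016 renames the
// current file to /var/log/foo/server-2016-11-11T18-30-00.000.log (later
// compressed to the same name with a .gz suffix), and a fresh
// /var/log/foo/server.log is created.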

var (
	// currentTime exists so it can be mocked out by tests.
	currentTime = time.Now

	// os_Stat exists so it can be mocked out by tests.
	os_Stat = os.Stat

	// megabyte is the conversion factor between MaxSize and bytes.  It is a
	// variable so tests can mock it out and not need to write megabytes of data
	// to disk.
	megabyte = 1024 * 1024
)

// Write implements io.Writer.  If a write would cause the log file to be larger
// than MaxSize, the file is closed, renamed to include a timestamp of the
// current time, and a new log file is created using the original log file name.
// If the length of the write is greater than MaxSize, an error is returned.
func (l *Logger) Write(p []byte) (n int, err error) {
	l.mu.Lock()
	defer l.mu.Unlock()

	writeLen := int64(len(p))
	if writeLen > l.max() {
		return 0, fmt.Errorf(
			"write length %d exceeds maximum file size %d", writeLen, l.max(),
		)
	}

	if l.file == nil {
		if err = l.openExistingOrNew(len(p)); err != nil {
			return 0, err
		}
	}

	if l.size+writeLen > l.max() {
		if err := l.rotate(); err != nil {
			return 0, err
		}
	}

	n, err = l.file.Write(p)
	l.size += int64(n)

	return n, err
}
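
// Worked illustration (not part of the original file): with MaxSize set to 1,
// l.max() is 1*1024*1024 bytes, so a single 2 MiB Write returns an error,
// while a smaller write that would push the file past 1 MiB first triggers a
// rotation and then lands in the new, empty log file.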

// Close implements io.Closer, and closes the current logfile.
func (l *Logger) Close() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.close()
}

// close closes the file if it is open.
func (l *Logger) close() error {
	if l.file == nil {
		return nil
	}
	err := l.file.Close()
	l.file = nil
	return err
}

// Rotate causes Logger to close the existing log file and immediately create a
// new one.  This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP.  After rotating, this initiates compression and removal of old log
// files according to the configuration.
func (l *Logger) Rotate() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.rotate()
}
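
// A minimal SIGHUP-driven rotation sketch (not part of the original file),
// assuming the caller imports os/signal and syscall; the variable names are
// illustrative only:
//
//	c := make(chan os.Signal, 1)
//	signal.Notify(c, syscall.SIGHUP)
//	go func() {
//		for range c {
//			_ = l.Rotate() // l is the *Logger in use
//		}
//	}()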

// rotate closes the current file, moves it aside with a timestamp in the name
// (if it exists), opens a new file with the original filename, and then runs
// post-rotation processing and removal.
func (l *Logger) rotate() error {
	if err := l.close(); err != nil {
		return err
	}
	if err := l.openNew(); err != nil {
		return err
	}
	l.mill()
	return nil
}

// openNew opens a new log file for writing, moving any old log file out of the
// way.  This method assumes the file has already been closed.
func (l *Logger) openNew() error {
	err := os.MkdirAll(l.dir(), 0744)
	if err != nil {
		return fmt.Errorf("can't make directories for new logfile: %s", err)
	}

	name := l.filename()
	mode := os.FileMode(0644)
	info, err := os_Stat(name)
	if err == nil {
		// Copy the mode off the old logfile.
		mode = info.Mode()
		// move the existing file
		if l.BackupTimeFormat == "" {
			l.BackupTimeFormat = defaultBackupTimeFormat
		}
		newname := backupName(name, l.LocalTime, l.BackupTimeFormat)
		if err := os.Rename(name, newname); err != nil {
			return fmt.Errorf("can't rename log file: %s", err)
		}

		// this is a no-op anywhere but linux
		if err := chown(name, info); err != nil {
			return err
		}
	}

	// we use truncate here because this should only get called when we've moved
	// the file ourselves. if someone else creates the file in the meantime,
	// just wipe out the contents.
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
	if err != nil {
		return fmt.Errorf("can't open new logfile: %s", err)
	}
	l.file = f
	l.size = 0
	return nil
}

// backupName creates a new filename from the given name, inserting a timestamp
// between the filename and the extension, using the local time if requested
// (otherwise UTC).
func backupName(name string, local bool, backupTimeFormat string) string {
	dir := filepath.Dir(name)
	filename := filepath.Base(name)
	ext := filepath.Ext(filename)
	prefix := filename[:len(filename)-len(ext)]
	t := currentTime()
	if !local {
		t = t.UTC()
	}

	timestamp := t.Format(backupTimeFormat)
	return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
}

// openExistingOrNew opens the logfile if it exists and if the current write
// would not put it over MaxSize.  If there is no such file or the write would
// put it over the MaxSize, a new file is created.
func (l *Logger) openExistingOrNew(writeLen int) error {
	l.mill()

	filename := l.filename()
	info, err := os_Stat(filename)
	if os.IsNotExist(err) {
		return l.openNew()
	}
	if err != nil {
		return fmt.Errorf("error getting log file info: %s", err)
	}

	if info.Size()+int64(writeLen) >= l.max() {
		return l.rotate()
	}

	file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		// if we fail to open the old log file for some reason, just ignore
		// it and open a new log file.
		return l.openNew()
	}
	l.file = file
	l.size = info.Size()
	return nil
}

// filename returns the configured Filename, or a default
// <processname>-lumberjack.log in os.TempDir() if Filename is empty.
func (l *Logger) filename() string {
	if l.Filename != "" {
		return l.Filename
	}
	name := filepath.Base(os.Args[0]) + "-lumberjack.log"
	return filepath.Join(os.TempDir(), name)
}

// millRunOnce performs compression and removal of stale log files.
// Log files are compressed if enabled via configuration and old log
// files are removed, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *Logger) millRunOnce() error {
	if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
		return nil
	}

	files, err := l.oldLogFiles()
	if err != nil {
		return err
	}

	var compress, remove []logInfo

	if l.MaxBackups > 0 && l.MaxBackups < len(files) {
		preserved := make(map[string]bool)
		var remaining []logInfo
		for _, f := range files {
			// Only count the uncompressed log file or the
			// compressed log file, not both.
			fn := f.Name()
			if strings.HasSuffix(fn, compressSuffix) {
				fn = fn[:len(fn)-len(compressSuffix)]
			}
			preserved[fn] = true

			if len(preserved) > l.MaxBackups {
				remove = append(remove, f)
			} else {
				remaining = append(remaining, f)
			}
		}
		files = remaining
	}
	if l.MaxAge > 0 {
		diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
		cutoff := currentTime().Add(-1 * diff)

		var remaining []logInfo
		for _, f := range files {
			if f.timestamp.Before(cutoff) {
				remove = append(remove, f)
			} else {
				remaining = append(remaining, f)
			}
		}
		files = remaining
	}

	if l.Compress {
		for _, f := range files {
			if !strings.HasSuffix(f.Name(), compressSuffix) {
				compress = append(compress, f)
			}
		}
	}

	for _, f := range remove {
		errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
		if err == nil && errRemove != nil {
			err = errRemove
		}
	}
	for _, f := range compress {
		fn := filepath.Join(l.dir(), f.Name())
		errCompress := compressLogFile(fn, fn+compressSuffix)
		if err == nil && errCompress != nil {
			err = errCompress
		}
	}

	return err
}
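
// Worked illustration (not part of the original file): with MaxBackups set to
// 1 and three backups on disk, foo-t1.log, foo-t1.log.gz, and foo-t2.log
// (hypothetical names, t1 newer than t2), the compressed and uncompressed
// copies of the t1 backup count as a single entry in the preserved map above,
// so only foo-t2.log is queued for removal.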

// millRun runs in a goroutine to manage post-rotation compression and removal
// of old log files.
func (l *Logger) millRun() {
	for range l.millCh {
		// what am I going to do, log this?
		_ = l.millRunOnce()
	}
}

// mill performs post-rotation compression and removal of stale log files,
// starting the mill goroutine if necessary.
func (l *Logger) mill() {
	l.startMill.Do(func() {
		l.millCh = make(chan bool, 1)
		go l.millRun()
	})
	select {
	case l.millCh <- true:
	default:
		// a mill run is already queued; this request coalesces with it
	}
}

// oldLogFiles returns the list of backup log files stored in the same
// directory as the current log file, sorted by the timestamp encoded in
// their names, newest first.
func (l *Logger) oldLogFiles() ([]logInfo, error) {
	files, err := ioutil.ReadDir(l.dir())
	if err != nil {
		return nil, fmt.Errorf("can't read log file directory: %s", err)
	}
	logFiles := []logInfo{}

	prefix, ext := l.prefixAndExt()

	for _, f := range files {
		if f.IsDir() {
			continue
		}
		if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
			logFiles = append(logFiles, logInfo{t, f})
			continue
		}
		if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
			logFiles = append(logFiles, logInfo{t, f})
			continue
		}
		// error parsing means that the suffix at the end was not generated
		// by lumberjack, and therefore it's not a backup file.
	}

	sort.Sort(byFormatTime(logFiles))

	return logFiles, nil
}

// timeFromName extracts the formatted time from the filename by stripping off
// the filename's prefix and extension. This prevents someone's filename from
// confusing time.Parse.
func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
	if !strings.HasPrefix(filename, prefix) {
		return time.Time{}, errors.New("mismatched prefix")
	}
	if !strings.HasSuffix(filename, ext) {
		return time.Time{}, errors.New("mismatched extension")
	}
	ts := filename[len(prefix) : len(filename)-len(ext)]
	if l.BackupTimeFormat == "" {
		l.BackupTimeFormat = defaultBackupTimeFormat
	}
	return time.Parse(l.BackupTimeFormat, ts)
}

// max returns the maximum size in bytes of log files before rolling.
func (l *Logger) max() int64 {
	if l.MaxSize == 0 {
		return int64(defaultMaxSize * megabyte)
	}
	return int64(l.MaxSize) * int64(megabyte)
}
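
// For example (illustrative only): with MaxSize == 5, max() returns
// 5 * 1024 * 1024 = 5242880 bytes; with MaxSize == 0 it falls back to the
// 100-megabyte default, i.e. 104857600 bytes.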

// dir returns the directory for the current filename.
func (l *Logger) dir() string {
	return filepath.Dir(l.filename())
}

// prefixAndExt returns the filename part and extension part from the Logger's
// filename.
func (l *Logger) prefixAndExt() (prefix, ext string) {
	filename := filepath.Base(l.filename())
	ext = filepath.Ext(filename)
	prefix = filename[:len(filename)-len(ext)] + "-"
	return prefix, ext
}
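
// Illustration (not part of the original file): for Filename
// "/var/log/foo/server.log", prefixAndExt returns ("server-", ".log"), and a
// backup named "server-2016-11-11T18-30-00.000.log" (or the same name with a
// ".gz" suffix) yields the timestamp string "2016-11-11T18-30-00.000" in
// timeFromName.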

// compressLogFile compresses the given log file, removing the
// uncompressed log file if successful.
func compressLogFile(src, dst string) (err error) {
	f, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("failed to open log file: %v", err)
	}
	defer f.Close()

	fi, err := os_Stat(src)
	if err != nil {
		return fmt.Errorf("failed to stat log file: %v", err)
	}

	if err := chown(dst, fi); err != nil {
		return fmt.Errorf("failed to chown compressed log file: %v", err)
	}

	// If this file already exists, we presume it was created by
	// a previous attempt to compress the log file.
	gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
	if err != nil {
		return fmt.Errorf("failed to open compressed log file: %v", err)
	}
	defer gzf.Close()

	gz := gzip.NewWriter(gzf)

	defer func() {
		if err != nil {
			os.Remove(dst)
			err = fmt.Errorf("failed to compress log file: %v", err)
		}
	}()

	if _, err := io.Copy(gz, f); err != nil {
		return err
	}
	if err := gz.Close(); err != nil {
		return err
	}
	if err := gzf.Close(); err != nil {
		return err
	}

	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Remove(src); err != nil {
		return err
	}

	return nil
}

// logInfo is a convenience struct to return the filename and its embedded
// timestamp.
type logInfo struct {
	timestamp time.Time
	os.FileInfo
}

// byFormatTime sorts by newest time formatted in the name.
type byFormatTime []logInfo

func (b byFormatTime) Less(i, j int) bool {
	return b[i].timestamp.After(b[j].timestamp)
}

func (b byFormatTime) Swap(i, j int) {
	b[i], b[j] = b[j], b[i]
}

func (b byFormatTime) Len() int {
	return len(b)
}