github.com/keybase/client/go@v0.0.0-20241007131713-f10651d043c8/status/utils.go

// Copyright 2019 Keybase, Inc. All rights reserved. Use of
// this source code is governed by the included BSD license.

package status

import (
	"archive/tar"
	"bufio"
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"mime/multipart"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"time"

	humanize "github.com/dustin/go-humanize"
	"github.com/keybase/client/go/install"
	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/logger"
	"github.com/keybase/client/go/protocol/keybase1"
	jsonw "github.com/keybase/go-jsonw"
)

// MergeStatusJSON merges the given `obj` into the given `status` JSON blob.
// If any errors occur, the original `status` is returned. Otherwise a new
// JSON blob is created of the form {"status": status, key: obj}.
func MergeStatusJSON(obj interface{}, key, status string) string {
	if err := jsonw.EnsureMaxDepthBytesDefault([]byte(status)); err != nil {
		return status
	}

	var statusObj map[string]interface{}
	if err := json.Unmarshal([]byte(status), &statusObj); err != nil {
		return status
	}

	statusMap := make(map[string]interface{})
	statusMap["status"] = statusObj
	statusMap[key] = obj

	fullStatus, err := json.Marshal(statusMap)
	if err != nil {
		return status
	}
	return string(fullStatus)
}

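// A minimal usage sketch with hypothetical values: merge an extra object into
// an existing status blob. json.Marshal alphabetizes map keys, so the merged
// output below is deterministic.
//
//	merged := MergeStatusJSON(map[string]string{"version": "5.0"}, "client", `{"ok":true}`)
//	// merged == `{"client":{"version":"5.0"},"status":{"ok":true}}`
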
// getServiceLog uses the EffectiveLogPath if available and falls back to the
// ServiceLogFileName otherwise.
func getServiceLog(mctx libkb.MetaContext, logDir string) string {
	serviceLogPath, ok := mctx.G().Env.GetEffectiveLogFile()
	if ok && serviceLogPath != "" {
		var err error
		serviceLogPath, err = filepath.Abs(serviceLogPath)
		if err != nil {
			mctx.Debug("Unable to get abspath for effective log file %v", err)
			serviceLogPath = ""
		}
	}
	if serviceLogPath == "" {
		return filepath.Join(logDir, libkb.ServiceLogFileName)
	}
	return serviceLogPath
}

func logFilesFromStatus(g *libkb.GlobalContext, fstatus *keybase1.FullStatus) Logs {
	logDir := g.Env.GetLogDir()
	installLogPath, err := install.InstallLogPath()
	if err != nil {
		g.Log.Errorf("Error (InstallLogPath): %s", err)
	}

	watchdogLogPath, err := install.WatchdogLogPath(filepath.Join(logDir, "watchdog*.log"))
	if err != nil {
		g.Log.Errorf("Error (WatchdogLogPath): %s", err)
	}

	traceDir := logDir
	cpuProfileDir := logDir
	if fstatus != nil {
		return Logs{
			GUI:        fstatus.Desktop.Log,
			Kbfs:       fstatus.Kbfs.Log,
			KbfsPerf:   fstatus.Kbfs.PerfLog,
			Service:    fstatus.Service.Log,
			EK:         fstatus.Service.EkLog,
			Perf:       fstatus.Service.PerfLog,
			Updater:    fstatus.Updater.Log,
			Start:      fstatus.Start.Log,
			System:     install.SystemLogPath(),
			Git:        fstatus.Git.Log,
			GitPerf:    fstatus.Git.PerfLog,
			Install:    installLogPath,
			Trace:      traceDir,
			CPUProfile: cpuProfileDir,
			Watchdog:   watchdogLogPath,
		}
	}

	return Logs{
		GUI:      filepath.Join(logDir, libkb.GUILogFileName),
		Kbfs:     filepath.Join(logDir, libkb.KBFSLogFileName),
		Service:  getServiceLog(libkb.NewMetaContextTODO(g), logDir),
		EK:       filepath.Join(logDir, libkb.EKLogFileName),
		Perf:     filepath.Join(logDir, libkb.PerfLogFileName),
		KbfsPerf: filepath.Join(logDir, libkb.KBFSPerfLogFileName),
		Updater:  filepath.Join(logDir, libkb.UpdaterLogFileName),
		Start:    filepath.Join(logDir, libkb.StartLogFileName),
		Git:      filepath.Join(logDir, libkb.GitLogFileName),
		GitPerf:  filepath.Join(logDir, libkb.GitPerfLogFileName),
		Install:  installLogPath,
		Trace:    traceDir,
		Watchdog: watchdogLogPath,
	}
}

func addFile(mpart *multipart.Writer, param, filename string, data []byte) error {
	if len(data) == 0 {
		return nil
	}

	part, err := mpart.CreateFormFile(param, filename)
	if err != nil {
		return err
	}
	_, err = io.Copy(part, bytes.NewBuffer(data))
	return err
}

func addGzippedFile(mpart *multipart.Writer, param, filename, data string) error {
	if len(data) == 0 {
		return nil
	}

	part, err := mpart.CreateFormFile(param, filename)
	if err != nil {
		return err
	}
	gz := gzip.NewWriter(part)
	if _, err := gz.Write([]byte(data)); err != nil {
		return err
	}
	return gz.Close()
}

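// A minimal sketch (hypothetical field and file names) of how these helpers
// feed a multipart upload body:
//
//	var body bytes.Buffer
//	mpart := multipart.NewWriter(&body)
//	_ = addFile(mpart, "status_gz", "status.gz", statusGzBytes)   // statusGzBytes: assumed []byte
//	_ = addGzippedFile(mpart, "log_gz", "log.gz", serviceLogTail) // serviceLogTail: assumed string
//	_ = mpart.Close() // finalize the multipart boundary
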
// tail the logs that start with the stem `stem`, which are of type `which`.
// Get the most recent `numBytes` from the concatenation of the files.
func tail(log logger.Logger, which string, stem string, numBytes int) (ret string) {

	numFiles := 0

	log.Debug("+ tailing %s file with stem %q", which, stem)
	defer func() {
		log.Debug("- collected %d bytes from %d files", len(ret), numFiles)
	}()

	if len(stem) == 0 {
		log.Debug("| skipping %s logs (since no stem given)", which)
		return
	}

	lognames := listLogFiles(log, stem)
	var parts []string
	remaining := numBytes

	// Keep reading logs in reverse chronological order until we read nothing
	// more, we fill up numBytes worth of buffer, or we have to seek partway
	// into a file (meaning that file alone satisfied the remaining budget).
	for _, logname := range lognames {
		data, seeked := tailFile(log, which, logname, remaining)
		if len(data) == 0 {
			break
		}
		parts = append(parts, data)
		numFiles++
		remaining -= len(data)
		if remaining <= 0 || seeked {
			break
		}
	}

	// Reverse the slice so the parts end up in chronological order.
	for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
		parts[i], parts[j] = parts[j], parts[i]
	}

	return strings.Join(parts, "")
}

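// A usage sketch: collect roughly the last 64 KiB of the service log,
// spanning rotated files if necessary.
//
//	recent := tail(log, "service", filepath.Join(logDir, libkb.ServiceLogFileName), 64*1024)
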
type nameAndMTime struct {
	name  string
	mtime time.Time
}

// nameAndMTimes implements sort.Interface, ordering newest (largest mtime) first.
type nameAndMTimes []nameAndMTime

func (a nameAndMTimes) Len() int           { return len(a) }
func (a nameAndMTimes) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a nameAndMTimes) Less(i, j int) bool { return a[i].mtime.After(a[j].mtime) }

// listLogFiles lists the log files that match the glob stem*, and returns
// them in reverse chronological order. We'll typically need to read the
// first, and maybe the second.
func listLogFiles(log logger.Logger, stem string) (ret []string) {
	stem = filepath.Clean(stem)
	dir := filepath.Dir(stem)
	base := filepath.Base(stem)
	files, err := os.ReadDir(dir)

	defer func() {
		log.Debug("listLogFiles(%q) -> %v", stem, ret)
	}()

	// In the worst case, just return the stem in an array
	defret := []string{stem}

	if err != nil {
		log.Debug("failed to list directory %q: %s", dir, err)
		return defret
	}

	var tmp []nameAndMTime
	for _, d := range files {
		fullname := filepath.Clean(filepath.Join(dir, d.Name()))
		// Use the stat on the file (and not from the directory read)
		// since the latter doesn't work reliably on Windows
		finfo, err := os.Stat(fullname)
		if err != nil {
			log.Debug("Cannot stat %q: %s", fullname, err)
			continue
		}
		if strings.HasPrefix(d.Name(), base) {
			tmp = append(tmp, nameAndMTime{fullname, finfo.ModTime()})
		}
	}
	if len(tmp) == 0 {
		log.Debug("no files found matching \"%s*\"; falling back to the raw stem", stem)
		return defret
	}

	// Sort the files in reverse chronological mtime order. We should get the raw stem first.
	sort.Sort(nameAndMTimes(tmp))
	log.Debug("Sorted file list: %+v", tmp)

	for _, f := range tmp {
		ret = append(ret, f.name)
	}

	// If we didn't get the raw stem first, then we have a problem, so just use only the
	// raw stem and ignore the rest of our work.
	if stem != ret[0] {
		log.Debug("Expected to get %q first, but got %q instead! Falling back to one log only", stem, ret[0])
		return defret
	}
	return ret
}

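// An illustrative call (paths and rotation suffixes are hypothetical): given a
// stem of keybase.service.log, the newest-first result might look like
//
//	listLogFiles(log, "/home/u/.cache/keybase/keybase.service.log")
//	// -> ["/home/u/.cache/keybase/keybase.service.log",
//	//     "/home/u/.cache/keybase/keybase.service.log-20190101T000000Z"]
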
// findFirstNewline finds the first newline in the given byte slice, and then
// returns the rest of the slice. Should be safe to use on UTF-8 strings.
func findFirstNewline(b []byte) []byte {
	index := bytes.IndexByte(b, '\n')
	if index < 0 || index == len(b)-1 {
		return nil
	}
	return b[(index + 1):]
}

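// For example, findFirstNewline([]byte("partial line\nfull line\n")) returns
// []byte("full line\n"), dropping the possibly truncated first line.
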
func appendError(log logger.Logger, collected []byte, format string, args ...interface{}) []byte {
	msg := "Error reading logs: " + fmt.Sprintf(format, args...)
	// Use an explicit "%s" so any '%' in the already-formatted message isn't
	// reinterpreted as a formatting verb.
	log.Errorf("%s", msg)
	return append(collected, []byte("\n"+msg+"\n")...)
}

// Get logs from the systemd journal. Currently we don't use this for most of
// our logging, since it's not persisted across boot on some systems. But we do
// use it for startup logs.
func tailSystemdJournal(log logger.Logger, userUnits []string, numBytes int) (ret string) {
	log.Debug("+ tailing journalctl for %#v (%d bytes)", userUnits, numBytes)
	defer func() {
		log.Debug("- scanned %d bytes", len(ret))
	}()

	// Journalctl doesn't provide a "last N bytes" flag directly, so instead we
	// use "last N lines". Large log files in practice seem to be about 150
	// bytes per line. We'll request lines on that assumption, but if we get
	// more than 2x as many bytes as we expected, we'll stop reading and
	// append a truncation error.
	guessedLines := numBytes / 150
	maxBytes := numBytes * 2

	// We intentionally avoid the --user flag to journalctl. That would make us
	// skip over the system journal, but in e.g. Ubuntu 16.04, that's where
	// user units write their logs.
	// Unfortunately, this causes permission errors on some operating systems
	// like Debian Stretch, but it is not fatal, as we ignore errors in this
	// function.
	args := []string{
		"--lines=" + strconv.Itoa(guessedLines),
	}
	if len(userUnits) == 0 {
		panic("without --user-unit we would scrape all system logs!!!")
	}
	for _, userUnit := range userUnits {
		args = append(args, "--user-unit="+userUnit)
	}
	journalCmd := exec.Command("journalctl", args...)
	journalCmd.Stderr = os.Stderr
	stdout, err := journalCmd.StdoutPipe()
	if err != nil {
		msg := fmt.Sprintf("Failed to open a pipe for journalctl: %s", err)
		log.Errorf("%s", msg)
		return msg
	}
	err = journalCmd.Start()
	if err != nil {
		msg := fmt.Sprintf("Failed to run journalctl: %s", err)
		log.Errorf("%s", msg)
		return msg
	}

	// Once we start reading output, don't short-circuit on errors. Just log
	// them, and return whatever we got.
	stdoutLimited := io.LimitReader(stdout, int64(maxBytes))
	output, err := io.ReadAll(stdoutLimited)
	if err != nil {
		output = appendError(log, output, "Error reading from journalctl pipe: %s", err)
	}
	// We must close stdout before Wait, or else Wait might deadlock.
	stdout.Close()
	err = journalCmd.Wait()
	if err != nil {
		output = appendError(log, output, "Journalctl exited with an error: %s", err)
	}
	if len(output) >= maxBytes {
		output = appendError(log, output, "Journal lines longer than expected. Logs truncated.")
	}
	return string(output)
}

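// The resulting invocation looks roughly like this (unit name illustrative;
// for a numBytes of 1 MiB, guessedLines comes out to 6990):
//
//	journalctl --lines=6990 --user-unit=keybase.service
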
// tailFile takes the last n bytes, but advances to the first newline. Returns
// the log (as a string) and a bool which is true if we had to seek into the
// log, i.e. we could not read the whole file within the byte budget.
func tailFile(log logger.Logger, which string, filename string, numBytes int) (ret string, seeked bool) {

	log.Debug("+ tailing %s log %q (%d bytes)", which, filename, numBytes)
	defer func() {
		log.Debug("- scanned %d bytes", len(ret))
	}()

	seeked = false
	if filename == "" {
		return ret, seeked
	}
	finfo, err := os.Stat(filename)
	if os.IsNotExist(err) {
		log.Debug("log %q doesn't exist", filename)
		return ret, seeked
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Errorf("error opening log %q: %s", filename, err)
		return ret, seeked
	}
	defer f.Close()
	if finfo.Size() > int64(numBytes) {
		seeked = true
		_, err = f.Seek(int64(-numBytes), io.SeekEnd)
		if err != nil {
			log.Errorf("Can't seek log %q: %s", filename, err)
			return ret, seeked
		}
	}
	buf, err := io.ReadAll(f)
	if err != nil {
		log.Errorf("Failure in reading file %q: %s", filename, err)
		return ret, seeked
	}
	if seeked {
		// Drop the first (possibly partial) line so we start on a boundary.
		buf = findFirstNewline(buf)
	}
	return string(buf), seeked
}

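// A usage sketch (hypothetical path): grab at most the last 32 KiB of a log,
// trimming any partial first line if we had to seek.
//
//	data, seeked := tailFile(log, "updater", "/tmp/keybase.updater.log", 32*1024)
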
// addFileToTar writes the file at `path` into the tar stream under its base
// name; files that fail to stat are skipped silently.
func addFileToTar(tw *tar.Writer, path string) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()

	if stat, err := file.Stat(); err == nil {
		header := tar.Header{
			Typeflag: tar.TypeReg,
			Name:     filepath.Base(path),
			Size:     stat.Size(),
			Mode:     int64(0600),
			ModTime:  stat.ModTime(),
		}
		if err := tw.WriteHeader(&header); err != nil {
			return err
		}
		if _, err := io.Copy(tw, file); err != nil {
			return err
		}
	}
	return nil
}

func addFilesToTarGz(log logger.Logger, w io.Writer, paths []string) bool {
	gw := gzip.NewWriter(w)
	defer gw.Close()
	tw := tar.NewWriter(gw)
	defer tw.Close()

	added := false
	for _, path := range paths {
		err := addFileToTar(tw, path)
		if err != nil {
			log.Warning("Error adding %q to tar file: %s", path, err)
			continue
		}
		log.Debug("Added trace file %q", path)
		added = true
	}
	return added
}

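// A minimal sketch (hypothetical paths): bundle files into an in-memory
// .tar.gz and write it out.
//
//	var buf bytes.Buffer
//	if addFilesToTarGz(log, &buf, []string{"/tmp/trace.1", "/tmp/trace.2"}) {
//		_ = os.WriteFile("/tmp/traces.tar.gz", buf.Bytes(), 0600)
//	}
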
func getBundledFiles(log logger.Logger, files []string, maxFileCount int) []byte {
	// Send the newest files.
	if len(files) > maxFileCount {
		files = files[len(files)-maxFileCount:]
	}

	buf := bytes.NewBuffer(nil)
	added := addFilesToTarGz(log, buf, files)
	if !added {
		return nil
	}
	return buf.Bytes()
}

func getTraceBundle(log logger.Logger, traceDir string) []byte {
	traceFiles, err := libkb.GetSortedTraceFiles(traceDir)
	if err != nil {
		log.Warning("Error getting trace files in %q: %s", traceDir, err)
		return nil
	}

	return getBundledFiles(log, traceFiles, libkb.MaxTraceFileCount)
}

func getCPUProfileBundle(log logger.Logger, cpuProfileDir string) []byte {
	cpuProfileFiles, err := libkb.GetSortedCPUProfileFiles(cpuProfileDir)
	if err != nil {
		log.Warning("Error getting CPU profile files in %q: %s", cpuProfileDir, err)
		return nil
	}

	return getBundledFiles(log, cpuProfileFiles, libkb.MaxCPUProfileFileCount)
}

func getPlatformInfo() keybase1.PlatformInfo {
	return keybase1.PlatformInfo{
		Os:        runtime.GOOS,
		Arch:      runtime.GOARCH,
		GoVersion: runtime.Version(),
	}
}

// DirSize walks the file tree rooted at dirPath and returns the total size
// and number of regular files under the given directory.
func DirSize(dirPath string) (size uint64, numFiles int, err error) {
	err = filepath.Walk(dirPath, func(_ string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			size += uint64(info.Size())
			numFiles++
		}
		return nil
	})
	return size, numFiles, err
}

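// A usage sketch (hypothetical directory):
//
//	size, n, err := DirSize("/home/u/.cache/keybase")
//	if err == nil {
//		fmt.Printf("%s across %d files\n", humanize.Bytes(size), n)
//	}
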
func CacheSizeInfo(g *libkb.GlobalContext) (info []keybase1.DirSizeInfo, err error) {
	cacheDir := g.GetCacheDir()
	files, err := os.ReadDir(cacheDir)
	if err != nil {
		return nil, err
	}

	var totalSize uint64
	var totalFiles int
	for _, file := range files {
		if !file.IsDir() {
			finfo, err := file.Info()
			if err != nil {
				return nil, err
			}
			totalSize += uint64(finfo.Size())
			continue
		}
		dirPath := filepath.Join(cacheDir, file.Name())
		size, numFiles, err := DirSize(dirPath)
		if err != nil {
			return nil, err
		}
		totalSize += size
		totalFiles += numFiles
		info = append(info, keybase1.DirSizeInfo{
			Name:      dirPath,
			NumFiles:  numFiles,
			HumanSize: humanize.Bytes(size),
		})
	}
	info = append(info, keybase1.DirSizeInfo{
		Name:      cacheDir,
		NumFiles:  totalFiles,
		HumanSize: humanize.Bytes(totalSize),
	})
	return info, nil
}

// GetFirstClient returns the details of the first client in v with the given
// type, or nil if there is none.
func GetFirstClient(v []keybase1.ClientStatus, typ keybase1.ClientType) *keybase1.ClientDetails {
	for _, cli := range v {
		if cli.Details.ClientType == typ {
			return &cli.Details
		}
	}
	return nil
}

// zipLogs takes a slice of logs and returns `numBytes` worth of the
// most recent log lines from the sorted set of all lines across all
// logs.  The lines in each log should already be sorted, and the logs
// should be sortable alphabetically between each other (e.g., each
// line in every log should start with a timestamp in the same
// format).
func zipLogs(numBytes int, logs ...string) (res string) {
	scanners := make([]*bufio.Scanner, len(logs))
	for i, log := range logs {
		scanners[i] = bufio.NewScanner(strings.NewReader(log))
	}

	// nextLines is the next chronological line in each log.  It's
	// empty if we need to get another line from the log.
	nextLines := make([]string, len(logs))
	var buf bytes.Buffer
	for {
		// Fill in the next line for each log, and pick the
		// alphabetically-smallest (i.e., oldest) one. minLine == -1
		// means every log is exhausted; comparing against an
		// already-empty slot would otherwise end the merge as soon as
		// the first log ran out.
		minLine := -1
		for i, currLine := range nextLines {
			if currLine == "" {
				for scanners[i].Scan() {
					nextLines[i] = scanners[i].Text()
					if nextLines[i] != "" {
						break
					}
				}
			}

			// If we still don't have a line, just skip this log.
			if nextLines[i] == "" {
				continue
			}

			if minLine == -1 || nextLines[i] < nextLines[minLine] {
				minLine = i
			}
		}

		if minLine == -1 {
			// We're done!
			break
		}

		buf.WriteString(nextLines[minLine] + "\n")
		nextLines[minLine] = ""
	}

	if buf.Len() > numBytes {
		b := buf.Bytes()[buf.Len()-numBytes:]
		return string(findFirstNewline(b))
	}

	return buf.String()
}
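
// An illustrative call (timestamps made up): lines from both logs come back
// interleaved in timestamp order.
//
//	merged := zipLogs(1024*1024,
//		"2019-01-01T10:00:00 svc start\n2019-01-01T10:02:00 svc ready\n",
//		"2019-01-01T10:01:00 kbfs mounted\n")
//	// merged == "2019-01-01T10:00:00 svc start\n" +
//	//	"2019-01-01T10:01:00 kbfs mounted\n" +
//	//	"2019-01-01T10:02:00 svc ready\n"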