github.com/keybase/client/go@v0.0.0-20240309051027-028f7c731f8b/kbfs/kbfsgit/runner.go (about)

     1  // Copyright 2017 Keybase Inc. All rights reserved.
     2  // Use of this source code is governed by a BSD
     3  // license that can be found in the LICENSE file.
     4  
     5  package kbfsgit
     6  
     7  import (
     8  	"bufio"
     9  	"context"
    10  	"encoding/json"
    11  	"fmt"
    12  	"io"
    13  	"os"
    14  	"path/filepath"
    15  	"runtime"
    16  	"runtime/pprof"
    17  	"strconv"
    18  	"strings"
    19  	"sync"
    20  	"time"
    21  
    22  	"github.com/keybase/client/go/kbfs/data"
    23  	"github.com/keybase/client/go/kbfs/idutil"
    24  	"github.com/keybase/client/go/kbfs/kbfsmd"
    25  	"github.com/keybase/client/go/kbfs/libfs"
    26  	"github.com/keybase/client/go/kbfs/libgit"
    27  	"github.com/keybase/client/go/kbfs/libkbfs"
    28  	"github.com/keybase/client/go/kbfs/tlf"
    29  	"github.com/keybase/client/go/kbfs/tlfhandle"
    30  	"github.com/keybase/client/go/libkb"
    31  	"github.com/keybase/client/go/logger"
    32  	"github.com/keybase/client/go/protocol/keybase1"
    33  	"github.com/pkg/errors"
    34  	billy "gopkg.in/src-d/go-billy.v4"
    35  	"gopkg.in/src-d/go-billy.v4/osfs"
    36  	gogit "gopkg.in/src-d/go-git.v4"
    37  	gogitcfg "gopkg.in/src-d/go-git.v4/config"
    38  	"gopkg.in/src-d/go-git.v4/plumbing"
    39  	gogitobj "gopkg.in/src-d/go-git.v4/plumbing/object"
    40  	gogitstor "gopkg.in/src-d/go-git.v4/plumbing/storer"
    41  	"gopkg.in/src-d/go-git.v4/storage"
    42  	"gopkg.in/src-d/go-git.v4/storage/filesystem"
    43  )
    44  
const (
	// Commands sent by git to this remote-helper process over stdin.
	gitCmdCapabilities = "capabilities"
	gitCmdList         = "list"
	gitCmdFetch        = "fetch"
	gitCmdPush         = "push"
	gitCmdOption       = "option"

	// Option names git may set via the "option" command.
	gitOptionVerbosity = "verbosity"
	gitOptionProgress  = "progress"
	gitOptionCloning   = "cloning"
	gitOptionPushcert  = "pushcert"
	gitOptionIfAsked   = "if-asked"

	// Event types used by the git-lfs custom transfer protocol.
	gitLFSInitEvent      = "init"
	gitLFSUploadEvent    = "upload"
	gitLFSDownloadEvent  = "download"
	gitLFSCompleteEvent  = "complete"
	gitLFSTerminateEvent = "terminate"
	gitLFSProgressEvent  = "progress"

	// Debug tag ID for an individual git command passed to the process.
	ctxCommandOpID = "GITCMDID"

	// URL scheme and path layout for keybase remotes, e.g.
	// "keybase://private/user/reponame".
	kbfsgitPrefix = "keybase://"
	repoSplitter  = "/"
	kbfsRepoDir   = ".kbfs_git"

	// TLF type names as they appear in the remote URL.
	publicName  = "public"
	privateName = "private"
	teamName    = "team"

	// localRepoRemoteName is the name of the remote that gets added
	// locally to the config of the KBFS bare repo, pointing to the
	// git repo stored at the `gitDir` passed to `newRunner`.
	//
	// In go-git, there is no way to hook two go-git.Repository
	// instances together to do fetches/pulls between them. One of the
	// two repos has to be defined as a "remote" to the other one in
	// order to use the nice Fetch and Pull commands. (There might be
	// other more involved ways to transfer objects manually
	// one-by-one, but that seems like it would be pretty sad.)
	//
	// Since there is no standard remote protocol for keybase yet
	// (that's what we're building!), it's not supported by go-git
	// itself. That means our only option is to treat the local
	// on-disk repo as a "remote" with respect to the bare KBFS repo,
	// and do everything in reverse: for example, when a user does a
	// push, we actually fetch from the local repo and write the
	// objects into the bare repo.
	localRepoRemoteName = "local"

	// Packed-refs file names within the bare KBFS repo; the temp
	// name is used while rewriting the packed-refs file.
	packedRefsPath     = "packed-refs"
	packedRefsTempPath = "._packed-refs"

	// Repo maintenance tuning knobs.
	defaultMaxLooseRefs         = 50
	defaultPruneMinLooseObjects = -1
	defaultMaxObjectPacks       = 50
	minGCInterval               = 7 * 24 * time.Hour

	// How long an operation must run before we start printing status
	// about it to the user.
	unlockPrintBytesStatusThreshold = time.Second / 2
	gcPrintStatusThreshold          = time.Second

	maxCommitsToVisitPerRef = 20
)
   109  
// ctxCommandTagKey is the context-key type used to tag each git
// command handled by this process with a unique debug ID.
type ctxCommandTagKey int

const (
	// ctxCommandIDKey is the context key under which a command's
	// unique debug ID is stored.
	ctxCommandIDKey ctxCommandTagKey = iota
)
   115  
// runnerProcessType distinguishes what kind of process this runner is
// serving: a standard git remote helper, or a git-lfs transfer agent
// (with or without progress output).
type runnerProcessType int

const (
	// processGit serves the standard git remote-helper protocol.
	processGit runnerProcessType = iota
	// processLFS serves the git-lfs custom transfer protocol.
	processLFS
	// processLFSNoProgress is like processLFS, but suppresses
	// progress events.
	processLFSNoProgress
)
   123  
// runner translates between a local on-disk git repo and a bare repo
// stored in KBFS, speaking the git remote-helper (or git-lfs
// transfer) protocol over its input/output streams.
type runner struct {
	config      libkbfs.Config
	log         logger.Logger
	h           *tlfhandle.Handle // TLF containing the target repo
	remote      string            // local name assigned to the keybase remote
	repo        string            // repo name within the TLF
	gitDir      string            // path to the caller's .git directory
	uniqID      string            // unique ID for temp files in KBFS
	input       io.Reader         // protocol stream from git (usually stdin)
	output      io.Writer         // protocol stream to git (usually stdout)
	errput      io.Writer         // user-visible progress (usually stderr)
	gcDone      bool
	processType runnerProcessType

	// Options set by git via the "option" command.
	verbosity int64
	progress  bool
	cloning   bool

	// Ensure the "Syncing with Keybase..." start/done messages are
	// printed at most once per process.
	logSync     sync.Once
	logSyncDone sync.Once

	// printStageLock guards all the stage-printing state below.
	printStageLock   sync.Mutex
	needPrintDone    bool
	stageStartTime   time.Time
	stageMemProfName string
	stageCPUProfPath string
}
   151  
   152  func newRunnerWithType(ctx context.Context, config libkbfs.Config,
   153  	remote, repo, gitDir string, input io.Reader, output, errput io.Writer,
   154  	processType runnerProcessType) (
   155  	*runner, error) {
   156  	tlfAndRepo := strings.TrimPrefix(repo, kbfsgitPrefix)
   157  	parts := strings.Split(tlfAndRepo, repoSplitter)
   158  	if len(parts) != 3 {
   159  		return nil, errors.Errorf("Repo should be in the format "+
   160  			"%s<tlfType>%s<tlf>%s<repo>, but got %s",
   161  			kbfsgitPrefix, repoSplitter, repoSplitter, tlfAndRepo)
   162  	}
   163  
   164  	var t tlf.Type
   165  	switch parts[0] {
   166  	case publicName:
   167  		t = tlf.Public
   168  	case privateName:
   169  		t = tlf.Private
   170  	case teamName:
   171  		t = tlf.SingleTeam
   172  	default:
   173  		return nil, errors.Errorf("Unrecognized TLF type: %s", parts[0])
   174  	}
   175  
   176  	h, err := libkbfs.GetHandleFromFolderNameAndType(
   177  		ctx, config.KBPKI(), config.MDOps(), config, parts[1], t)
   178  	if err != nil {
   179  		return nil, err
   180  	}
   181  
   182  	// Use the device ID and PID to make a unique ID (for generating
   183  	// temp files in KBFS).
   184  	session, err := idutil.GetCurrentSessionIfPossible(
   185  		ctx, config.KBPKI(), h.Type() == tlf.Public)
   186  	if err != nil {
   187  		return nil, err
   188  	}
   189  	uniqID := fmt.Sprintf("%s-%d", session.VerifyingKey.String(), os.Getpid())
   190  
   191  	return &runner{
   192  		config:      config,
   193  		log:         config.MakeLogger(""),
   194  		h:           h,
   195  		remote:      remote,
   196  		repo:        parts[2],
   197  		gitDir:      gitDir,
   198  		uniqID:      uniqID,
   199  		input:       input,
   200  		output:      output,
   201  		errput:      errput,
   202  		processType: processType,
   203  		verbosity:   1,
   204  		progress:    true,
   205  	}, nil
   206  }
   207  
// newRunner creates a new runner for git commands.  It expects `repo`
// to be in the form "keybase://private/user/reponame".  `remote`
// is the local name assigned to that URL, while `gitDir` is the
// filepath leading to the .git directory of the caller's local
// on-disk repo.  The returned runner serves the standard git
// remote-helper protocol (processGit), as opposed to git-lfs.
func newRunner(ctx context.Context, config libkbfs.Config,
	remote, repo, gitDir string, input io.Reader, output, errput io.Writer) (
	*runner, error) {
	return newRunnerWithType(
		ctx, config, remote, repo, gitDir, input, output, errput, processGit)
}
   219  
   220  // handleCapabilities: from https://git-scm.com/docs/git-remote-helpers
   221  //
   222  // Lists the capabilities of the helper, one per line, ending with a
   223  // blank line. Each capability may be preceded with *, which marks
   224  // them mandatory for git versions using the remote helper to
   225  // understand. Any unknown mandatory capability is a fatal error.
   226  func (r *runner) handleCapabilities() error {
   227  	caps := []string{
   228  		gitCmdFetch,
   229  		gitCmdPush,
   230  		gitCmdOption,
   231  	}
   232  	for _, c := range caps {
   233  		_, err := r.output.Write([]byte(c + "\n"))
   234  		if err != nil {
   235  			return err
   236  		}
   237  	}
   238  	_, err := r.output.Write([]byte("\n"))
   239  	return err
   240  }
   241  
   242  // getElapsedStr gets an additional string to append to the errput
   243  // message at the end of a phase.  It includes the measured time of
   244  // the phase, and if verbosity is high enough, it includes the
   245  // location of a memory profile taken at the end of the phase.
   246  func (r *runner) getElapsedStr(
   247  	ctx context.Context, startTime time.Time, profName string,
   248  	cpuProfFullPath string) string {
   249  	if r.verbosity < 2 {
   250  		return ""
   251  	}
   252  	elapsed := r.config.Clock().Now().Sub(startTime)
   253  	elapsedStr := fmt.Sprintf(" [%s]", elapsed)
   254  
   255  	if r.verbosity >= 3 {
   256  		profName = filepath.Join(os.TempDir(), profName)
   257  		f, err := os.Create(profName)
   258  		if err != nil {
   259  			r.log.CDebugf(ctx, err.Error())
   260  		} else {
   261  			runtime.GC()
   262  			err := pprof.WriteHeapProfile(f)
   263  			if err != nil {
   264  				r.log.CDebugf(ctx, "Couldn't write heap profile: %+v", err)
   265  			}
   266  			f.Close()
   267  		}
   268  		elapsedStr += " [memprof " + profName + "]"
   269  	}
   270  
   271  	if cpuProfFullPath != "" {
   272  		pprof.StopCPUProfile()
   273  		elapsedStr += " [cpuprof " + cpuProfFullPath + "]"
   274  	}
   275  
   276  	return elapsedStr
   277  }
   278  
   279  func (r *runner) printDoneOrErr(
   280  	ctx context.Context, err error, startTime time.Time) {
   281  	if r.verbosity < 1 {
   282  		return
   283  	}
   284  	profName := "mem.init.prof"
   285  	elapsedStr := r.getElapsedStr(ctx, startTime, profName, "")
   286  	var writeErr error
   287  	if err != nil {
   288  		_, writeErr = r.errput.Write([]byte(err.Error() + elapsedStr + "\n"))
   289  	} else {
   290  		_, writeErr = r.errput.Write([]byte("done." + elapsedStr + "\n"))
   291  	}
   292  	if writeErr != nil {
   293  		r.log.CDebugf(ctx, "Couldn't write error: %+v", err)
   294  	}
   295  }
   296  
   297  func (r *runner) isManagedByApp() bool {
   298  	switch r.h.Type() {
   299  	case tlf.Public:
   300  		// Public TLFs are never managed by the app.
   301  		return false
   302  	case tlf.SingleTeam:
   303  		// Single-team TLFs are always managed by the app.
   304  		return true
   305  	case tlf.Private:
   306  		// Only single-user private TLFs are managed by the app.  So
   307  		// if the canonical name contains any commas, readers, or
   308  		// spaces, it's not managed by the app.
   309  		name := string(r.h.GetCanonicalName())
   310  		return !strings.ContainsAny(name, " ,"+tlf.ReaderSep)
   311  	default:
   312  		panic(fmt.Sprintf("Unexpected type: %s", r.h.Type()))
   313  	}
   314  }
   315  
   316  func (r *runner) makeFS(ctx context.Context) (fs *libfs.FS, err error) {
   317  	// Only allow lazy creates for TLFs that aren't managed by the
   318  	// Keybase app.
   319  	if r.isManagedByApp() {
   320  		fs, _, err = libgit.GetRepoAndID(
   321  			ctx, r.config, r.h, r.repo, r.uniqID)
   322  	} else {
   323  		fs, _, err = libgit.GetOrCreateRepoAndID(
   324  			ctx, r.config, r.h, r.repo, r.uniqID)
   325  	}
   326  	if err != nil {
   327  		return nil, err
   328  	}
   329  	return fs, nil
   330  }
   331  
// initRepoIfNeeded opens the bare repo stored in KBFS for this
// runner's TLF, initializing it first if it doesn't already exist.
// It returns the go-git repository plus the KBFS filesystem it lives
// in.  `forCmd` selects command-specific storage behavior (fetches
// get an on-demand storer).
func (r *runner) initRepoIfNeeded(ctx context.Context, forCmd string) (
	repo *gogit.Repository, fs *libfs.FS, err error) {
	// This function might be called multiple times per function, but
	// the subsequent calls will use the local cache.  So only print
	// these messages once.
	if r.verbosity >= 1 {
		var startTime time.Time
		r.logSync.Do(func() {
			startTime = r.config.Clock().Now()
			_, err := r.errput.Write([]byte("Syncing with Keybase... "))
			if err != nil {
				r.log.CDebugf(ctx, "Couldn't write: %+v", err)
			}
		})
		// The deferred Once reads the named return `err`, so the
		// "done."/error message reflects however this call exits.
		defer func() {
			r.logSyncDone.Do(func() { r.printDoneOrErr(ctx, err, startTime) })
		}()
	}

	fs, err = r.makeFS(ctx)
	if err != nil {
		return nil, nil, err
	}

	// We don't persist remotes to the config on disk for two
	// reasons. 1) gogit/gcfg has a bug where it can't handle
	// backslashes in remote URLs, and 2) we don't want to persist the
	// remotes anyway since they'll contain local paths and wouldn't
	// make sense to other devices, plus that could leak local info.
	// NOTE(review): this local `storage` shadows the imported
	// `storage` package for the rest of the function.
	var storage storage.Storer
	storage, err = libgit.NewGitConfigWithoutRemotesStorer(fs)
	if err != nil {
		return nil, nil, err
	}

	if forCmd == gitCmdFetch {
		r.log.CDebugf(ctx, "Using on-demand storer")
		// Wrap it in an on-demand storer, so we don't try to read all the
		// objects of big repos into memory at once.
		storage, err = libgit.NewOnDemandStorer(storage)
		if err != nil {
			return nil, nil, err
		}
	}

	config, err := storage.Config()
	if err != nil {
		return nil, nil, err
	}
	if config.Pack.Window > 0 {
		// Turn delta compression off, both to avoid messing up the
		// on-demand storer, and to avoid the unnecessary computation
		// since we're not transferring the objects over a network.
		// TODO: this results in uncompressed local git repo after
		// fetches, so we should either run:
		//
		// `git repack -a -d -f --depth=250 --window=250` as needed.
		// (via https://stackoverflow.com/questions/7102053/git-pull-without-remotely-compressing-objects)
		//
		// or we should document that the user should do so.
		r.log.CDebugf(ctx, "Disabling pack compression by using a 0 window")
		config.Pack.Window = 0
		err = storage.SetConfig(config)
		if err != nil {
			return nil, nil, err
		}
	}

	// TODO: This needs to take a server lock when initializing a
	// repo.
	r.log.CDebugf(ctx, "Attempting to init or open repo %s", r.repo)
	repo, err = gogit.Init(storage, nil)
	if err == gogit.ErrRepositoryAlreadyExists {
		repo, err = gogit.Open(storage, nil)
	}
	if err != nil {
		return nil, nil, err
	}

	return repo, fs, nil
}
   413  
   414  func percent(n int64, d int64) float64 {
   415  	return float64(100) * (float64(n) / float64(d))
   416  }
   417  
   418  func humanizeBytes(n int64, d int64) string {
   419  	const kb = 1024
   420  	const kbf = float64(kb)
   421  	const mb = kb * 1024
   422  	const mbf = float64(mb)
   423  	const gb = mb * 1024
   424  	const gbf = float64(gb)
   425  	// Special case the counting of bytes, when there's no denominator.
   426  	if d == 1 {
   427  		switch {
   428  		case n < kb:
   429  			return fmt.Sprintf("%d bytes", n)
   430  		case n < mb:
   431  			return fmt.Sprintf("%.2f KB", float64(n)/kbf)
   432  		case n < gb:
   433  			return fmt.Sprintf("%.2f MB", float64(n)/mbf)
   434  		}
   435  		return fmt.Sprintf("%.2f GB", float64(n)/gbf)
   436  	}
   437  
   438  	switch {
   439  	case d < kb:
   440  		return fmt.Sprintf("%d/%d bytes", n, d)
   441  	case d < mb:
   442  		return fmt.Sprintf("%.2f/%.2f KB", float64(n)/kbf, float64(d)/kbf)
   443  	case d < gb:
   444  		return fmt.Sprintf("%.2f/%.2f MB", float64(n)/mbf, float64(d)/mbf)
   445  	}
   446  	return fmt.Sprintf("%.2f/%.2f GB", float64(n)/gbf, float64(d)/gbf)
   447  }
   448  
// printStageEndIfNeeded should only be used to end stages started with
// printStageStart.  It prints "done." plus the elapsed-time string
// for the current stage, but only if no other caller has already
// ended the stage.
func (r *runner) printStageEndIfNeeded(ctx context.Context) {
	r.printStageLock.Lock()
	defer r.printStageLock.Unlock()
	// go-git grabs the lock right after plumbing.StatusIndexOffset, but before
	// sending the Done status update. As a result, it would look like we are
	// flushing the journal before plumbing.StatusIndexOffset is done. So
	// instead, print "done." only if it's not printed yet.
	if r.needPrintDone {
		elapsedStr := r.getElapsedStr(ctx,
			r.stageStartTime, r.stageMemProfName, r.stageCPUProfPath)
		_, err := r.errput.Write([]byte("done." + elapsedStr + "\n"))
		if err != nil {
			r.log.CDebugf(ctx, "Couldn't write: %+v", err)
		}
		r.needPrintDone = false
	}
}
   468  
// printStageStart ends any in-progress stage, then begins a new one:
// it records the stage's start time and profile names, optionally
// starts a CPU profile (verbosity >= 4), and prints `toPrint` to
// errput.  A no-op if `toPrint` is empty.
func (r *runner) printStageStart(ctx context.Context,
	toPrint []byte, memProfName, cpuProfName string) {
	if len(toPrint) == 0 {
		return
	}
	// Must end the previous stage before taking the lock below, since
	// printStageEndIfNeeded takes the same lock.
	r.printStageEndIfNeeded(ctx)

	r.printStageLock.Lock()
	defer r.printStageLock.Unlock()

	r.stageStartTime = r.config.Clock().Now()
	r.stageMemProfName = memProfName

	if len(cpuProfName) > 0 && r.verbosity >= 4 {
		cpuProfPath := filepath.Join(
			os.TempDir(), cpuProfName)
		f, err := os.Create(cpuProfPath)
		if err != nil {
			r.log.CDebugf(
				ctx, "Couldn't create CPU profile: %s", cpuProfName)
			// Empty path signals getElapsedStr not to report a
			// CPU profile for this stage.
			cpuProfPath = ""
		} else {
			err := pprof.StartCPUProfile(f)
			if err != nil {
				r.log.CDebugf(ctx, "Couldn't start CPU profile: %+v", err)
			}
		}
		// NOTE(review): if StartCPUProfile failed above, the path is
		// still recorded and StopCPUProfile will be called at stage
		// end — harmless, but the reported file may be empty.
		r.stageCPUProfPath = cpuProfPath
	}

	_, err := r.errput.Write(toPrint)
	if err != nil {
		r.log.CDebugf(ctx, "Couldn't write: %+v", err)
	}
	r.needPrintDone = true
}
   505  
   506  func (r *runner) printGitJournalStart(ctx context.Context) {
   507  	adj := "encrypted"
   508  	if r.h.Type() == tlf.Public {
   509  		adj = "signed"
   510  	}
   511  	if r.verbosity >= 1 {
   512  		r.printStageStart(ctx,
   513  			[]byte(fmt.Sprintf("Syncing %s data to Keybase: ", adj)),
   514  			"mem.flush.prof", "")
   515  	}
   516  }
   517  
   518  func (r *runner) printGitJournalMessage(
   519  	ctx context.Context, lastByteCount int, totalSize, sizeLeft int64) int {
   520  	const bytesFmt string = "(%.2f%%) %s... "
   521  	eraseStr := strings.Repeat("\b", lastByteCount)
   522  	flushed := totalSize - sizeLeft
   523  	if flushed < 0 {
   524  		flushed = 0
   525  	}
   526  	str := fmt.Sprintf(
   527  		bytesFmt, percent(flushed, totalSize),
   528  		humanizeBytes(flushed, totalSize))
   529  	if r.verbosity >= 1 && r.progress {
   530  		_, err := r.errput.Write([]byte(eraseStr + str))
   531  		if err != nil {
   532  			r.log.CDebugf(ctx, "Couldn't write: %+v", err)
   533  		}
   534  	}
   535  	return len(str)
   536  }
   537  
// printJournalStatus polls the journal status for `tlfID` once per
// second and reports flushing progress via the given printer
// callbacks, until `doneCh` is closed.  The caller should make sure
// doneCh is closed when the journal is all flushed.
func (r *runner) printJournalStatus(
	ctx context.Context, jManager *libkbfs.JournalManager, tlfID tlf.ID,
	doneCh <-chan struct{}, printStart func(context.Context),
	printProgress func(context.Context, int, int64, int64) int,
	printEnd func(context.Context)) {
	// End any stage that might currently be printing, before we
	// potentially start a new one below.
	printEnd(ctx)
	// Note: the "first" status here gets us the number of unflushed
	// bytes left at the time we started printing.  However, we don't
	// have the total number of bytes being flushed to the server
	// throughout the whole operation, which would be more
	// informative.  It would be better to have that as the
	// denominator, but there's no easy way to get it right now.
	firstStatus, err := jManager.JournalStatus(tlfID)
	if err != nil {
		r.log.CDebugf(ctx, "Error getting status: %+v", err)
		return
	}
	// Nothing to flush; skip the progress display entirely.
	if firstStatus.UnflushedBytes == 0 {
		return
	}
	printStart(ctx)
	lastByteCount := printProgress(
		ctx, 0, firstStatus.UnflushedBytes, firstStatus.UnflushedBytes)

	r.log.CDebugf(ctx, "Waiting for %d journal bytes to flush",
		firstStatus.UnflushedBytes)

	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			status, err := jManager.JournalStatus(tlfID)
			if err != nil {
				r.log.CDebugf(ctx, "Error getting status: %+v", err)
				return
			}

			// The initial unflushed count serves as the (approximate)
			// denominator for the whole operation.
			lastByteCount = printProgress(
				ctx, lastByteCount, firstStatus.UnflushedBytes,
				status.UnflushedBytes)
		case <-doneCh:
			// doneCh is closed. So assume journal flushing is done and
			// take the shortcut.
			_ = printProgress(
				ctx, lastByteCount, firstStatus.UnflushedBytes, 0)

			printEnd(ctx)
			return
		}
	}
}
   591  
// waitForJournalWithPrinters cleans up old deleted repos, syncs all
// outstanding writes, and then blocks until the TLF's journal is
// fully flushed to the server (squashing everything into a single
// revision), reporting progress through the given printer callbacks.
// It returns an error if the journal is still non-empty afterward.
func (r *runner) waitForJournalWithPrinters(
	ctx context.Context, printStart func(context.Context),
	printProgress func(context.Context, int, int64, int64) int,
	printEnd func(context.Context)) error {
	// See if there are any deleted repos to clean up before we flush
	// the journal.
	err := libgit.CleanOldDeletedReposTimeLimited(ctx, r.config, r.h)
	if err != nil {
		return err
	}

	rootNode, _, err := r.config.KBFSOps().GetOrCreateRootNode(
		ctx, r.h, data.MasterBranch)
	if err != nil {
		return err
	}

	// Push any buffered writes into the journal before waiting on it.
	err = r.config.KBFSOps().SyncAll(ctx, rootNode.GetFolderBranch())
	if err != nil {
		return err
	}

	// No journaling configured: nothing to wait for.
	jManager, err := libkbfs.GetJournalManager(r.config)
	if err != nil {
		r.log.CDebugf(ctx, "No journal server: %+v", err)
		return nil
	}

	_, err = jManager.JournalStatus(rootNode.GetFolderBranch().Tlf)
	if err != nil {
		r.log.CDebugf(ctx, "No journal: %+v", err)
		return nil
	}

	// Run the progress printer concurrently; waitDoneCh tells it to
	// stop, and printDoneCh tells us it has finished printing.
	printDoneCh := make(chan struct{})
	waitDoneCh := make(chan struct{})
	go func() {
		r.printJournalStatus(
			ctx, jManager, rootNode.GetFolderBranch().Tlf, waitDoneCh,
			printStart, printProgress, printEnd)
		close(printDoneCh)
	}()

	// This squashes everything written to the journal into a single
	// revision, to make sure that no partial states of the bare repo
	// are seen by other readers of the TLF.  It also waits for any
	// necessary conflict resolution to complete.
	err = jManager.FinishSingleOp(ctx, rootNode.GetFolderBranch().Tlf,
		nil, keybase1.MDPriorityGit)
	if err != nil {
		return err
	}
	close(waitDoneCh)
	<-printDoneCh

	// Make sure that everything is truly flushed.
	status, err := jManager.JournalStatus(rootNode.GetFolderBranch().Tlf)
	if err != nil {
		return err
	}

	if status.RevisionStart != kbfsmd.RevisionUninitialized {
		r.log.CDebugf(ctx, "Journal status: %+v", status)
		return errors.New("Journal is non-empty after a wait")
	}
	return nil
}
   659  
   660  func (r *runner) waitForJournal(ctx context.Context) error {
   661  	return r.waitForJournalWithPrinters(
   662  		ctx, r.printGitJournalStart, r.printGitJournalMessage,
   663  		r.printStageEndIfNeeded)
   664  }
   665  
// handleList: From https://git-scm.com/docs/git-remote-helpers
//
// Lists the refs, one per line, in the format "<value> <name> [<attr>
// …​]". The value may be a hex sha1 hash, "@<dest>" for a symref, or
// "?" to indicate that the helper could not get the value of the
// ref. A space-separated list of attributes follows the name;
// unrecognized attributes are ignored. The list ends with a blank
// line.
//
// The only supported argument is "for-push", which suppresses
// symbolic refs in the output.
func (r *runner) handleList(ctx context.Context, args []string) (err error) {
	forPush := false
	if len(args) == 1 && args[0] == "for-push" {
		r.log.CDebugf(ctx, "Excluding symbolic refs during a for-push list")
		forPush = true
	} else if len(args) > 0 {
		return errors.Errorf("Bad list request: %v", args)
	}

	repo, _, err := r.initRepoIfNeeded(ctx, gitCmdList)
	if err != nil {
		return err
	}

	refs, err := repo.References()
	if err != nil {
		return err
	}

	// Symbolic refs are held back and only printed after we know at
	// least one hash ref exists (see comment below).
	var symRefs []string
	hashesSeen := false
	for {
		ref, err := refs.Next()
		if errors.Cause(err) == io.EOF {
			break
		}
		if err != nil {
			return err
		}

		value := ""
		switch ref.Type() {
		case plumbing.HashReference:
			value = ref.Hash().String()
			hashesSeen = true
		case plumbing.SymbolicReference:
			value = "@" + ref.Target().String()
		default:
			// Per the protocol, "?" means the value couldn't be read.
			value = "?"
		}
		refStr := value + " " + ref.Name().String() + "\n"
		if ref.Type() == plumbing.SymbolicReference {
			// Don't list any symbolic references until we're sure
			// there's at least one object available.  Otherwise
			// cloning an empty repo will result in an error because
			// the HEAD symbolic ref points to a ref that doesn't
			// exist.
			symRefs = append(symRefs, refStr)
			continue
		}
		r.log.CDebugf(ctx, "Listing ref %s", refStr)
		_, err = r.output.Write([]byte(refStr))
		if err != nil {
			return err
		}
	}

	if hashesSeen && !forPush {
		for _, refStr := range symRefs {
			r.log.CDebugf(ctx, "Listing symbolic ref %s", refStr)
			_, err = r.output.Write([]byte(refStr))
			if err != nil {
				return err
			}
		}
	}

	err = r.waitForJournal(ctx)
	if err != nil {
		return err
	}
	r.log.CDebugf(ctx, "Done waiting for journal")

	// A blank line terminates the list, per the remote-helper protocol.
	_, err = r.output.Write([]byte("\n"))
	return err
}
   750  
// gogitStagesToStatus maps each go-git status stage to the
// user-visible progress prefix printed for it.  NOTE(review): this
// map is not read-only — processGogitStatus overwrites the
// StatusFetch entry for public TLFs at runtime.
var gogitStagesToStatus = map[plumbing.StatusStage]string{
	plumbing.StatusCount:     "Counting and decrypting: ",
	plumbing.StatusRead:      "Reading and decrypting metadata: ",
	plumbing.StatusFixChains: "Fixing: ",
	plumbing.StatusSort:      "Sorting... ",
	plumbing.StatusDelta:     "Calculating deltas: ",
	// For us, a "send" actually means fetch.
	plumbing.StatusSend: "Fetching and decrypting objects: ",
	// For us, a "fetch" actually means writing objects to
	// the local journal.
	plumbing.StatusFetch:       "Preparing and encrypting: ",
	plumbing.StatusIndexHash:   "Indexing hashes: ",
	plumbing.StatusIndexCRC:    "Indexing CRCs: ",
	plumbing.StatusIndexOffset: "Indexing offsets: ",
}
   766  
   767  func humanizeObjects(n int, d int) string {
   768  	const k = 1000
   769  	const m = k * 1000
   770  	// Special case the counting of objects, when there's no denominator.
   771  	if d == 1 {
   772  		if n < k {
   773  			return fmt.Sprintf("%d", n)
   774  		} else if n < m {
   775  			return fmt.Sprintf("%.2fK", float64(n)/k)
   776  		}
   777  		return fmt.Sprintf("%.2fM", float64(n)/m)
   778  	}
   779  
   780  	if d < k {
   781  		return fmt.Sprintf("%d/%d", n, d)
   782  	} else if d < m {
   783  		return fmt.Sprintf("%.2f/%.2fK", float64(n)/k, float64(d)/k)
   784  	}
   785  	return fmt.Sprintf("%.2f/%.2fM", float64(n)/m, float64(d)/m)
   786  }
   787  
   788  func (r *runner) printJournalStatusUntilFlushed(
   789  	ctx context.Context, doneCh <-chan struct{}) {
   790  	rootNode, _, err := r.config.KBFSOps().GetOrCreateRootNode(
   791  		ctx, r.h, data.MasterBranch)
   792  	if err != nil {
   793  		r.log.CDebugf(ctx, "GetOrCreateRootNode error: %+v", err)
   794  		return
   795  	}
   796  
   797  	err = r.config.KBFSOps().SyncAll(ctx, rootNode.GetFolderBranch())
   798  	if err != nil {
   799  		r.log.CDebugf(ctx, "SyncAll error: %+v", err)
   800  		return
   801  	}
   802  
   803  	jManager, err := libkbfs.GetJournalManager(r.config)
   804  	if err != nil {
   805  		r.log.CDebugf(ctx, "No journal server: %+v", err)
   806  	}
   807  
   808  	r.printJournalStatus(
   809  		ctx, jManager, rootNode.GetFolderBranch().Tlf, doneCh,
   810  		r.printGitJournalStart, r.printGitJournalMessage,
   811  		r.printStageEndIfNeeded)
   812  }
   813  
// processGogitStatus consumes go-git status updates and KBFS
// filesystem events, translating them into user-visible progress
// stages on errput.  It runs until both channels are closed (or a
// StatusDone update arrives).  NOTE(review): it mutates the global
// gogitStagesToStatus map for public TLFs.
func (r *runner) processGogitStatus(ctx context.Context,
	statusChan <-chan plumbing.StatusUpdate, fsEvents <-chan libfs.FSEvent) {
	if r.h.Type() == tlf.Public {
		gogitStagesToStatus[plumbing.StatusFetch] = "Preparing and signing: "
	}

	currStage := plumbing.StatusUnknown
	lastByteCount := 0
	for {
		if statusChan == nil && fsEvents == nil {
			// statusChan is never passed in as nil. So if it's nil, it's been
			// closed in the select/case below because receive failed. So
			// instead of letting select block forever, we break out of the
			// loop here.
			break
		}
		select {
		case update, ok := <-statusChan:
			if !ok {
				// Mark the channel dead; a nil channel never fires in
				// select, so remaining fsEvents still get processed.
				statusChan = nil
				continue
			}
			if update.Stage != currStage {
				if currStage != plumbing.StatusUnknown {
					r.printStageEndIfNeeded(ctx)
				}
				r.printStageStart(ctx,
					[]byte(gogitStagesToStatus[update.Stage]),
					fmt.Sprintf("mem.%d.prof", update.Stage),
					fmt.Sprintf("cpu.%d.prof", update.Stage),
				)
				lastByteCount = 0
				if stage, ok := gogitStagesToStatus[update.Stage]; ok {
					r.log.CDebugf(ctx, "Entering stage: %s - %d total objects",
						stage, update.ObjectsTotal)
				}
			}
			// Overwrite the previous progress text in place.
			eraseStr := strings.Repeat("\b", lastByteCount)
			newStr := ""

			switch update.Stage {
			case plumbing.StatusDone:
				r.log.CDebugf(ctx, "Status processing done")
				return
			case plumbing.StatusCount:
				newStr = fmt.Sprintf(
					"%s objects... ", humanizeObjects(update.ObjectsTotal, 1))
			case plumbing.StatusSort:
				// Sorting has no meaningful object counts to show.
			default:
				newStr = fmt.Sprintf(
					"(%.2f%%) %s objects... ",
					percent(int64(update.ObjectsDone), int64(update.ObjectsTotal)),
					humanizeObjects(update.ObjectsDone, update.ObjectsTotal))
			}

			lastByteCount = len(newStr)
			if r.progress {
				_, err := r.errput.Write([]byte(eraseStr + newStr))
				if err != nil {
					r.log.CDebugf(ctx, "Couldn't write: %+v", err)
				}
			}

			currStage = update.Stage
		case fsEvent, ok := <-fsEvents:
			if !ok {
				fsEvents = nil
				continue
			}
			switch fsEvent.EventType {
			case libfs.FSEventLock, libfs.FSEventUnlock:
				r.printStageEndIfNeeded(ctx)
				// Since we flush all blocks in Lock, subsequent calls to
				// Lock/Unlock normally don't take much time. So we only print
				// journal status if it's been longer than
				// unlockPrintBytesStatusThreshold and fsEvent.Done hasn't been
				// closed.
				timer := time.NewTimer(unlockPrintBytesStatusThreshold)
				select {
				case <-timer.C:
					r.printJournalStatusUntilFlushed(ctx, fsEvent.Done)
				case <-fsEvent.Done:
					timer.Stop()
				case <-ctx.Done():
					timer.Stop()
				}
			}
		}
	}
	r.log.CDebugf(ctx, "Status channel closed")
	r.printStageEndIfNeeded(ctx)
}
   906  
   907  // recursiveByteCount returns a sum of the size of all files under the
   908  // directory represented by `fs`.  It also returns the length of the
   909  // last string it printed to `r.errput` as `toErase`, to aid in
   910  // overwriting the text on the next update.
   911  func (r *runner) recursiveByteCount(
   912  	ctx context.Context, fs billy.Filesystem, totalSoFar int64, toErase int) (
   913  	bytes int64, toEraseRet int, err error) {
   914  	fileInfos, err := fs.ReadDir("/")
   915  	if err != nil {
   916  		return 0, 0, err
   917  	}
   918  
   919  	for _, fi := range fileInfos {
   920  		if fi.IsDir() {
   921  			if fi.Name() == "." {
   922  				continue
   923  			}
   924  			chrootFS, err := fs.Chroot(fi.Name())
   925  			if err != nil {
   926  				return 0, 0, err
   927  			}
   928  			var chrootBytes int64
   929  			chrootBytes, toErase, err = r.recursiveByteCount(
   930  				ctx, chrootFS, totalSoFar+bytes, toErase)
   931  			if err != nil {
   932  				return 0, 0, err
   933  			}
   934  			bytes += chrootBytes
   935  		} else {
   936  			bytes += fi.Size()
   937  			if r.progress {
   938  				// This function only runs if r.verbosity >= 1.
   939  				eraseStr := strings.Repeat("\b", toErase)
   940  				newStr := fmt.Sprintf(
   941  					"%s... ", humanizeBytes(totalSoFar+bytes, 1))
   942  				toErase = len(newStr)
   943  				_, err := r.errput.Write([]byte(eraseStr + newStr))
   944  				if err != nil {
   945  					return 0, 0, err
   946  				}
   947  			}
   948  		}
   949  	}
   950  	return bytes, toErase, nil
   951  }
   952  
// statusWriter is a simple io.Writer shim that logs to `r.errput` the
// number of bytes written to `output`.
type statusWriter struct {
	r           *runner
	output      io.Writer // destination that receives the actual data
	soFar       int64     // bytes written through this shim so far
	totalBytes  int64     // expected total bytes, for the percentage display
	nextToErase int       // length of the last progress string on errput
}

var _ io.Writer = (*statusWriter)(nil)
   964  
   965  func (sw *statusWriter) Write(p []byte) (n int, err error) {
   966  	n, err = sw.output.Write(p)
   967  	if err != nil {
   968  		return n, err
   969  	}
   970  
   971  	sw.soFar += int64(len(p))
   972  	eraseStr := strings.Repeat("\b", sw.nextToErase)
   973  	newStr := fmt.Sprintf("(%.2f%%) %s... ",
   974  		percent(sw.soFar, sw.totalBytes),
   975  		humanizeBytes(sw.soFar, sw.totalBytes))
   976  	_, err = sw.r.errput.Write([]byte(eraseStr + newStr))
   977  	if err != nil {
   978  		return n, err
   979  	}
   980  	sw.nextToErase = len(newStr)
   981  	return n, nil
   982  }
   983  
   984  func (r *runner) copyFile(
   985  	ctx context.Context, from billy.Filesystem, to billy.Filesystem,
   986  	name string, sw *statusWriter) (err error) {
   987  	f, err := from.Open(name)
   988  	if err != nil {
   989  		return err
   990  	}
   991  	defer f.Close()
   992  	toF, err := to.Create(name)
   993  	if err != nil {
   994  		return err
   995  	}
   996  	defer toF.Close()
   997  
   998  	var w io.Writer = toF
   999  	// Wrap the destination file in a status shim if we are supposed
  1000  	// to report progress.
  1001  	if sw != nil && r.progress {
  1002  		sw.output = toF
  1003  		w = sw
  1004  	}
  1005  	_, err = io.Copy(w, f)
  1006  	return err
  1007  }
  1008  
  1009  func (r *runner) copyFileWithCount(
  1010  	ctx context.Context, from billy.Filesystem, to billy.Filesystem,
  1011  	name, countingText, countingProf, copyingText, copyingProf string) error {
  1012  	var sw *statusWriter
  1013  	if r.verbosity >= 1 {
  1014  		// Get the total number of bytes we expect to fetch, for the
  1015  		// progress report.
  1016  		startTime := r.config.Clock().Now()
  1017  		zeroStr := fmt.Sprintf("%s... ", humanizeBytes(0, 1))
  1018  		_, err := r.errput.Write(
  1019  			[]byte(fmt.Sprintf("%s: %s", countingText, zeroStr)))
  1020  		if err != nil {
  1021  			return err
  1022  		}
  1023  		fi, err := from.Stat(name)
  1024  		if err != nil {
  1025  			return err
  1026  		}
  1027  		eraseStr := strings.Repeat("\b", len(zeroStr))
  1028  		newStr := fmt.Sprintf("%s... ", humanizeBytes(fi.Size(), 1))
  1029  		_, err = r.errput.Write([]byte(eraseStr + newStr))
  1030  		if err != nil {
  1031  			return err
  1032  		}
  1033  
  1034  		elapsedStr := r.getElapsedStr(
  1035  			ctx, startTime, fmt.Sprintf("mem.%s.prof", countingProf), "")
  1036  		_, err = r.errput.Write([]byte("done." + elapsedStr + "\n"))
  1037  		if err != nil {
  1038  			return err
  1039  		}
  1040  
  1041  		sw = &statusWriter{r, nil, 0, fi.Size(), 0}
  1042  		_, err = r.errput.Write([]byte(fmt.Sprintf("%s: ", copyingText)))
  1043  		if err != nil {
  1044  			return err
  1045  		}
  1046  	}
  1047  
  1048  	// Copy the file directly into the other file system.
  1049  	startTime := r.config.Clock().Now()
  1050  	err := r.copyFile(ctx, from, to, name, sw)
  1051  	if err != nil {
  1052  		return err
  1053  	}
  1054  
  1055  	if r.verbosity >= 1 {
  1056  		elapsedStr := r.getElapsedStr(
  1057  			ctx, startTime, fmt.Sprintf("mem.%s.prof", copyingProf), "")
  1058  		_, err = r.errput.Write([]byte("done." + elapsedStr + "\n"))
  1059  		if err != nil {
  1060  			return err
  1061  		}
  1062  	}
  1063  	return nil
  1064  }
  1065  
  1066  // recursiveCopy copies the entire subdirectory rooted at `fs` to
  1067  // `localFS`.
  1068  func (r *runner) recursiveCopy(
  1069  	ctx context.Context, from billy.Filesystem, to billy.Filesystem,
  1070  	sw *statusWriter) (err error) {
  1071  	fileInfos, err := from.ReadDir("")
  1072  	if err != nil {
  1073  		return err
  1074  	}
  1075  
  1076  	for _, fi := range fileInfos {
  1077  		if fi.IsDir() {
  1078  			if fi.Name() == "." {
  1079  				continue
  1080  			}
  1081  			err := to.MkdirAll(fi.Name(), 0775)
  1082  			if err != nil {
  1083  				return err
  1084  			}
  1085  			chrootFrom, err := from.Chroot(fi.Name())
  1086  			if err != nil {
  1087  				return err
  1088  			}
  1089  			chrootTo, err := to.Chroot(fi.Name())
  1090  			if err != nil {
  1091  				return err
  1092  			}
  1093  			err = r.recursiveCopy(ctx, chrootFrom, chrootTo, sw)
  1094  			if err != nil {
  1095  				return err
  1096  			}
  1097  		} else {
  1098  			err := r.copyFile(ctx, from, to, fi.Name(), sw)
  1099  			if err != nil {
  1100  				return err
  1101  			}
  1102  		}
  1103  	}
  1104  	return nil
  1105  }
  1106  
// recursiveCopyWithCounts recursively copies everything under `from`
// into `to`.  At verbosity >= 1, it first prints the total byte count
// of the source tree (prefixed by `countingText`) and then live copy
// progress (prefixed by `copyingText`); `countingProf` and
// `copyingProf` name the memory profiles written for each phase, if
// profiling is enabled.
func (r *runner) recursiveCopyWithCounts(
	ctx context.Context, from billy.Filesystem, to billy.Filesystem,
	countingText, countingProf, copyingText, copyingProf string) error {
	var sw *statusWriter
	if r.verbosity >= 1 {
		// Get the total number of bytes we expect to fetch, for the
		// progress report.
		startTime := r.config.Clock().Now()
		_, err := r.errput.Write([]byte(fmt.Sprintf("%s: ", countingText)))
		if err != nil {
			return err
		}
		b, _, err := r.recursiveByteCount(ctx, from, 0, 0)
		if err != nil {
			return err
		}
		elapsedStr := r.getElapsedStr(
			ctx, startTime, fmt.Sprintf("mem.%s.prof", countingProf), "")
		_, err = r.errput.Write([]byte("done." + elapsedStr + "\n"))
		if err != nil {
			return err
		}

		// Progress during the copy is reported against the total byte
		// count computed above.
		sw = &statusWriter{r, nil, 0, b, 0}
		_, err = r.errput.Write([]byte(fmt.Sprintf("%s: ", copyingText)))
		if err != nil {
			return err
		}
	}

	// Copy the entire subdirectory straight into the other file
	// system.  This saves time and memory relative to going through
	// go-git.
	startTime := r.config.Clock().Now()
	err := r.recursiveCopy(ctx, from, to, sw)
	if err != nil {
		return err
	}

	if r.verbosity >= 1 {
		elapsedStr := r.getElapsedStr(
			ctx, startTime, fmt.Sprintf("mem.%s.prof", copyingProf), "")
		_, err := r.errput.Write([]byte("done." + elapsedStr + "\n"))
		if err != nil {
			return err
		}
	}
	return nil
}
  1156  
// checkGC should only be called from the main command-processing
// goroutine.  It checks (at most once per runner, guarded by
// `r.gcDone`) whether the KBFS repo would benefit from garbage
// collection, and if so prints a suggested `keybase git gc` command
// to `r.errput`.  It never performs the GC itself.
func (r *runner) checkGC(ctx context.Context) (err error) {
	if r.gcDone {
		return nil
	}
	r.gcDone = true

	if !r.isManagedByApp() {
		r.log.CDebugf(ctx, "Skipping GC check")
		return nil
	}

	r.log.CDebugf(ctx, "Checking for GC")

	// If the check takes longer than gcPrintStatusThreshold, print a
	// stage-start message so the user knows what's happening.  The
	// sync.Once ensures the stage is started at most once, whether by
	// the timer goroutine or suppressed by the deferred no-op below.
	var stageSync sync.Once
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	go func() {
		timer := time.NewTimer(gcPrintStatusThreshold)
		select {
		case <-timer.C:
			stageSync.Do(func() {
				r.printStageStart(ctx,
					[]byte("Checking repo for inefficiencies... "),
					"mem.gc.prof", "cpu.gc.prof")
			})
		case <-ctx.Done():
			timer.Stop()
		}
	}()
	defer func() {
		// Prevent stage from starting after we finish the stage.
		stageSync.Do(func() {})
		if err == nil {
			r.printStageEndIfNeeded(ctx)
		}
	}()

	fs, _, err := libgit.GetRepoAndID(
		ctx, r.config, r.h, r.repo, r.uniqID)
	// A missing repo simply means there's nothing to GC.
	if _, noRepo := errors.Cause(err).(libkb.RepoDoesntExistError); noRepo {
		r.log.CDebugf(ctx, "No such repo: %v", err)
		return nil
	} else if err != nil {
		return err
	}

	// Rate-limit the check: skip if a GC ran recently.
	lastGCTime, err := libgit.LastGCTime(ctx, fs)
	if err != nil {
		return err
	}
	if r.config.Clock().Now().Sub(lastGCTime) < minGCInterval {
		r.log.CDebugf(ctx, "Last GC happened at %s; skipping GC check",
			lastGCTime)
		return nil
	}

	// NOTE(review): this local shadows the imported `storage` package
	// name for the rest of the function.
	storage, err := libgit.NewGitConfigWithoutRemotesStorer(fs)
	if err != nil {
		return err
	}

	gco := libgit.GCOptions{
		MaxLooseRefs:         defaultMaxLooseRefs,
		PruneMinLooseObjects: defaultPruneMinLooseObjects,
		PruneExpireTime:      time.Time{},
		MaxObjectPacks:       defaultMaxObjectPacks,
	}
	doPackRefs, _, doPruneLoose, doObjectRepack, _, err := libgit.NeedsGC(
		storage, gco)
	if err != nil {
		return err
	}
	if !doPackRefs && !doPruneLoose && !doObjectRepack {
		r.log.CDebugf(ctx, "No GC needed")
		return nil
	}
	r.log.CDebugf(ctx, "GC needed: doPackRefs=%t, doPruneLoose=%t, "+
		"doObjectRepack=%t", doPackRefs, doPruneLoose, doObjectRepack)
	// Build the suggested command, including the team flag for
	// team-owned repos.
	command := fmt.Sprintf("keybase git gc %s", r.repo)
	if r.h.Type() == tlf.SingleTeam {
		tid := r.h.FirstResolvedWriter()
		teamName, err := r.config.KBPKI().GetNormalizedUsername(
			ctx, tid, r.config.OfflineAvailabilityForID(r.h.TlfID()))
		if err != nil {
			return err
		}
		command += fmt.Sprintf(" --team %s", teamName)
	}
	_, err = r.errput.Write([]byte(
		"Tip: this repo could be sped up with some " +
			"garbage collection. Run this command:\n"))
	if err != nil {
		return err
	}
	_, err = r.errput.Write([]byte("\t" + command + "\n"))
	return err
}
  1256  
  1257  // handleClone copies all the object files of a KBFS repo directly
  1258  // into the local git dir, instead of using go-git to calculate the
  1259  // full set of objects that are to be transferred (which is slow and
  1260  // memory inefficient).  If the user requested only a single branch of
  1261  // cloning, this will copy more objects that necessary, but still only
  1262  // a single ref will show up for the user.  TODO: Maybe we should run
  1263  // `git gc` for the user on the local repo?
  1264  func (r *runner) handleClone(ctx context.Context) (err error) {
  1265  	_, _, err = r.initRepoIfNeeded(ctx, "clone")
  1266  	if err != nil {
  1267  		return err
  1268  	}
  1269  
  1270  	r.log.CDebugf(ctx, "Cloning into %s", r.gitDir)
  1271  
  1272  	fs, _, err := libgit.GetOrCreateRepoAndID(
  1273  		ctx, r.config, r.h, r.repo, r.uniqID)
  1274  	if err != nil {
  1275  		return err
  1276  	}
  1277  	fsObjects, err := fs.Chroot("objects")
  1278  	if err != nil {
  1279  		return err
  1280  	}
  1281  
  1282  	localObjectsPath := filepath.Join(r.gitDir, "objects")
  1283  	err = os.MkdirAll(localObjectsPath, 0775)
  1284  	if err != nil {
  1285  		return err
  1286  	}
  1287  	localFSObjects := osfs.New(localObjectsPath)
  1288  
  1289  	err = r.recursiveCopyWithCounts(
  1290  		ctx, fsObjects, localFSObjects,
  1291  		"Counting", "count", "Cryptographic cloning", "clone")
  1292  	if err != nil {
  1293  		return err
  1294  	}
  1295  
  1296  	err = r.checkGC(ctx)
  1297  	if err != nil {
  1298  		return err
  1299  	}
  1300  
  1301  	_, err = r.output.Write([]byte("\n"))
  1302  	return err
  1303  }
  1304  
  1305  // handleFetchBatch: From https://git-scm.com/docs/git-remote-helpers
  1306  //
  1307  // fetch <sha1> <name>
  1308  // Fetches the given object, writing the necessary objects to the
  1309  // database. Fetch commands are sent in a batch, one per line,
  1310  // terminated with a blank line. Outputs a single blank line when all
  1311  // fetch commands in the same batch are complete. Only objects which
  1312  // were reported in the output of list with a sha1 may be fetched this
  1313  // way.
  1314  //
  1315  // Optionally may output a lock <file> line indicating a file under
  1316  // GIT_DIR/objects/pack which is keeping a pack until refs can be
  1317  // suitably updated.
  1318  func (r *runner) handleFetchBatch(ctx context.Context, args [][]string) (
  1319  	err error) {
  1320  	repo, _, err := r.initRepoIfNeeded(ctx, gitCmdFetch)
  1321  	if err != nil {
  1322  		return err
  1323  	}
  1324  
  1325  	r.log.CDebugf(ctx, "Fetching %d refs into %s", len(args), r.gitDir)
  1326  
  1327  	remote, err := repo.CreateRemote(&gogitcfg.RemoteConfig{
  1328  		Name: localRepoRemoteName,
  1329  		URLs: []string{r.gitDir},
  1330  	})
  1331  
  1332  	var refSpecs []gogitcfg.RefSpec
  1333  	var deleteRefSpecs []gogitcfg.RefSpec
  1334  	for i, fetch := range args {
  1335  		if len(fetch) != 2 {
  1336  			return errors.Errorf("Bad fetch request: %v", fetch)
  1337  		}
  1338  		refInBareRepo := fetch[1]
  1339  
  1340  		// Push into a local ref with a temporary name, because the
  1341  		// git process that invoked us will get confused if we make a
  1342  		// ref with the same name.  Later, delete this temporary ref.
  1343  		localTempRef := fmt.Sprintf("%s-%s-%d",
  1344  			plumbing.ReferenceName(refInBareRepo).Short(), r.uniqID, i)
  1345  		refSpec := fmt.Sprintf(
  1346  			"%s:refs/remotes/%s/%s", refInBareRepo, r.remote, localTempRef)
  1347  		r.log.CDebugf(ctx, "Fetching %s", refSpec)
  1348  
  1349  		refSpecs = append(refSpecs, gogitcfg.RefSpec(refSpec))
  1350  		deleteRefSpecs = append(deleteRefSpecs, gogitcfg.RefSpec(
  1351  			fmt.Sprintf(":refs/remotes/%s/%s", r.remote, localTempRef)))
  1352  	}
  1353  
  1354  	var statusChan plumbing.StatusChan
  1355  	if r.verbosity >= 1 {
  1356  		s := make(chan plumbing.StatusUpdate)
  1357  		defer close(s)
  1358  		statusChan = plumbing.StatusChan(s)
  1359  		go r.processGogitStatus(ctx, s, nil)
  1360  	}
  1361  
  1362  	// Now "push" into the local repo to get it to store objects
  1363  	// from the KBFS bare repo.
  1364  	err = remote.PushContext(ctx, &gogit.PushOptions{
  1365  		RemoteName: localRepoRemoteName,
  1366  		RefSpecs:   refSpecs,
  1367  		StatusChan: statusChan,
  1368  	})
  1369  	if err != nil && err != gogit.NoErrAlreadyUpToDate {
  1370  		return err
  1371  	}
  1372  
  1373  	// Delete the temporary refspecs now that the objects are
  1374  	// safely stored in the local repo.
  1375  	err = remote.PushContext(ctx, &gogit.PushOptions{
  1376  		RemoteName: localRepoRemoteName,
  1377  		RefSpecs:   deleteRefSpecs,
  1378  	})
  1379  	if err != nil && err != gogit.NoErrAlreadyUpToDate {
  1380  		return err
  1381  	}
  1382  
  1383  	err = r.waitForJournal(ctx)
  1384  	if err != nil {
  1385  		return err
  1386  	}
  1387  	r.log.CDebugf(ctx, "Done waiting for journal")
  1388  
  1389  	err = r.checkGC(ctx)
  1390  	if err != nil {
  1391  		return err
  1392  	}
  1393  
  1394  	_, err = r.output.Write([]byte("\n"))
  1395  	return err
  1396  }
  1397  
// canPushAll returns true if a) the KBFS repo is currently empty, and
// b) we've been asked to push all the local references (i.e.,
// --all/--mirror).  The second return value reports whether the KBFS
// repo is empty, which is meaningful even when a push-all isn't
// possible.
func (r *runner) canPushAll(
	ctx context.Context, repo *gogit.Repository, args [][]string) (
	canPushAll, kbfsRepoEmpty bool, err error) {
	refs, err := repo.References()
	if err != nil {
		return false, false, err
	}
	defer refs.Close()

	// Iterate through the remote references.  Any non-symbolic ref
	// means the KBFS repo already has real content, so neither a
	// push-all nor "empty" applies.
	for {
		ref, err := refs.Next()
		if errors.Cause(err) == io.EOF {
			break
		} else if err != nil {
			return false, false, err
		}

		if ref.Type() != plumbing.SymbolicReference {
			r.log.CDebugf(ctx, "Remote has at least one non-symbolic ref: %s",
				ref.String())
			return false, false, nil
		}
	}

	// Build a set of all the source refs that the user is pushing.
	sources := make(map[string]bool)
	for _, push := range args {
		if len(push) != 1 {
			return false, false, errors.Errorf("Bad push request: %v", push)
		}
		refspec := gogitcfg.RefSpec(push[0])
		// If some ref is being pushed to a different name on the
		// remote, we can't do a push-all.
		if refspec.Src() != refspec.Dst("").String() {
			return false, true, nil
		}

		src := refspec.Src()
		sources[src] = true
	}

	localGit := osfs.New(r.gitDir)
	localStorer, err := filesystem.NewStorage(localGit)
	if err != nil {
		return false, false, err
	}
	localRefs, err := localStorer.IterReferences()
	if err != nil {
		return false, false, err
	}

	// Check whether all of the local refs are being used as a source
	// for this push.  If not, we can't blindly push everything.
	for {
		ref, err := localRefs.Next()
		if errors.Cause(err) == io.EOF {
			break
		}
		if err != nil {
			return false, false, err
		}

		if ref.Type() == plumbing.SymbolicReference {
			continue
		}

		// If the local repo has a non-symbolic ref that's not being
		// pushed, we can't push everything blindly, otherwise we
		// might leak some data.
		if !sources[ref.Name().String()] {
			r.log.CDebugf(ctx, "Not pushing local ref %s", ref)
			return false, true, nil
		}
	}

	return true, true, nil
}
  1479  
// pushAll pushes the entire local repo into the KBFS repo `fs` by
// copying objects, refs, and (if present) the packed-refs file
// directly, instead of going through go-git.  It should only be
// called when `canPushAll` returned true.
func (r *runner) pushAll(ctx context.Context, fs *libfs.FS) (err error) {
	r.log.CDebugf(ctx, "Pushing the entire local repo")
	localFS := osfs.New(r.gitDir)

	// First copy objects.
	localFSObjects, err := localFS.Chroot("objects")
	if err != nil {
		return err
	}
	fsObjects, err := fs.Chroot("objects")
	if err != nil {
		return err
	}

	// Public repos are only signed; private ones are encrypted.
	verb := "encrypting"
	if r.h.Type() == tlf.Public {
		verb = "signing"
	}

	err = r.recursiveCopyWithCounts(
		ctx, localFSObjects, fsObjects,
		"Counting objects", "countobj",
		fmt.Sprintf("Preparing and %s objects", verb), "pushobj")
	if err != nil {
		return err
	}

	// Hold the packed refs lock file while transferring, so we don't
	// clash with anyone else trying to push-init this repo.  go-git
	// takes the same lock while writing packed-refs during a
	// `Remote.Fetch()` operation (used in `pushSome()` below).
	lockFile, err := fs.Create(packedRefsTempPath)
	if err != nil {
		return err
	}
	defer func() {
		// Surface the Close error through the named return, unless an
		// earlier error already took precedence.
		closeErr := lockFile.Close()
		if closeErr != nil && err == nil {
			err = closeErr
		}
	}()

	err = lockFile.Lock()
	if err != nil {
		return err
	}

	// Next, copy refs.
	localFSRefs, err := localFS.Chroot("refs")
	if err != nil {
		return err
	}
	fsRefs, err := fs.Chroot("refs")
	if err != nil {
		return err
	}
	err = r.recursiveCopyWithCounts(
		ctx, localFSRefs, fsRefs,
		"Counting refs", "countref",
		fmt.Sprintf("Preparing and %s refs", verb), "pushref")
	if err != nil {
		return err
	}

	// Finally, packed refs if it exists.
	_, err = localFS.Stat(packedRefsPath)
	if os.IsNotExist(err) {
		return nil
	} else if err != nil {
		return err
	}

	return r.copyFileWithCount(ctx, localFS, fs, packedRefsPath,
		"Counting packed refs", "countprefs",
		fmt.Sprintf("Preparing and %s packed refs", verb), "pushprefs")
}
  1556  
  1557  func dstNameFromRefString(refStr string) plumbing.ReferenceName {
  1558  	return gogitcfg.RefSpec(refStr).Dst("")
  1559  }
  1560  
// parentCommitsForRef returns a map of refs with a list of commits for each
// ref, newest first. It only includes commits that exist in `localStorer` but
// not in `remoteStorer`.
func (r *runner) parentCommitsForRef(ctx context.Context,
	localStorer gogitstor.Storer, remoteStorer gogitstor.Storer,
	refs map[gogitcfg.RefSpec]bool) (libgit.RefDataByName, error) {

	commitsByRef := make(libgit.RefDataByName, len(refs))
	// haves is shared across all refs, so a commit already walked for
	// one ref isn't re-walked (or re-listed) for another.
	haves := make(map[plumbing.Hash]bool)

	for refspec := range refs {
		// Deletions carry no commits, just the IsDelete marker.
		if refspec.IsDelete() {
			commitsByRef[refspec.Dst("")] = &libgit.RefData{
				IsDelete: true,
			}
			continue
		}
		refName := plumbing.ReferenceName(refspec.Src())
		// Resolution failure is logged but not fatal; we fall back to
		// the unresolved name below.
		resolved, err := gogitstor.ResolveReference(localStorer, refName)
		if err != nil {
			r.log.CDebugf(ctx, "Error resolving ref %s", refName)
		}
		if resolved != nil {
			refName = resolved.Name()
		}

		ref, err := localStorer.Reference(refName)
		if err != nil {
			r.log.CDebugf(ctx, "Error getting reference %s: %+v",
				refName, err)
			continue
		}
		hash := ref.Hash()

		// Get the HEAD commit for the ref from the local repository.
		commit, err := gogitobj.GetCommit(localStorer, hash)
		if err != nil {
			r.log.CDebugf(ctx, "Error getting commit for hash %s (%s): %+v",
				string(hash[:]), refName, err)
			continue
		}

		// Iterate through the commits backward, until we experience any of the
		// following:
		// 1. Find a commit that the remote knows about,
		// 2. Reach our maximum number of commits to check,
		// 3. Run out of commits.
		walker := gogitobj.NewCommitPreorderIter(commit, haves, nil)
		toVisit := maxCommitsToVisitPerRef
		dstRefName := refspec.Dst("")
		commitsByRef[dstRefName] = &libgit.RefData{
			IsDelete: refspec.IsDelete(),
			Commits:  make([]*gogitobj.Commit, 0, maxCommitsToVisitPerRef),
		}
		err = walker.ForEach(func(c *gogitobj.Commit) error {
			haves[c.Hash] = true
			toVisit--
			// If toVisit starts out at 0 (indicating there is no
			// max), then it will be negative here and we won't stop
			// early.
			if toVisit == 0 {
				// Append a sentinel value to communicate that there would be
				// more commits.
				commitsByRef[dstRefName].Commits =
					append(commitsByRef[dstRefName].Commits,
						libgit.CommitSentinelValue)
				return gogitstor.ErrStop
			}
			// A nil error means the remote already has this object, so
			// the walk for this ref can stop here.
			hasEncodedObjectErr := remoteStorer.HasEncodedObject(c.Hash)
			if hasEncodedObjectErr == nil {
				return gogitstor.ErrStop
			}
			commitsByRef[dstRefName].Commits =
				append(commitsByRef[dstRefName].Commits, c)
			return nil
		})
		if err != nil {
			return nil, err
		}
	}
	return commitsByRef, nil
}
  1643  
// pushSome pushes the refspecs in `args` from the local git repo into
// the KBFS repo, by "fetching" from the local repo into the remote
// one.  Deletions are applied directly to the remote storer.  It
// returns a map from destination ref name to that ref's push error
// (nil on success), plus an overall error.
func (r *runner) pushSome(
	ctx context.Context, repo *gogit.Repository, fs *libfs.FS, args [][]string,
	kbfsRepoEmpty bool) (map[string]error, error) {
	r.log.CDebugf(ctx, "Pushing %d refs into %s", len(args), r.gitDir)

	remote, err := repo.CreateRemote(&gogitcfg.RemoteConfig{
		Name: localRepoRemoteName,
		URLs: []string{r.gitDir},
	})
	if err != nil {
		return nil, err
	}

	results := make(map[string]error, len(args))
	var refspecs []gogitcfg.RefSpec
	refs := make(map[string]bool, len(args))
	for _, push := range args {
		if len(push) != 1 {
			return nil, errors.Errorf("Bad push request: %v", push)
		}
		refspec := gogitcfg.RefSpec(push[0])
		err := refspec.Validate()
		if err != nil {
			return nil, err
		}

		// Delete the reference in the repo if needed; otherwise,
		// fetch from the local repo into the remote repo.
		if refspec.IsDelete() {
			dst := dstNameFromRefString(push[0])
			if refspec.IsWildcard() {
				results[dst.String()] = errors.Errorf(
					"Wildcards not supported for deletes: %s", refspec)
				continue
			}
			err = repo.Storer.RemoveReference(dst)
			if err == gogit.NoErrAlreadyUpToDate {
				err = nil
			}
			results[dst.String()] = err
		} else {
			refs[refspec.Src()] = true
			refspecs = append(refspecs, refspec)
		}
		// NOTE(review): at this point `err` can only be non-nil from
		// the RemoveReference delete path above, so "fetching" in this
		// message looks misleading — confirm intent.
		if err != nil {
			r.log.CDebugf(ctx, "Error fetching %s: %+v", refspec, err)
		}
	}

	if len(refspecs) > 0 {
		// At verbosity >= 1, stream go-git status plus KBFS lock/unlock
		// events to the user.  The status channel is closed when this
		// function returns, which lets the goroutine finish; it then
		// unsubscribes and drains the events channel.
		var statusChan plumbing.StatusChan
		if r.verbosity >= 1 {
			s := make(chan plumbing.StatusUpdate)
			defer close(s)
			statusChan = plumbing.StatusChan(s)
			go func() {
				events := make(chan libfs.FSEvent)
				fs.SubscribeToEvents(events)
				r.processGogitStatus(ctx, s, events)
				fs.UnsubscribeToEvents(events)
				// Drain any pending writes to the channel.
				for range events {
				}
			}()
		}

		if kbfsRepoEmpty {
			r.log.CDebugf(
				ctx, "Requesting a pack-refs file for %d refs", len(refspecs))
		}

		err = remote.FetchContext(ctx, &gogit.FetchOptions{
			RemoteName: localRepoRemoteName,
			RefSpecs:   refspecs,
			StatusChan: statusChan,
			PackRefs:   kbfsRepoEmpty,
		})
		if err == gogit.NoErrAlreadyUpToDate {
			err = nil
		}

		// All non-deleted refspecs in the batch get the same error.
		for _, refspec := range refspecs {
			dst := refspec.Dst("")
			results[dst.String()] = err
		}
	}
	return results, nil
}
  1733  
  1734  // handlePushBatch: From https://git-scm.com/docs/git-remote-helpers
  1735  //
  1736  // push +<src>:<dst>
  1737  // Pushes the given local <src> commit or branch to the remote branch
  1738  // described by <dst>. A batch sequence of one or more push commands
  1739  // is terminated with a blank line (if there is only one reference to
  1740  // push, a single push command is followed by a blank line). For
  1741  // example, the following would be two batches of push, the first
  1742  // asking the remote-helper to push the local ref master to the remote
  1743  // ref master and the local HEAD to the remote branch, and the second
  1744  // asking to push ref foo to ref bar (forced update requested by the
  1745  // +).
  1746  //
  1747  // push refs/heads/master:refs/heads/master
  1748  // push HEAD:refs/heads/branch
  1749  // \n
  1750  // push +refs/heads/foo:refs/heads/bar
  1751  // \n
  1752  //
  1753  // Zero or more protocol options may be entered after the last push
  1754  // command, before the batch’s terminating blank line.
  1755  //
  1756  // When the push is complete, outputs one or more ok <dst> or error
  1757  // <dst> <why>? lines to indicate success or failure of each pushed
  1758  // ref. The status report output is terminated by a blank line. The
  1759  // option field <why> may be quoted in a C style string if it contains
  1760  // an LF.
func (r *runner) handlePushBatch(ctx context.Context, args [][]string) (
	commits libgit.RefDataByName, err error) {
	repo, fs, err := r.initRepoIfNeeded(ctx, gitCmdPush)
	if err != nil {
		return nil, err
	}

	// A "push all" mirrors the entire local repo at once; it's only
	// possible in certain cases, as decided by `canPushAll`.
	canPushAll, kbfsRepoEmpty, err := r.canPushAll(ctx, repo, args)
	if err != nil {
		return nil, err
	}

	// Open the storage of the local on-disk git repo, so we can walk
	// its refs and commits below.
	localGit := osfs.New(r.gitDir)
	localStorer, err := filesystem.NewStorage(localGit)
	if err != nil {
		return nil, err
	}

	refspecs := make(map[gogitcfg.RefSpec]bool, len(args))
	for _, push := range args {
		// `canPushAll` already validates the push reference.
		refspec := gogitcfg.RefSpec(push[0])
		refspecs[refspec] = true
	}

	// Get all commits associated with the refs. This must happen before the
	// push for us to be able to calculate the difference.
	commits, err = r.parentCommitsForRef(ctx, localStorer,
		repo.Storer, refspecs)
	if err != nil {
		return nil, err
	}

	var results map[string]error
	// Ignore pushAll for commit collection, for now.
	if canPushAll {
		err = r.pushAll(ctx, fs)
		// All refs in the batch get the same error.
		results = make(map[string]error, len(args))
		for _, push := range args {
			// `canPushAll` already validates the push reference.
			dst := dstNameFromRefString(push[0]).String()
			results[dst] = err
		}
	} else {
		results, err = r.pushSome(ctx, repo, fs, args, kbfsRepoEmpty)
	}
	if err != nil {
		return nil, err
	}

	// Make sure the pushed data is flushed from the journal before
	// reporting success to git.
	err = r.waitForJournal(ctx)
	if err != nil {
		return nil, err
	}
	r.log.CDebugf(ctx, "Done waiting for journal")

	// Report per-ref status to git in the remote-helper protocol's
	// "ok <dst>" / "error <dst> <why>" format.
	for d, e := range results {
		result := ""
		if e == nil {
			result = fmt.Sprintf("ok %s", d)
		} else {
			result = fmt.Sprintf("error %s %s", d, e.Error())
		}
		_, err = r.output.Write([]byte(result + "\n"))
		if err != nil {
			return nil, err
		}
	}

	// Remove any errored commits so that we don't send an update
	// message about them.
	for refspec := range refspecs {
		dst := refspec.Dst("")
		if results[dst.String()] != nil {
			r.log.CDebugf(
				ctx, "Removing commit result for errored push on refspec %s",
				refspec)
			delete(commits, dst)
		}
	}

	if len(commits) > 0 {
		err = libgit.UpdateRepoMD(ctx, r.config, r.h, fs,
			keybase1.GitPushType_DEFAULT, "", commits)
		if err != nil {
			return nil, err
		}
	}

	err = r.checkGC(ctx)
	if err != nil {
		return nil, err
	}

	// A blank line terminates the status report.
	_, err = r.output.Write([]byte("\n"))
	if err != nil {
		return nil, err
	}
	return commits, nil
}
  1862  
  1863  // handleOption: https://git-scm.com/docs/git-remote-helpers#git-remote-helpers-emoptionemltnamegtltvaluegt
  1864  //
  1865  // option <name> <value>
  1866  // Sets the transport helper option <name> to <value>. Outputs a
  1867  // single line containing one of ok (option successfully set),
  1868  // unsupported (option not recognized) or error <msg> (option <name>
  1869  // is supported but <value> is not valid for it). Options should be
  1870  // set before other commands, and may influence the behavior of those
  1871  // commands.
  1872  func (r *runner) handleOption(ctx context.Context, args []string) (err error) {
  1873  	defer func() {
  1874  		if err != nil {
  1875  			_, _ = r.output.Write(
  1876  				[]byte(fmt.Sprintf("error %s\n", err.Error())))
  1877  		}
  1878  	}()
  1879  
  1880  	if len(args) != 2 {
  1881  		return errors.Errorf("Bad option request: %v", args)
  1882  	}
  1883  
  1884  	option := args[0]
  1885  	result := ""
  1886  	switch option {
  1887  	case gitOptionVerbosity:
  1888  		v, err := strconv.ParseInt(args[1], 10, 64)
  1889  		if err != nil {
  1890  			return err
  1891  		}
  1892  		r.verbosity = v
  1893  		r.log.CDebugf(ctx, "Setting verbosity to %d", v)
  1894  		result = "ok"
  1895  	case gitOptionProgress:
  1896  		b, err := strconv.ParseBool(args[1])
  1897  		if err != nil {
  1898  			return err
  1899  		}
  1900  		r.progress = b
  1901  		r.log.CDebugf(ctx, "Setting progress to %t", b)
  1902  		result = "ok"
  1903  	case gitOptionCloning:
  1904  		b, err := strconv.ParseBool(args[1])
  1905  		if err != nil {
  1906  			return err
  1907  		}
  1908  		r.cloning = b
  1909  		r.log.CDebugf(ctx, "Setting cloning to %t", b)
  1910  		result = "ok"
  1911  	case gitOptionPushcert:
  1912  		if args[1] == gitOptionIfAsked {
  1913  			// "if-asked" means we should sign only if the server
  1914  			// supports it. Our server doesn't support it, but we
  1915  			// should still accept the configuration.
  1916  			result = "ok"
  1917  		} else {
  1918  			b, err := strconv.ParseBool(args[1])
  1919  			if err != nil {
  1920  				return err
  1921  			}
  1922  			if b {
  1923  				// We don't support signing.
  1924  				r.log.CDebugf(ctx, "Signing is unsupported")
  1925  				result = "unsupported"
  1926  			} else {
  1927  				result = "ok"
  1928  			}
  1929  		}
  1930  	default:
  1931  		result = "unsupported"
  1932  	}
  1933  
  1934  	_, err = r.output.Write([]byte(result + "\n"))
  1935  	return err
  1936  }
  1937  
// lfsProgress is the JSON "progress" message emitted to git-lfs as
// bytes are transferred for a single object (see `getProgress`, which
// always sets Event to gitLFSProgressEvent).
type lfsProgress struct {
	Event          string `json:"event"`
	Oid            string `json:"oid"`
	BytesSoFar     int    `json:"bytesSoFar"`
	BytesSinceLast int    `json:"bytesSinceLast"`
}
  1944  
  1945  // lfsProgressWriter is a simple io.Writer shim that writes progress
  1946  // messages to `r.output` for LFS.  Its `printOne` function can also
  1947  // be passed to `runner.waitForJournalWithPrinters` in order to print
  1948  // periodic progress messages.
type lfsProgressWriter struct {
	r                     *runner
	output                io.Writer
	oid                   string  // LFS object ID whose progress is reported
	start                 int     // bytes already reported before this copy
	soFar                 int     // how much in absolute bytes has been copied
	totalForCopy          int     // how much in absolute bytes will be copied
	plaintextSize         int     // how much LFS expects to be copied
	factorOfPlaintextSize float64 // what frac of the above size is this copy?
}

var _ io.Writer = (*lfsProgressWriter)(nil)
  1961  
  1962  func (lpw *lfsProgressWriter) getProgress(newSoFar int) lfsProgress {
  1963  	last := lpw.soFar
  1964  	lpw.soFar = newSoFar
  1965  
  1966  	f := 1.0
  1967  	if lpw.plaintextSize > 0 {
  1968  		f = lpw.factorOfPlaintextSize *
  1969  			(float64(lpw.plaintextSize) / float64(lpw.totalForCopy))
  1970  	}
  1971  
  1972  	return lfsProgress{
  1973  		Event:          gitLFSProgressEvent,
  1974  		Oid:            lpw.oid,
  1975  		BytesSoFar:     lpw.start + int(float64(lpw.soFar)*f),
  1976  		BytesSinceLast: int(float64(lpw.soFar-last) * f),
  1977  	}
  1978  }
  1979  
  1980  func (lpw *lfsProgressWriter) Write(p []byte) (n int, err error) {
  1981  	n, err = lpw.output.Write(p)
  1982  	if err != nil {
  1983  		return n, err
  1984  	}
  1985  
  1986  	if lpw.r.processType == processLFSNoProgress {
  1987  		return n, nil
  1988  	}
  1989  
  1990  	prog := lpw.getProgress(lpw.soFar + n)
  1991  
  1992  	progBytes, err := json.Marshal(prog)
  1993  	if err != nil {
  1994  		return n, err
  1995  	}
  1996  	_, err = lpw.r.output.Write(append(progBytes, []byte("\n")...))
  1997  	if err != nil {
  1998  		return n, err
  1999  	}
  2000  	return n, nil
  2001  }
  2002  
  2003  func (lpw *lfsProgressWriter) printOne(
  2004  	ctx context.Context, _ int, totalSize, sizeLeft int64) int {
  2005  	if lpw.r.processType == processLFSNoProgress {
  2006  		return 0
  2007  	}
  2008  
  2009  	if lpw.totalForCopy == 0 {
  2010  		lpw.totalForCopy = int(totalSize)
  2011  	}
  2012  
  2013  	prog := lpw.getProgress(int(totalSize - sizeLeft))
  2014  
  2015  	progBytes, err := json.Marshal(prog)
  2016  	if err != nil {
  2017  		lpw.r.log.CDebugf(ctx, "Error while json marshaling: %+v", err)
  2018  		return 0
  2019  	}
  2020  	_, err = lpw.r.output.Write(append(progBytes, []byte("\n")...))
  2021  	if err != nil {
  2022  		lpw.r.log.CDebugf(ctx, "Error while writing: %+v", err)
  2023  	}
  2024  	return 0
  2025  }
  2026  
  2027  func (r *runner) copyFileLFS(
  2028  	ctx context.Context, from billy.Filesystem, to billy.Filesystem,
  2029  	fromName, toName, oid string, totalSize int,
  2030  	progressScale float64) (err error) {
  2031  	f, err := from.Open(fromName)
  2032  	if err != nil {
  2033  		return err
  2034  	}
  2035  	defer f.Close()
  2036  	toF, err := to.Create(toName)
  2037  	if err != nil {
  2038  		return err
  2039  	}
  2040  	defer toF.Close()
  2041  
  2042  	// Scale the progress by the given factor.
  2043  	w := &lfsProgressWriter{
  2044  		r:                     r,
  2045  		oid:                   oid,
  2046  		output:                toF,
  2047  		totalForCopy:          totalSize,
  2048  		plaintextSize:         totalSize,
  2049  		factorOfPlaintextSize: progressScale,
  2050  	}
  2051  	_, err = io.Copy(w, f)
  2052  	return err
  2053  }
  2054  
// handleLFSUpload copies the local LFS object at `localPath` into the
// repo's LFS subdirectory in KBFS under the name `oid`, then waits
// for the journal to flush, reporting progress through both phases.
func (r *runner) handleLFSUpload(
	ctx context.Context, oid string, localPath string, size int) (err error) {
	fs, err := r.makeFS(ctx)
	if err != nil {
		return err
	}
	// NOTE(review): 0600 on a directory lacks the exec bit; presumably
	// KBFS doesn't enforce POSIX permission semantics here -- confirm.
	err = fs.MkdirAll(libgit.LFSSubdir, 0600)
	if err != nil {
		return err
	}
	fs, err = fs.ChrootAsLibFS(libgit.LFSSubdir)
	if err != nil {
		return err
	}

	// Get an FS for the local directory.
	dir, file := filepath.Split(localPath)
	if dir == "" || file == "" {
		return errors.Errorf("Invalid local path %s", localPath)
	}

	localFS := osfs.New(dir)
	// Have the copy count for 40% of the overall upload (arbitrary).
	err = r.copyFileLFS(ctx, localFS, fs, file, oid, oid, size, 0.4)
	if err != nil {
		return err
	}
	printNothing := func(_ context.Context) {}
	// Have the flush count for 60% of the overall upload (arbitrary);
	// `start` resumes progress at the 40% mark left by the copy above.
	w := &lfsProgressWriter{
		r:                     r,
		oid:                   oid,
		start:                 int(float64(size) * 0.4),
		plaintextSize:         size,
		factorOfPlaintextSize: 0.60,
	}
	return r.waitForJournalWithPrinters(
		ctx, printNothing, w.printOne, printNothing)
}
  2094  
  2095  func (r *runner) handleLFSDownload(
  2096  	ctx context.Context, oid string, size int) (localPath string, err error) {
  2097  	fs, err := r.makeFS(ctx)
  2098  	if err != nil {
  2099  		return "", err
  2100  	}
  2101  	err = fs.MkdirAll(libgit.LFSSubdir, 0600)
  2102  	if err != nil {
  2103  		return "", err
  2104  	}
  2105  	fs, err = fs.ChrootAsLibFS(libgit.LFSSubdir)
  2106  	if err != nil {
  2107  		return "", err
  2108  	}
  2109  
  2110  	// Put the file in a temporary location; lfs will move it to the
  2111  	// right location.
  2112  	dir := "."
  2113  	localFS := osfs.New(dir)
  2114  	localFileName := ".kbfs_lfs_" + oid
  2115  
  2116  	err = r.copyFileLFS(ctx, fs, localFS, oid, localFileName, oid, size, 1.0)
  2117  	if err != nil {
  2118  		return "", err
  2119  	}
  2120  	return filepath.Join(dir, localFileName), nil
  2121  }
  2122  
// processCommand reads remote-helper commands from `commandChan` and
// dispatches them.  `fetch` and `push` commands accumulate into
// batches; a blank line terminates and processes the pending batch,
// and a blank line with no pending batch ends the session.
func (r *runner) processCommand(
	ctx context.Context, commandChan <-chan string) (err error) {
	var fetchBatch, pushBatch [][]string
	for {
		select {
		case cmd := <-commandChan:
			// Tag each command's logs with a random operation ID.
			ctx := libkbfs.CtxWithRandomIDReplayable(
				ctx, ctxCommandIDKey, ctxCommandOpID, r.log)

			cmdParts := strings.Fields(cmd)
			if len(cmdParts) == 0 {
				// Blank line: process whichever batch is pending, or
				// finish if there isn't one.
				switch {
				case len(fetchBatch) > 0:
					if r.cloning {
						r.log.CDebugf(ctx, "Processing clone")
						err = r.handleClone(ctx)
						if err != nil {
							return err
						}
					} else {
						r.log.CDebugf(ctx, "Processing fetch batch")
						err = r.handleFetchBatch(ctx, fetchBatch)
						if err != nil {
							return err
						}
					}
					fetchBatch = nil
					continue
				case len(pushBatch) > 0:
					r.log.CDebugf(ctx, "Processing push batch")
					_, err = r.handlePushBatch(ctx, pushBatch)
					if err != nil {
						return err
					}
					pushBatch = nil
					continue
				default:
					r.log.CDebugf(ctx, "Done processing commands")
					return nil
				}
			}

			switch cmdParts[0] {
			case gitCmdCapabilities:
				err = r.handleCapabilities()
			case gitCmdList:
				err = r.handleList(ctx, cmdParts[1:])
			case gitCmdFetch:
				// Batches must be homogeneous: reject interleaving.
				if len(pushBatch) > 0 {
					return errors.New(
						"Cannot fetch in the middle of a push batch")
				}
				fetchBatch = append(fetchBatch, cmdParts[1:])
			case gitCmdPush:
				if len(fetchBatch) > 0 {
					return errors.New(
						"Cannot push in the middle of a fetch batch")
				}
				pushBatch = append(pushBatch, cmdParts[1:])
			case gitCmdOption:
				err = r.handleOption(ctx, cmdParts[1:])
			default:
				err = errors.Errorf("Unsupported command: %s", cmdParts[0])
			}
			if err != nil {
				return err
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
  2195  
// lfsError is the JSON error payload attached to an LFS response when
// an upload or download fails.
type lfsError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}
  2200  
// lfsRequest is one JSON request line from git-lfs; `Event`
// distinguishes init, upload, download, and terminate requests (see
// `processCommandLFS`).
type lfsRequest struct {
	Event     string `json:"event"`
	Oid       string `json:"oid"`
	Size      int    `json:"size,omitempty"`
	Path      string `json:"path,omitempty"`
	Operation string `json:"operation,omitempty"`
	Remote    string `json:"remote,omitempty"`
}
  2209  
// lfsResponse is the JSON reply written for each LFS upload or
// download request; `Path` is set for a successful download, and
// `Error` when the operation failed.
type lfsResponse struct {
	Event string    `json:"event"`
	Oid   string    `json:"oid"`
	Path  string    `json:"path,omitempty"`
	Error *lfsError `json:"error,omitempty"`
}
  2216  
// processCommandLFS reads git-lfs custom-transfer JSON requests from
// `commandChan`, performs the requested uploads and downloads, and
// writes one JSON response line per request.  It returns cleanly on a
// terminate event.
func (r *runner) processCommandLFS(
	ctx context.Context, commandChan <-chan string) (err error) {
lfsLoop:
	for {
		select {
		case cmd := <-commandChan:
			// Tag each request's logs with a random operation ID.
			ctx := libkbfs.CtxWithRandomIDReplayable(
				ctx, ctxCommandIDKey, ctxCommandOpID, r.log)

			var req lfsRequest
			err := json.Unmarshal([]byte(cmd), &req)
			if err != nil {
				return err
			}

			// Start from a "complete" response; the handlers below
			// fill in Error or Path as appropriate.
			resp := lfsResponse{
				Event: gitLFSCompleteEvent,
				Oid:   req.Oid,
			}
			switch req.Event {
			case gitLFSInitEvent:
				r.log.CDebugf(
					ctx, "Initialize message, operation=%s, remote=%s",
					req.Operation, req.Remote)
				// An empty JSON object acknowledges initialization;
				// no completion response is sent for init.
				_, err := r.output.Write([]byte("{}\n"))
				if err != nil {
					return err
				}
				continue lfsLoop
			case gitLFSUploadEvent:
				r.log.CDebugf(ctx, "Handling upload, oid=%s", req.Oid)
				err := r.handleLFSUpload(ctx, req.Oid, req.Path, req.Size)
				if err != nil {
					resp.Error = &lfsError{Code: 1, Message: err.Error()}
				}
			case gitLFSDownloadEvent:
				r.log.CDebugf(ctx, "Handling download, oid=%s", req.Oid)
				p, err := r.handleLFSDownload(ctx, req.Oid, req.Size)
				if err != nil {
					resp.Error = &lfsError{Code: 1, Message: err.Error()}
				} else {
					// Normalize to slashes, presumably because git-lfs
					// expects slash-separated paths even on Windows.
					resp.Path = filepath.ToSlash(p)
				}
			case gitLFSTerminateEvent:
				return nil
			}

			respBytes, err := json.Marshal(resp)
			if err != nil {
				return err
			}
			_, err = r.output.Write(append(respBytes, []byte("\n")...))
			if err != nil {
				return err
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
  2277  
// processCommands reads newline-terminated commands from `r.input`
// and forwards them to a processor goroutine (git remote-helper or
// LFS, depending on `r.processType`), returning as soon as EOF, a
// processor error, or context cancellation occurs.
func (r *runner) processCommands(ctx context.Context) (err error) {
	r.log.CDebugf(ctx, "Ready to process")
	reader := bufio.NewReader(r.input)
	var wg sync.WaitGroup
	defer wg.Wait()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Allow the creation of .kbfs_git within KBFS.
	ctx = context.WithValue(ctx, libkbfs.CtxAllowNameKey, kbfsRepoDir)

	// Process the commands with a separate queue in a separate
	// goroutine, so we can exit as soon as EOF is received
	// (indicating the corresponding `git` command has been
	// interrupted).
	commandChan := make(chan string, 100)
	processorErrChan := make(chan error, 1)
	wg.Add(1)
	go func() {
		defer wg.Done()
		// processorErrChan is buffered, so this send never blocks
		// even if the main loop has already returned.
		switch r.processType {
		case processGit:
			processorErrChan <- r.processCommand(ctx, commandChan)
		case processLFS, processLFSNoProgress:
			processorErrChan <- r.processCommandLFS(ctx, commandChan)
		default:
			panic(fmt.Sprintf("Unknown process type: %v", r.processType))
		}
	}()

	for {
		// Read each line in a fresh goroutine, so the select below
		// can react to processor errors or cancellation while a read
		// is still blocked on stdin.
		stdinErrChan := make(chan error, 1)
		go func() {
			cmd, err := reader.ReadString('\n')
			if err != nil {
				stdinErrChan <- err
				return
			}

			r.log.CDebugf(ctx, "Received command: %s", cmd)
			commandChan <- cmd
			stdinErrChan <- nil
		}()

		select {
		case err := <-stdinErrChan:
			if errors.Cause(err) == io.EOF {
				r.log.CDebugf(ctx, "Done processing commands")
				return nil
			} else if err != nil {
				return err
			}
			// Otherwise continue to read the next command.
		case err := <-processorErrChan:
			// NOTE(review): on this path the reader goroutine may be
			// left blocked on ReadString; presumably acceptable since
			// the process exits shortly after -- confirm.
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}