github.com/keybase/client/go@v0.0.0-20240309051027-028f7c731f8b/kbfs/libgit/repo.go

     1  // Copyright 2017 Keybase Inc. All rights reserved.
     2  // Use of this source code is governed by a BSD
     3  // license that can be found in the LICENSE file.
     4  
     5  package libgit
     6  
     7  import (
     8  	"bytes"
     9  	"context"
    10  	"encoding/hex"
    11  	"fmt"
    12  	"io"
    13  	"os"
    14  	"path"
    15  	"regexp"
    16  	"strconv"
    17  	"strings"
    18  	"time"
    19  
    20  	"github.com/keybase/client/go/kbfs/data"
    21  	"github.com/keybase/client/go/kbfs/idutil"
    22  	"github.com/keybase/client/go/kbfs/libfs"
    23  	"github.com/keybase/client/go/kbfs/libkbfs"
    24  	"github.com/keybase/client/go/kbfs/tlfhandle"
    25  	"github.com/keybase/client/go/libkb"
    26  	"github.com/keybase/client/go/logger"
    27  	"github.com/keybase/client/go/protocol/keybase1"
    28  	"github.com/pkg/errors"
    29  	billy "gopkg.in/src-d/go-billy.v4"
    30  	gogit "gopkg.in/src-d/go-git.v4"
    31  	"gopkg.in/src-d/go-git.v4/plumbing"
    32  	"gopkg.in/src-d/go-git.v4/plumbing/object"
    33  	"gopkg.in/src-d/go-git.v4/plumbing/storer"
    34  	"gopkg.in/src-d/go-git.v4/storage"
    35  	"gopkg.in/src-d/go-git.v4/storage/filesystem"
    36  )
    37  
    38  const (
    39  	kbfsRepoDir              = ".kbfs_git"
    40  	kbfsConfigName           = "kbfs_config"
    41  	kbfsConfigNameTemp       = "._kbfs_config"
    42  	gitSuffixToIgnore        = ".git"
    43  	kbfsDeletedReposDir      = ".kbfs_deleted_repos"
    44  	minDeletedAgeForCleaning = 1 * time.Hour
    45  	cleaningTimeLimit        = 2 * time.Second
    46  	repoGCLockFileName       = ".gc"
    47  	repoGCInProgressFileName = ".gc_in_progress"
    48  	gcTimeLimit              = 1 * time.Hour
    49  )
    50  
    51  // CommitSentinelValue marks the end of a list of commits when there
    52  // are still more commits that haven't been read.  Its value is the
    53  // zero value, `nil`.
    54  var CommitSentinelValue *object.Commit
    55  
    56  // This character set is what GitHub supports in repo names.  It's
    57  // probably restricted like this to avoid problems when cloning onto
    58  // filesystems that have different Unicode decomposition schemes
    59  // (https://en.wikipedia.org/wiki/Unicode_equivalence).  There's no
    60  // internal reason to be so restrictive, but it probably makes sense
    61  // to start off more restrictive and then relax things later as we
    62  // test.
    63  var repoNameRE = regexp.MustCompile(`^([a-zA-Z0-9][a-zA-Z0-9_\.-]*)$`)
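
        // A few concrete cases of what this pattern accepts and rejects
        // (illustrative only, not exercised by this file):
        //
        //	repoNameRE.MatchString("chat-ui")  // true
        //	repoNameRE.MatchString("docs.v2")  // true
        //	repoNameRE.MatchString("-scratch") // false: must start alphanumeric
        //	repoNameRE.MatchString("a/b")      // false: '/' is not allowed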
    64  
    65  // RefData stores the data for a ref.
    66  type RefData struct {
    67  	IsDelete bool
    68  	Commits  []*object.Commit
    69  }
    70  
    71  // RefDataByName represents a map of reference names to data about that ref.
    72  type RefDataByName map[plumbing.ReferenceName]*RefData
    73  
    74  func checkValidRepoName(repoName string, config libkbfs.Config) bool {
    75  	return len(repoName) >= 1 &&
    76  		uint32(len(repoName)) <= config.MaxNameBytes() &&
    77  		(os.Getenv("KBFS_GIT_REPONAME_SKIP_CHECK") != "" ||
    78  			repoNameRE.MatchString(repoName))
    79  }
    80  
    81  // For the common "repo doesn't exist" case, use the error type that the client can recognize.
    82  func castNoSuchNameError(err error, repoName string) error {
    83  	switch errors.Cause(err).(type) {
    84  	case idutil.NoSuchNameError:
    85  		return libkb.RepoDoesntExistError{
    86  			Name: repoName,
    87  		}
    88  	default:
    89  		return err
    90  	}
    91  }
    92  
    93  // CleanOldDeletedRepos completely removes any "deleted" repos that
    94  // have been deleted for longer than `minDeletedAgeForCleaning`.  The
    95  // caller is responsible for syncing any data to disk, if desired.
    96  func CleanOldDeletedRepos(
    97  	ctx context.Context, config libkbfs.Config,
    98  	tlfHandle *tlfhandle.Handle) (err error) {
    99  	fs, err := libfs.NewFS(
   100  		ctx, config, tlfHandle, data.MasterBranch,
   101  		path.Join(kbfsRepoDir, kbfsDeletedReposDir),
   102  		"" /* uniq ID isn't used for removals */, keybase1.MDPriorityGit)
   103  	switch errors.Cause(err).(type) {
   104  	case idutil.NoSuchNameError:
   105  		// Nothing to clean.
   106  		return nil
   107  	case nil:
   108  	default:
   109  		return err
   110  	}
   111  
   112  	deletedRepos, err := fs.ReadDir("/")
   113  	if err != nil {
   114  		return err
   115  	}
   116  
   117  	if len(deletedRepos) == 0 {
   118  		return nil
   119  	}
   120  
   121  	log := config.MakeLogger("")
   122  	now := config.Clock().Now()
   123  
   124  	log.CDebugf(ctx, "Checking %d deleted repos for cleaning in %s",
   125  		len(deletedRepos), tlfHandle.GetCanonicalPath())
   126  	defer func() {
   127  		log.CDebugf(ctx, "Done checking deleted repos: %+v", err)
   128  	}()
   129  	for _, fi := range deletedRepos {
   130  		parts := strings.Split(fi.Name(), "-")
   131  		if len(parts) < 2 {
   132  			log.CDebugf(ctx,
   133  				"Ignoring deleted repo name with wrong format: %s", fi.Name())
   134  			continue
   135  		}
   136  
   137  		deletedTimeUnixNano, err := strconv.ParseInt(
   138  			parts[len(parts)-1], 10, 64)
   139  		if err != nil {
   140  			log.CDebugf(ctx,
   141  				"Ignoring deleted repo name with wrong format: %s: %+v",
   142  				fi.Name(), err)
   143  			continue
   144  		}
   145  
   146  		deletedTime := time.Unix(0, deletedTimeUnixNano)
   147  		if deletedTime.Add(minDeletedAgeForCleaning).After(now) {
   148  			// Repo was deleted too recently.
   149  			continue
   150  		}
   151  
   152  		log.CDebugf(ctx, "Cleaning deleted repo %s", fi.Name())
   153  		err = libfs.RecursiveDelete(ctx, fs, fi)
   154  		if err != nil {
   155  			return err
   156  		}
   157  	}
   158  	return nil
   159  }
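
        // A rough sketch of the name format parsed above; DeleteRepo (below)
        // appends the device's verifying key and a nanosecond timestamp to
        // the normalized repo name, so an entry looks roughly like
        // "myrepo<verifying-key>-1700000000000000000":
        //
        //	nanos, err := strconv.ParseInt("1700000000000000000", 10, 64)
        //	if err == nil {
        //		deletedAt := time.Unix(0, nanos) // 2023-11-14 22:13:20 UTC
        //		_ = deletedAt
        //	}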
   160  
   161  // CleanOldDeletedReposTimeLimited is the same as
   162  // `CleanOldDeletedRepos`, except it limits the time spent on
   163  // cleaning, deleting as much data as possible within the given time
   164  // limit (without returning an error).
   165  func CleanOldDeletedReposTimeLimited(
   166  	ctx context.Context, config libkbfs.Config,
   167  	tlfHandle *tlfhandle.Handle) error {
   168  	ctx, cancel := context.WithTimeout(ctx, cleaningTimeLimit)
   169  	defer cancel()
   170  	err := CleanOldDeletedRepos(ctx, config, tlfHandle)
   171  	switch errors.Cause(err) {
   172  	case context.DeadlineExceeded, context.Canceled:
   173  		return nil
   174  	default:
   175  		if _, ok := errors.Cause(err).(libkbfs.OfflineUnsyncedError); ok {
   176  			return nil
   177  		}
   178  		return err
   179  	}
   180  }
   181  
   182  // UpdateRepoMD lets the Keybase service know that a repo's MD has
   183  // been updated.
   184  func UpdateRepoMD(ctx context.Context, config libkbfs.Config,
   185  	tlfHandle *tlfhandle.Handle, fs billy.Filesystem,
   186  	pushType keybase1.GitPushType,
   187  	oldRepoName string, refDataByName RefDataByName) error {
   188  	folder := tlfHandle.ToFavorite().ToKBFolderHandle(false)
   189  
   190  	// Get the user-formatted repo name.
   191  	f, err := fs.Open(kbfsConfigName)
   192  	if err != nil {
   193  		return err
   194  	}
   195  	defer f.Close()
   196  	buf, err := io.ReadAll(f)
   197  	if err != nil {
   198  		return err
   199  	}
   200  	c, err := configFromBytes(buf)
   201  	if err != nil {
   202  		return err
   203  	}
   204  
   205  	gitRefMetadata := make([]keybase1.GitRefMetadata, 0, len(refDataByName))
   206  	for refName, refData := range refDataByName {
   207  		hasMoreCommits := false
   208  		kbCommits := make([]keybase1.GitCommit, 0, len(refData.Commits))
   209  		for _, c := range refData.Commits {
   210  			if c == CommitSentinelValue {
   211  				// Accept a sentinel value at the end of the commit list that
   212  				// indicates that there would have been more commits, but we
   213  				// stopped due to a cap.
   214  				hasMoreCommits = true
   215  				break
   216  			}
   217  			kbCommits = append(kbCommits, keybase1.GitCommit{
   218  				CommitHash:  hex.EncodeToString(c.Hash[:]),
   219  				Message:     c.Message,
   220  				AuthorName:  c.Author.Name,
   221  				AuthorEmail: c.Author.Email,
   222  				Ctime:       keybase1.Time(c.Author.When.Unix()),
   223  			})
   224  		}
   225  		gitRefMetadata = append(gitRefMetadata, keybase1.GitRefMetadata{
   226  			RefName:              string(refName),
   227  			Commits:              kbCommits,
   228  			MoreCommitsAvailable: hasMoreCommits,
   229  			IsDelete:             refData.IsDelete,
   230  		})
   231  	}
   232  	log := config.MakeLogger("")
   233  	log.CDebugf(ctx, "Putting git MD update")
   234  	err = config.KBPKI().PutGitMetadata(
   235  		ctx, folder, keybase1.RepoID(c.ID.String()),
   236  		keybase1.GitLocalMetadata{
   237  			RepoName:         keybase1.GitRepoName(c.Name),
   238  			Refs:             gitRefMetadata,
   239  			PushType:         pushType,
   240  			PreviousRepoName: keybase1.GitRepoName(oldRepoName),
   241  		})
   242  	if err != nil {
   243  		// Just log the put error, it shouldn't block the success of
   244  		// the overall git operation.
   245  		log.CDebugf(ctx, "Failed to put git metadata: %+v", err)
   246  	}
   247  	return nil
   248  }
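
        // A minimal sketch of how a caller might assemble RefDataByName for a
        // single-branch push, using CommitSentinelValue to mark a truncated
        // commit list; `commits`, the surrounding variables, and the DEFAULT
        // push type are assumptions about a typical caller:
        //
        //	refs := RefDataByName{
        //		"refs/heads/master": &RefData{
        //			IsDelete: false,
        //			Commits:  append(commits, CommitSentinelValue),
        //		},
        //	}
        //	err := UpdateRepoMD(ctx, config, tlfHandle, repoFS,
        //		keybase1.GitPushType_DEFAULT, "", refs)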
   249  
   250  func normalizeRepoName(repoName string) string {
   251  	return strings.TrimSuffix(strings.ToLower(repoName), gitSuffixToIgnore)
   252  }
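
        // For example (illustrative only), a user-visible name and its
        // ".git"-suffixed form map to the same on-disk directory:
        //
        //	normalizeRepoName("MyRepo")     // "myrepo"
        //	normalizeRepoName("MyRepo.git") // "myrepo"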
   253  
   254  func takeConfigLock(
   255  	fs *libfs.FS, tlfHandle *tlfhandle.Handle, repoName string) (
   256  	closer io.Closer, err error) {
   257  	// Double-check that the namespace of the FS matches the
   258  	// normalized repo name, so that we're locking only the config
   259  	// file within the actual repo we care about.  This is appended to
   260  	// the default locknamespace for a libfs.FS instance.
   261  	normalizedRepoName := normalizeRepoName(repoName)
   262  	nsPath := path.Join(
   263  		"/keybase", tlfHandle.Type().String(), kbfsRepoDir, normalizedRepoName)
   264  	expectedNamespace := make([]byte, len(nsPath))
   265  	copy(expectedNamespace, nsPath)
   266  	if !bytes.Equal(expectedNamespace, fs.GetLockNamespace()) {
   267  		return nil, errors.Errorf("Unexpected FS namespace for repo %s: %s",
   268  			repoName, string(fs.GetLockNamespace()))
   269  	}
   270  
   271  	// Lock a temp file to avoid a duplicate create of the actual
   272  	// file.  TODO: clean up this file at some point?
   273  	f, err := fs.Create(kbfsConfigNameTemp)
   274  	if err != nil && !os.IsExist(err) {
   275  		return nil, err
   276  	} else if os.IsExist(err) {
   277  		f, err = fs.Open(kbfsConfigNameTemp)
   278  	}
   279  	if err != nil {
   280  		return nil, err
   281  	}
   282  	defer func() {
   283  		if err != nil {
   284  			f.Close()
   285  		}
   286  	}()
   287  
   288  	// Take the lock
   289  	err = f.Lock()
   290  	if err != nil {
   291  		return nil, err
   292  	}
   293  	return f, nil
   294  }
   295  
   296  func makeExistingRepoError(
   297  	ctx context.Context, config libkbfs.Config, repoFS billy.Filesystem,
   298  	repoName string) error {
   299  	config.MakeLogger("").CDebugf(
   300  		ctx, "Config file for repo %s already exists", repoName)
   301  	f, err := repoFS.Open(kbfsConfigName)
   302  	if err != nil {
   303  		return err
   304  	}
   305  	defer f.Close()
   306  	buf, err := io.ReadAll(f)
   307  	if err != nil {
   308  		return err
   309  	}
   310  	existingConfig, err := configFromBytes(buf)
   311  	if err != nil {
   312  		return err
   313  	}
   314  	return errors.WithStack(libkb.RepoAlreadyExistsError{
   315  		DesiredName:  repoName,
   316  		ExistingName: existingConfig.Name,
   317  		ExistingID:   existingConfig.ID.String(),
   318  	})
   319  }
   320  
   321  func createNewRepoAndID(
   322  	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
   323  	repoName string, fs *libfs.FS) (repoID ID, err error) {
   324  	// TODO: take a global repo lock here to make sure only one
   325  	// client generates the repo ID.
   326  	repoID, err = makeRandomID()
   327  	if err != nil {
   328  		return NullID, err
   329  	}
   330  	config.MakeLogger("").CDebugf(ctx,
   331  		"Creating a new repo %s in %s: repoID=%s",
   332  		repoName, tlfHandle.GetCanonicalPath(), repoID)
   333  
   334  	lockFile, err := takeConfigLock(fs, tlfHandle, repoName)
   335  	if err != nil {
   336  		return NullID, err
   337  	}
   338  	defer func() {
   339  		closeErr := lockFile.Close()
   340  		if err == nil {
   341  			err = closeErr
   342  		}
   343  	}()
   344  
   345  	_, err = fs.Stat(kbfsConfigName)
   346  	if err == nil {
   347  		// The config file already exists, so someone else already
   348  		// initialized the repo.
   349  		return NullID, makeExistingRepoError(ctx, config, fs, repoName)
   350  	} else if !os.IsNotExist(err) {
   351  		return NullID, err
   352  	}
   353  
   354  	f, err := fs.Create(kbfsConfigName)
   355  	if err != nil {
   356  		return NullID, err
   357  	}
   358  	defer f.Close()
   359  
   360  	session, err := config.KBPKI().GetCurrentSession(ctx)
   361  	if err != nil {
   362  		return NullID, err
   363  	}
   364  	c := &Config{
   365  		ID:         repoID,
   366  		Name:       repoName,
   367  		CreatorUID: session.UID.String(),
   368  		Ctime:      config.Clock().Now().UnixNano(),
   369  	}
   370  	buf, err := c.toBytes()
   371  	if err != nil {
   372  		return NullID, err
   373  	}
   374  	_, err = f.Write(buf)
   375  	if err != nil {
   376  		return NullID, err
   377  	}
   378  
   379  	err = UpdateRepoMD(
   380  		ctx, config, tlfHandle, fs, keybase1.GitPushType_CREATEREPO, "", nil)
   381  	if err != nil {
   382  		return NullID, err
   383  	}
   384  
   385  	return repoID, nil
   386  }
   387  
   388  func lookupOrCreateDir(ctx context.Context, config libkbfs.Config,
   389  	n libkbfs.Node, name string) (libkbfs.Node, error) {
   390  	newNode, _, err := config.KBFSOps().Lookup(ctx, n, n.ChildName(name))
   391  	switch errors.Cause(err).(type) {
   392  	case idutil.NoSuchNameError:
   393  		newNode, _, err = config.KBFSOps().CreateDir(ctx, n, n.ChildName(name))
   394  		if err != nil {
   395  			return nil, err
   396  		}
   397  	case nil:
   398  	default:
   399  		return nil, err
   400  	}
   401  	return newNode, nil
   402  }
   403  
   404  type repoOpType int
   405  
   406  const (
   407  	getOrCreate repoOpType = iota
   408  	createOnly
   409  	getOnly
   410  )
   411  
   412  func getOrCreateRepoAndID(
   413  	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
   414  	repoName string, uniqID string, op repoOpType) (
   415  	fs *libfs.FS, id ID, err error) {
   416  	if !checkValidRepoName(repoName, config) {
   417  		return nil, NullID,
   418  			errors.WithStack(libkb.InvalidRepoNameError{Name: repoName})
   419  	}
   420  
   421  	rootNode, _, err := config.KBFSOps().GetOrCreateRootNode(
   422  		ctx, tlfHandle, data.MasterBranch)
   423  	if err != nil {
   424  		return nil, NullID, err
   425  	}
   426  	normalizedRepoName := normalizeRepoName(repoName)
   427  
   428  	// If the user doesn't have write access, but the repo doesn't
   429  	// exist, give them a nice error message.
   430  	repoExists := false
   431  	defer func() {
   432  		_, isWriteAccessErr := errors.Cause(err).(tlfhandle.WriteAccessError)
   433  		if !repoExists && isWriteAccessErr {
   434  			err = libkb.RepoDoesntExistError{Name: repoName}
   435  		}
   436  	}()
   437  
   438  	repoDir, err := lookupOrCreateDir(ctx, config, rootNode, kbfsRepoDir)
   439  	if err != nil {
   440  		return nil, NullID, err
   441  	}
   442  
   443  	// No need to obfuscate the repo name.
   444  	repoNamePPS := data.NewPathPartString(normalizedRepoName, nil)
   445  	_, repoEI, err := config.KBFSOps().Lookup(ctx, repoDir, repoNamePPS)
   446  	switch errors.Cause(err).(type) {
   447  	case idutil.NoSuchNameError:
   448  		if op == getOnly {
   449  			return nil, NullID,
   450  				errors.WithStack(libkb.RepoDoesntExistError{Name: repoName})
   451  		}
   452  		_, err = lookupOrCreateDir(ctx, config, repoDir, normalizedRepoName)
   453  		if err != nil {
   454  			return nil, NullID, err
   455  		}
   456  	case nil:
   457  		// If the repo was renamed to something else, we should
   458  		// override it with a new repo if we're in create-only mode.
   459  		if op == createOnly && repoEI.Type == data.Sym {
   460  			config.MakeLogger("").CDebugf(
   461  				ctx, "Overwriting symlink for repo %s with a new repo",
   462  				normalizedRepoName)
   463  			err = config.KBFSOps().RemoveEntry(ctx, repoDir, repoNamePPS)
   464  			if err != nil {
   465  				return nil, NullID, err
   466  			}
   467  			_, err = lookupOrCreateDir(ctx, config, repoDir, normalizedRepoName)
   468  			if err != nil {
   469  				return nil, NullID, err
   470  			}
   471  		}
   472  	default:
   473  		return nil, NullID, err
   474  	}
   475  
   476  	repoExists = true
   477  
   478  	fs, err = libfs.NewFS(
   479  		ctx, config, tlfHandle, data.MasterBranch,
   480  		path.Join(kbfsRepoDir, normalizedRepoName),
   481  		uniqID, keybase1.MDPriorityGit)
   482  	if err != nil {
   483  		return nil, NullID, err
   484  	}
   485  
   486  	f, err := fs.Open(kbfsConfigName)
   487  	if err != nil && !os.IsNotExist(err) {
   488  		return nil, NullID, err
   489  	} else if os.IsNotExist(err) {
   490  		if op == getOnly {
   491  			return nil, NullID, errors.WithStack(libkb.RepoDoesntExistError{Name: repoName})
   492  		}
   493  
   494  		// Create a new repo ID.
   495  		repoID, err := createNewRepoAndID(ctx, config, tlfHandle, repoName, fs)
   496  		if err != nil {
   497  			return nil, NullID, err
   498  		}
   499  		fs.SetLockNamespace(repoID.Bytes())
   500  		return fs, repoID, nil
   501  	}
   502  	defer f.Close()
   503  
   504  	buf, err := io.ReadAll(f)
   505  	if err != nil {
   506  		return nil, NullID, err
   507  	}
   508  	c, err := configFromBytes(buf)
   509  	if err != nil {
   510  		return nil, NullID, err
   511  	}
   512  
   513  	if op == createOnly {
   514  		// If this was already created, but we were expected to create
   515  		// it, then send back an error.
   516  		return nil, NullID, libkb.RepoAlreadyExistsError{
   517  			DesiredName:  repoName,
   518  			ExistingName: c.Name,
   519  			ExistingID:   c.ID.String(),
   520  		}
   521  	}
   522  
   523  	fs.SetLockNamespace(c.ID.Bytes())
   524  
   525  	return fs, c.ID, nil
   526  }
   527  
   528  // GetOrCreateRepoAndID returns a filesystem object rooted at the
   529  // specified repo, along with the stable repo ID.  If the repo hasn't
   530  // been created yet, it generates a new ID and creates the repo.  The
   531  // caller is responsible for syncing the FS and flushing the journal,
   532  // if desired.
   533  func GetOrCreateRepoAndID(
   534  	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
   535  	repoName string, uniqID string) (*libfs.FS, ID, error) {
   536  	return getOrCreateRepoAndID(
   537  		ctx, config, tlfHandle, repoName, uniqID, getOrCreate)
   538  }
   539  
   540  // GetRepoAndID returns a filesystem object rooted at the
   541  // specified repo, along with the stable repo ID, if it already
   542  // exists.
   543  func GetRepoAndID(
   544  	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
   545  	repoName string, uniqID string) (*libfs.FS, ID, error) {
   546  	return getOrCreateRepoAndID(
   547  		ctx, config, tlfHandle, repoName, uniqID, getOnly)
   548  }
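
        // A minimal usage sketch, assuming the caller already has a KBFS
        // config and TLF handle; "myrepo" and `uniqID` are placeholders:
        //
        //	fs, repoID, err := GetOrCreateRepoAndID(
        //		ctx, config, tlfHandle, "myrepo", uniqID)
        //	if err != nil {
        //		return err
        //	}
        //	// ... read or write the repo through `fs`, keyed by `repoID` ...
        //	err = fs.SyncAll() // the caller is responsible for syncing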
   549  
   550  func makeUniqueID(ctx context.Context, config libkbfs.Config) (string, error) {
   551  	// Create a unique ID using the verifying key and the `config`
   552  	// object, which should be unique to each call in practice.
   553  	session, err := config.KBPKI().GetCurrentSession(ctx)
   554  	if err != nil {
   555  		return "", err
   556  	}
   557  	return fmt.Sprintf("%s-%p", session.VerifyingKey.String(), config), nil
   558  }
   559  
   560  // CreateRepoAndID returns a new stable repo ID for the provided
   561  // repoName in the given TLF.  If the repo has already been created,
   562  // it returns a `RepoAlreadyExistsError`.  If `repoName` already
   563  // exists, but is a symlink to another renamed directory, the symlink
   564  // will be removed in favor of the new repo.  The caller is
   565  // responsible for syncing the FS and flushing the journal, if
   566  // desired.  It expects the `config` object to be unique during the
   567  // lifetime of this call.
   568  func CreateRepoAndID(
   569  	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
   570  	repoName string) (ID, error) {
   571  	uniqID, err := makeUniqueID(ctx, config)
   572  	if err != nil {
   573  		return NullID, err
   574  	}
   575  
   576  	fs, id, err := getOrCreateRepoAndID(
   577  		ctx, config, tlfHandle, repoName, uniqID, createOnly)
   578  	if err != nil {
   579  		return NullID, err
   580  	}
   581  	err = fs.SyncAll()
   582  	if err != nil {
   583  		return NullID, err
   584  	}
   585  	return id, err
   586  }
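
        // A sketch of tolerating a creation race; falling back to
        // GetRepoAndID (and the empty unique ID) is an assumption about the
        // caller, not something this file prescribes:
        //
        //	id, err := CreateRepoAndID(ctx, config, tlfHandle, repoName)
        //	if _, ok := errors.Cause(err).(libkb.RepoAlreadyExistsError); ok {
        //		// Someone else created it first; open the existing repo.
        //		_, id, err = GetRepoAndID(ctx, config, tlfHandle, repoName, "")
        //	}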
   587  
   588  // DeleteRepo "deletes" the given repo in the given TLF.  Right now it
   589  // simply moves the repo out of the way to a special directory, to
   590  // allow any concurrent writers to finish their pushes without
   591  // triggering conflict resolution.  The caller is responsible for
   592  // syncing the FS and flushing the journal, if desired.  It expects
   593  // the `config` object to be unique during the lifetime of this call.
   594  func DeleteRepo(
   595  	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
   596  	repoName string) error {
   597  	// Get the current session; its verifying key (plus the timestamp
   598  	// below) is used to give the deleted repo a unique new name.
   599  	session, err := config.KBPKI().GetCurrentSession(ctx)
   600  	if err != nil {
   601  		return err
   602  	}
   603  
   604  	kbfsOps := config.KBFSOps()
   605  	rootNode, _, err := kbfsOps.GetOrCreateRootNode(
   606  		ctx, tlfHandle, data.MasterBranch)
   607  	if err != nil {
   608  		return err
   609  	}
   610  	normalizedRepoName := normalizeRepoName(repoName)
   611  
   612  	repoNode, _, err := kbfsOps.Lookup(
   613  		ctx, rootNode, rootNode.ChildName(kbfsRepoDir))
   614  	if err != nil {
   615  		return castNoSuchNameError(err, repoName)
   616  	}
   617  
   618  	// No need to obfuscate the repo name.
   619  	repoNamePPS := data.NewPathPartString(normalizedRepoName, nil)
   620  	_, _, err = kbfsOps.Lookup(ctx, repoNode, repoNamePPS)
   621  	if err != nil {
   622  		return castNoSuchNameError(err, repoName)
   623  	}
   624  
   625  	ctx = context.WithValue(ctx, libkbfs.CtxAllowNameKey, kbfsDeletedReposDir)
   626  	deletedReposNode, err := lookupOrCreateDir(
   627  		ctx, config, repoNode, kbfsDeletedReposDir)
   628  	if err != nil {
   629  		return err
   630  	}
   631  
   632  	// For now, just rename the repo out of the way, using the device's
   633  	// verifying key and the current time in nanoseconds to make
   634  	// uniqueness probable.
   635  	dirSuffix := fmt.Sprintf(
   636  		"%s-%d", session.VerifyingKey.String(), config.Clock().Now().UnixNano())
   637  	return kbfsOps.Rename(
   638  		ctx, repoNode, repoNamePPS, deletedReposNode,
   639  		deletedReposNode.ChildName(normalizedRepoName+dirSuffix))
   640  }
   641  
   642  func renameRepoInConfigFile(
   643  	ctx context.Context, repoFS billy.Filesystem, newRepoName string) error {
   644  	// Assume lock file is already taken for both the old repo and the
   645  	// new one.
   646  	f, err := repoFS.OpenFile(kbfsConfigName, os.O_RDWR, 0600)
   647  	if err != nil {
   648  		return err
   649  	}
   650  	defer f.Close()
   651  	buf, err := io.ReadAll(f)
   652  	if err != nil {
   653  		return err
   654  	}
   655  	c, err := configFromBytes(buf)
   656  	if err != nil {
   657  		return err
   658  	}
   659  	c.Name = newRepoName
   660  	buf, err = c.toBytes()
   661  	if err != nil {
   662  		return err
   663  	}
   664  	_, err = f.Seek(0, io.SeekStart)
   665  	if err != nil {
   666  		return err
   667  	}
   668  	err = f.Truncate(0)
   669  	if err != nil {
   670  		return err
   671  	}
   672  	_, err = f.Write(buf)
   673  	if err != nil {
   674  		return err
   675  	}
   676  	return nil
   677  }
   678  
   679  // RenameRepo renames the repo from an old name to a new name.  It
   680  // leaves a symlink behind so that old remotes will continue to work.
   681  // The caller is responsible for syncing the FS and flushing the
   682  // journal, if desired.
   683  func RenameRepo(
   684  	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
   685  	oldRepoName, newRepoName string) (err error) {
   686  	if !checkValidRepoName(newRepoName, config) {
   687  		return errors.WithStack(libkb.InvalidRepoNameError{Name: newRepoName})
   688  	}
   689  
   690  	kbfsOps := config.KBFSOps()
   691  	rootNode, _, err := kbfsOps.GetOrCreateRootNode(
   692  		ctx, tlfHandle, data.MasterBranch)
   693  	if err != nil {
   694  		return err
   695  	}
   696  	normalizedOldRepoName := normalizeRepoName(oldRepoName)
   697  	normalizedNewRepoName := normalizeRepoName(newRepoName)
   698  
   699  	repoNode, _, err := kbfsOps.Lookup(
   700  		ctx, rootNode, rootNode.ChildName(kbfsRepoDir))
   701  	if err != nil {
   702  		return err
   703  	}
   704  
   705  	// Does the old repo definitely exist?
   706  	_, _, err = kbfsOps.Lookup(
   707  		ctx, repoNode, repoNode.ChildName(normalizedOldRepoName))
   708  	if err != nil {
   709  		return err
   710  	}
   711  
   712  	if oldRepoName == newRepoName {
   713  		// The names are the same, nothing else to do.
   714  		return nil
   715  	}
   716  
   717  	fs, err := libfs.NewFS(
   718  		ctx, config, tlfHandle, data.MasterBranch, path.Join(kbfsRepoDir),
   719  		"", keybase1.MDPriorityGit)
   720  	if err != nil {
   721  		return err
   722  	}
   723  
   724  	oldRepoFS, err := fs.Chroot(normalizedOldRepoName)
   725  	if err != nil {
   726  		return err
   727  	}
   728  
   729  	// Take locks in both repos during rename (same lock that's taken
   730  	// for new repo creation).
   731  	oldLockFile, err := takeConfigLock(
   732  		oldRepoFS.(*libfs.FS), tlfHandle, oldRepoName)
   733  	if err != nil {
   734  		return err
   735  	}
   736  	defer func() {
   737  		closeErr := oldLockFile.Close()
   738  		if err == nil {
   739  			err = closeErr
   740  		}
   741  	}()
   742  
   743  	if normalizedOldRepoName == normalizedNewRepoName {
   744  		// All we need to do is update the name in the config file,
   745  		// and the MD.
   746  		err = renameRepoInConfigFile(ctx, oldRepoFS, newRepoName)
   747  		if err != nil {
   748  			return err
   749  		}
   750  		// We pass in `oldRepoFS`, which now has the new repo name in its
   751  		// config.
   752  		return UpdateRepoMD(ctx, config, tlfHandle, oldRepoFS,
   753  			keybase1.GitPushType_RENAMEREPO, oldRepoName, nil)
   754  	}
   755  
   756  	// Does the new repo not exist yet?  (No need to obfuscate the repo name.)
   757  	repoNamePPS := data.NewPathPartString(normalizedNewRepoName, nil)
   758  	_, ei, err := kbfsOps.Lookup(ctx, repoNode, repoNamePPS)
   759  	switch errors.Cause(err).(type) {
   760  	case idutil.NoSuchNameError:
   761  		// The happy path.
   762  	case nil:
   763  		if ei.Type == data.Sym {
   764  			config.MakeLogger("").CDebugf(
   765  				ctx, "Overwriting symlink for repo %s with a new repo",
   766  				normalizedNewRepoName)
   767  			err = config.KBFSOps().RemoveEntry(ctx, repoNode, repoNamePPS)
   768  			if err != nil {
   769  				return err
   770  			}
   771  		} else {
   772  			newRepoFS, err := fs.Chroot(normalizedNewRepoName)
   773  			if err != nil {
   774  				return err
   775  			}
   776  			// Someone else already created and initialized the repo.
   777  			return makeExistingRepoError(ctx, config, newRepoFS, newRepoName)
   778  		}
   779  	default:
   780  		return err
   781  	}
   782  
   783  	// Make the new repo subdir just so we can take the lock inside
   784  	// the new repo.  (We'll delete the new dir before the rename.)
   785  	err = fs.MkdirAll(normalizedNewRepoName, 0777)
   786  	if err != nil {
   787  		return err
   788  	}
   789  	newRepoFS, err := fs.Chroot(normalizedNewRepoName)
   790  	if err != nil {
   791  		return err
   792  	}
   793  	newLockFile, err := takeConfigLock(
   794  		newRepoFS.(*libfs.FS), tlfHandle, newRepoName)
   795  	if err != nil {
   796  		return err
   797  	}
   798  	defer func() {
   799  		closeErr := newLockFile.Close()
   800  		if err == nil {
   801  			err = closeErr
   802  		}
   803  	}()
   804  
   805  	// Delete this new dir (created just to take the lock) before the rename.
   806  	fi, err := fs.Stat(normalizedNewRepoName)
   807  	if err != nil {
   808  		return err
   809  	}
   810  	err = libfs.RecursiveDelete(ctx, fs, fi)
   811  	if err != nil {
   812  		return err
   813  	}
   814  
   815  	// Now update the old config file and rename, and leave a symlink
   816  	// behind.  TODO: if any of the modifying steps below fail, we
   817  	// should technically clean up any modifications before return, so
   818  	// they don't get flushed.  However, with journaling on these are
   819  	// all local operations and all very unlikely to fail.
   820  	err = renameRepoInConfigFile(ctx, oldRepoFS, newRepoName)
   821  	if err != nil {
   822  		return err
   823  	}
   824  	err = fs.Rename(normalizedOldRepoName, normalizedNewRepoName)
   825  	if err != nil {
   826  		return err
   827  	}
   828  	err = fs.Symlink(normalizedNewRepoName, normalizedOldRepoName)
   829  	if err != nil {
   830  		return err
   831  	}
   832  	newRepoFS, err = fs.Chroot(normalizedNewRepoName)
   833  	if err != nil {
   834  		return err
   835  	}
   836  	return UpdateRepoMD(ctx, config, tlfHandle, newRepoFS,
   837  		keybase1.GitPushType_RENAMEREPO, oldRepoName, nil)
   838  }
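
        // A sketch of the layout after a rename (names are placeholders):
        //
        //	err := RenameRepo(ctx, config, tlfHandle, "oldname", "newname")
        //	// .kbfs_git/newname  <- the repo, with its kbfs_config updated
        //	// .kbfs_git/oldname  <- symlink to "newname", so old remotes keep working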
   839  
   840  // GCOptions describes options for garbage collection.
   841  type GCOptions struct {
   842  	// The maximum number of loose refs we will tolerate; if there are
   843  	// more loose refs, we should pack them.
   844  	MaxLooseRefs int
   845  	// The minimum number of potentially-expired loose objects we need
   846  	// to start the pruning process.  If < 0, pruning will not be done.
   847  	PruneMinLooseObjects int
   848  	// Any unreachable objects older than this time are subject to
   849  	// pruning.
   850  	PruneExpireTime time.Time
   851  	// The maximum number of object packs we will tolerate; if there
   852  	// are more object packs, we should re-pack all the objects.  If
   853  	// < 0, re-packing will not be done.
   854  	MaxObjectPacks int
   855  }
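
        // A sketch of options a caller might pass to GCRepo below; these
        // thresholds are arbitrary placeholders, not recommended defaults:
        //
        //	options := GCOptions{
        //		MaxLooseRefs:         50,
        //		PruneMinLooseObjects: 25,
        //		PruneExpireTime:      config.Clock().Now().Add(-14 * 24 * time.Hour),
        //		MaxObjectPacks:       10,
        //	}
        //	err := GCRepo(ctx, config, tlfHandle, "myrepo", options)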
   856  
   857  // NeedsGC checks the given repo storage layer against the given
   858  // options to see what kinds of GC are needed on the repo.
   859  func NeedsGC(storage storage.Storer, options GCOptions) (
   860  	doPackRefs bool, numLooseRefs int, doPruneLoose, doObjectRepack bool,
   861  	numObjectPacks int, err error) {
   862  	numLooseRefs, err = storage.CountLooseRefs()
   863  	if err != nil {
   864  		return false, 0, false, false, 0, err
   865  	}
   866  
   867  	doPackRefs = numLooseRefs > options.MaxLooseRefs
   868  
   869  	if options.PruneMinLooseObjects >= 0 {
   870  		los, ok := storage.(storer.LooseObjectStorer)
   871  		if !ok {
   872  			panic("storage is unexpectedly not a LooseObjectStorer")
   873  		}
   874  
   875  		// Count the number of loose objects that are older than the
   876  		// expire time, to see if pruning is needed.
   877  		numLooseMaybePrune := 0
   878  		err = los.ForEachObjectHash(func(h plumbing.Hash) error {
   879  			t, err := los.LooseObjectTime(h)
   880  			if err != nil {
   881  				return err
   882  			}
   883  			if t.Before(options.PruneExpireTime) {
   884  				numLooseMaybePrune++
   885  				if numLooseMaybePrune >= options.PruneMinLooseObjects {
   886  					doPruneLoose = true
   887  					return storer.ErrStop
   888  				}
   889  			}
   890  			return nil
   891  		})
   892  		if err != nil {
   893  			return false, 0, false, false, 0, err
   894  		}
   895  	}
   896  
   897  	pos, ok := storage.(storer.PackedObjectStorer)
   898  	if !ok {
   899  		panic("storage is unexpectedly not a PackedObjectStorer")
   900  	}
   901  
   902  	packs, err := pos.ObjectPacks()
   903  	if err != nil {
   904  		return false, 0, false, false, 0, err
   905  	}
   906  	numObjectPacks = len(packs)
   907  	doObjectRepack = options.MaxObjectPacks >= 0 &&
   908  		numObjectPacks > options.MaxObjectPacks
   909  
   910  	return doPackRefs, numLooseRefs, doPruneLoose,
   911  		doObjectRepack, numObjectPacks, nil
   912  }
   913  
   914  func markSuccessfulGC(
   915  	ctx context.Context, config libkbfs.Config, fs billy.Filesystem) (
   916  	err error) {
   917  	changer, ok := fs.(billy.Change)
   918  	if !ok {
   919  		return errors.New("FS does not handle changing mtimes")
   920  	}
   921  
   922  	f, err := fs.Create(repoGCLockFileName)
   923  	if err != nil {
   924  		return err
   925  	}
   926  	err = f.Close()
   927  	if err != nil {
   928  		return err
   929  	}
   930  	return changer.Chtimes(
   931  		repoGCLockFileName, time.Time{}, config.Clock().Now())
   932  }
   933  
   934  func canDoGC(
   935  	ctx context.Context, config libkbfs.Config, fs *libfs.FS,
   936  	log logger.Logger) (bool, error) {
   937  	log.CDebugf(ctx, "Locking for GC")
   938  	f, err := fs.Create(repoGCLockFileName)
   939  	if err != nil {
   940  		return false, err
   941  	}
   942  	defer func() {
   943  		closeErr := f.Close()
   944  		if err == nil {
   945  			err = closeErr
   946  		}
   947  	}()
   948  	err = f.Lock()
   949  	if err != nil {
   950  		return false, err
   951  	}
   952  
   953  	return canDoWork(
   954  		ctx, config.MDServer(), config.Clock(), fs,
   955  		repoGCInProgressFileName, gcTimeLimit, log)
   956  }
   957  
   958  // GCRepo runs garbage collection on the specified repo, if it exceeds
   959  // any of the thresholds provided in `options`.
   960  func GCRepo(
   961  	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
   962  	repoName string, options GCOptions) (err error) {
   963  	log := config.MakeLogger("")
   964  	log.CDebugf(ctx, "Checking whether GC is needed for %s/%s",
   965  		tlfHandle.GetCanonicalName(), repoName)
   966  
   967  	uniqID, err := makeUniqueID(ctx, config)
   968  	if err != nil {
   969  		return err
   970  	}
   971  
   972  	fs, _, err := getOrCreateRepoAndID(
   973  		ctx, config, tlfHandle, repoName, uniqID, getOnly)
   974  	if err != nil {
   975  		return err
   976  	}
   977  	defer func() {
   978  		if err == nil {
   979  			err = markSuccessfulGC(ctx, config, fs)
   980  		}
   981  	}()
   982  
   983  	fsStorer, err := filesystem.NewStorage(fs)
   984  	if err != nil {
   985  		return err
   986  	}
   987  	var fsStorage storage.Storer = fsStorer
   988  
   989  	// Wrap it in an on-demand storer, so we don't try to read all the
   990  	// objects of big repos into memory at once.
   991  	var storage storage.Storer
   992  	storage, err = NewOnDemandStorer(fsStorage)
   993  	if err != nil {
   994  		return err
   995  	}
   996  
   997  	// Wrap it in an "ephemeral" config with a fixed pack window, so
   998  	// we create packs with delta compression, but don't persist the
   999  	// pack window setting to disk.
  1000  	storage = &ephemeralGitConfigWithFixedPackWindow{
  1001  		storage,
  1002  		fsStorage.(storer.Initializer),
  1003  		fsStorage.(storer.PackfileWriter),
  1004  		fsStorage.(storer.LooseObjectStorer),
  1005  		fsStorage.(storer.PackedObjectStorer),
  1006  		10,
  1007  	}
  1008  
  1009  	doPackRefs, _, doPruneLoose, doObjectRepack, _, err := NeedsGC(
  1010  		storage, options)
  1011  	if err != nil {
  1012  		return err
  1013  	}
  1014  	if !doPackRefs && !doPruneLoose && !doObjectRepack {
  1015  		log.CDebugf(ctx, "Skipping GC")
  1016  		return nil
  1017  	}
  1018  
  1019  	doGC, err := canDoGC(ctx, config, fs, log)
  1020  	if err != nil {
  1021  		return err
  1022  	}
  1023  	if !doGC {
  1024  		log.CDebugf(ctx, "Skipping GC due to other worker")
  1025  		return nil
  1026  	}
  1027  
  1028  	defer func() {
  1029  		removeErr := fs.Remove(repoGCInProgressFileName)
  1030  		if err == nil {
  1031  			err = removeErr
  1032  		}
  1033  	}()
  1034  
  1035  	// Check the GC thresholds again since they might have changed
  1036  	// while getting the lock.
  1037  	doPackRefs, numLooseRefs, doPruneLoose, doObjectRepack,
  1038  		numObjectPacks, err := NeedsGC(storage, options)
  1039  	if err != nil {
  1040  		return err
  1041  	}
  1042  	if !doPackRefs && !doPruneLoose && !doObjectRepack {
  1043  		log.CDebugf(ctx, "GC no longer needed")
  1044  		return nil
  1045  	}
  1046  
  1047  	if doPackRefs {
  1048  		log.CDebugf(ctx, "Packing %d loose refs", numLooseRefs)
  1049  		err = storage.PackRefs()
  1050  		if err != nil {
  1051  			return err
  1052  		}
  1053  	}
  1054  
  1055  	if doPruneLoose {
  1056  		repo, err := gogit.Open(storage, nil)
  1057  		if err != nil {
  1058  			return err
  1059  		}
  1060  		err = repo.Prune(gogit.PruneOptions{
  1061  			OnlyObjectsOlderThan: options.PruneExpireTime,
  1062  			Handler:              repo.DeleteObject,
  1063  		})
  1064  		if err != nil {
  1065  			return err
  1066  		}
  1067  	}
  1068  
  1069  	if doObjectRepack {
  1070  		log.CDebugf(ctx, "Re-packing %d object packs", numObjectPacks)
  1071  		repo, err := gogit.Open(storage, nil)
  1072  		if err != nil {
  1073  			return err
  1074  		}
  1075  		err = repo.RepackObjects(&gogit.RepackConfig{
  1076  			OnlyDeletePacksOlderThan: options.PruneExpireTime,
  1077  		})
  1078  		if err != nil {
  1079  			return err
  1080  		}
  1081  	}
  1082  
  1084  	return nil
  1085  }
  1086  
  1087  // LastGCTime returns the last time the repo was successfully
  1088  // garbage-collected.
  1089  func LastGCTime(ctx context.Context, fs billy.Filesystem) (
  1090  	time.Time, error) {
  1091  	fi, err := fs.Stat(repoGCLockFileName)
  1092  	if os.IsNotExist(err) {
  1093  		return time.Time{}, nil
  1094  	} else if err != nil {
  1095  		return time.Time{}, err
  1096  	}
  1097  
  1098  	return fi.ModTime(), nil
  1099  }
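
        // A sketch of rate-limiting GC with LastGCTime; the 24-hour interval
        // and the surrounding variables are assumptions about the caller:
        //
        //	last, err := LastGCTime(ctx, fs)
        //	if err != nil {
        //		return err
        //	}
        //	if config.Clock().Now().Sub(last) > 24*time.Hour {
        //		return GCRepo(ctx, config, tlfHandle, repoName, options)
        //	}
        //	return nil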