github.com/keybase/client/go@v0.0.0-20241007131713-f10651d043c8/kbfs/libgit/repo.go

// Copyright 2017 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package libgit

import (
	"bytes"
	"context"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/keybase/client/go/kbfs/data"
	"github.com/keybase/client/go/kbfs/idutil"
	"github.com/keybase/client/go/kbfs/libfs"
	"github.com/keybase/client/go/kbfs/libkbfs"
	"github.com/keybase/client/go/kbfs/tlfhandle"
	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/logger"
	"github.com/keybase/client/go/protocol/keybase1"
	"github.com/pkg/errors"
	billy "gopkg.in/src-d/go-billy.v4"
	gogit "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/plumbing/storer"
	"gopkg.in/src-d/go-git.v4/storage"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
)

const (
	kbfsRepoDir              = ".kbfs_git"
	kbfsConfigName           = "kbfs_config"
	kbfsConfigNameTemp       = "._kbfs_config"
	GitSuffixToIgnore        = ".git"
	kbfsDeletedReposDir      = ".kbfs_deleted_repos"
	minDeletedAgeForCleaning = 1 * time.Hour
	cleaningTimeLimit        = 2 * time.Second
	repoGCLockFileName       = ".gc"
	repoGCInProgressFileName = ".gc_in_progress"
	gcTimeLimit              = 1 * time.Hour
)

// CommitSentinelValue marks the end of a list of commits, where there are
// still commits that haven't been read. It is the zero-value `nil`.
var CommitSentinelValue *object.Commit

// RepoNameRE is the uncompiled regex for repo names.
var RepoNameRE = `(?:[a-zA-Z0-9][a-zA-Z0-9_\.-]*)`

// This character set is what GitHub supports in repo names. It's
// probably to avoid any problems when cloning onto filesystems that
// have different Unicode decomposition schemes
// (https://en.wikipedia.org/wiki/Unicode_equivalence). There's no
// internal reason to be so restrictive, but it probably makes sense
// to start off more restrictive and then relax things later as we
// test.
var repoNameRE = regexp.MustCompile(`^(` + RepoNameRE + `)$`)

// RefData stores the data for a ref.
type RefData struct {
	IsDelete bool
	Commits  []*object.Commit
}

// RefDataByName represents a map of reference names to data about that ref.
type RefDataByName map[plumbing.ReferenceName]*RefData

func checkValidRepoName(repoName string, config libkbfs.Config) bool {
	return len(repoName) >= 1 &&
		uint32(len(repoName)) <= config.MaxNameBytes() &&
		(os.Getenv("KBFS_GIT_REPONAME_SKIP_CHECK") != "" ||
			repoNameRE.MatchString(repoName))
}

// For the common "repo doesn't exist" case, use the error type that the
// client can recognize.
func castNoSuchNameError(err error, repoName string) error {
	switch errors.Cause(err).(type) {
	case idutil.NoSuchNameError:
		return libkb.RepoDoesntExistError{
			Name: repoName,
		}
	default:
		return err
	}
}
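// The sketch below is illustrative and not part of the original source: it
// shows how the repo-name rules above behave in practice. The function name
// exampleRepoNameCheck is a hypothetical helper added only for illustration.
func exampleRepoNameCheck() {
	// Names must start with an alphanumeric character and may then contain
	// alphanumerics, underscores, dots, and dashes.
	fmt.Println(repoNameRE.MatchString("my-repo_2.0"))   // true
	fmt.Println(repoNameRE.MatchString("-leading-dash")) // false
	fmt.Println(repoNameRE.MatchString(""))              // false
}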
// CleanOldDeletedRepos completely removes any "deleted" repos that
// have been deleted for longer than `minDeletedAgeForCleaning`. The
// caller is responsible for syncing any data to disk, if desired.
func CleanOldDeletedRepos(
	ctx context.Context, config libkbfs.Config,
	tlfHandle *tlfhandle.Handle) (err error) {
	fs, err := libfs.NewFS(
		ctx, config, tlfHandle, data.MasterBranch,
		path.Join(kbfsRepoDir, kbfsDeletedReposDir),
		"" /* uniq ID isn't used for removals */, keybase1.MDPriorityGit)
	switch errors.Cause(err).(type) {
	case idutil.NoSuchNameError:
		// Nothing to clean.
		return nil
	case nil:
	default:
		return err
	}

	deletedRepos, err := fs.ReadDir("/")
	if err != nil {
		return err
	}

	if len(deletedRepos) == 0 {
		return nil
	}

	log := config.MakeLogger("")
	now := config.Clock().Now()

	log.CDebugf(ctx, "Checking %d deleted repos for cleaning in %s",
		len(deletedRepos), tlfHandle.GetCanonicalPath())
	defer func() {
		log.CDebugf(ctx, "Done checking deleted repos: %+v", err)
	}()
	for _, fi := range deletedRepos {
		parts := strings.Split(fi.Name(), "-")
		if len(parts) < 2 {
			log.CDebugf(ctx,
				"Ignoring deleted repo name with wrong format: %s", fi.Name())
			continue
		}

		deletedTimeUnixNano, err := strconv.ParseInt(
			parts[len(parts)-1], 10, 64)
		if err != nil {
			log.CDebugf(ctx,
				"Ignoring deleted repo name with wrong format: %s: %+v",
				fi.Name(), err)
			continue
		}

		deletedTime := time.Unix(0, deletedTimeUnixNano)
		if deletedTime.Add(minDeletedAgeForCleaning).After(now) {
			// Repo was deleted too recently.
			continue
		}

		log.CDebugf(ctx, "Cleaning deleted repo %s", fi.Name())
		err = libfs.RecursiveDelete(ctx, fs, fi)
		if err != nil {
			return err
		}
	}
	return nil
}

// CleanOldDeletedReposTimeLimited is the same as
// `CleanOldDeletedRepos`, except it limits the time spent on
// cleaning, deleting as much data as possible within the given time
// limit (without returning an error).
func CleanOldDeletedReposTimeLimited(
	ctx context.Context, config libkbfs.Config,
	tlfHandle *tlfhandle.Handle) error {
	ctx, cancel := context.WithTimeout(ctx, cleaningTimeLimit)
	defer cancel()
	err := CleanOldDeletedRepos(ctx, config, tlfHandle)
	switch errors.Cause(err) {
	case context.DeadlineExceeded, context.Canceled:
		return nil
	default:
		if _, ok := errors.Cause(err).(libkbfs.OfflineUnsyncedError); ok {
			return nil
		}
		return err
	}
}
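// The sketch below is illustrative and not part of the original source: it
// shows how a push handler might assemble the RefDataByName argument that
// UpdateRepoMD (below) expects, using CommitSentinelValue to mark a truncated
// commit list. The function name exampleRefData and the ref name used here
// are hypothetical.
func exampleRefData(commits []*object.Commit, truncated bool) RefDataByName {
	refData := &RefData{IsDelete: false, Commits: commits}
	if truncated {
		// A trailing sentinel tells the service that more commits exist
		// beyond the ones listed.
		refData.Commits = append(refData.Commits, CommitSentinelValue)
	}
	return RefDataByName{
		plumbing.ReferenceName("refs/heads/master"): refData,
	}
}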
// UpdateRepoMD lets the Keybase service know that a repo's MD has
// been updated.
func UpdateRepoMD(ctx context.Context, config libkbfs.Config,
	tlfHandle *tlfhandle.Handle, fs billy.Filesystem,
	pushType keybase1.GitPushType,
	oldRepoName string, refDataByName RefDataByName) error {
	folder := tlfHandle.ToFavorite().ToKBFolderHandle(false)

	// Get the user-formatted repo name.
	f, err := fs.Open(kbfsConfigName)
	if err != nil {
		return err
	}
	defer f.Close()
	buf, err := io.ReadAll(f)
	if err != nil {
		return err
	}
	c, err := configFromBytes(buf)
	if err != nil {
		return err
	}

	gitRefMetadata := make([]keybase1.GitRefMetadata, 0, len(refDataByName))
	for refName, refData := range refDataByName {
		hasMoreCommits := false
		kbCommits := make([]keybase1.GitCommit, 0, len(refData.Commits))
		for _, c := range refData.Commits {
			if c == CommitSentinelValue {
				// Accept a sentinel value at the end of the commit list that
				// indicates that there would have been more commits, but we
				// stopped due to a cap.
				hasMoreCommits = true
				break
			}
			kbCommits = append(kbCommits, keybase1.GitCommit{
				CommitHash:  hex.EncodeToString(c.Hash[:]),
				Message:     c.Message,
				AuthorName:  c.Author.Name,
				AuthorEmail: c.Author.Email,
				Ctime:       keybase1.Time(c.Author.When.Unix()),
			})
		}
		gitRefMetadata = append(gitRefMetadata, keybase1.GitRefMetadata{
			RefName:              string(refName),
			Commits:              kbCommits,
			MoreCommitsAvailable: hasMoreCommits,
			IsDelete:             refData.IsDelete,
		})
	}
	log := config.MakeLogger("")
	log.CDebugf(ctx, "Putting git MD update")
	err = config.KBPKI().PutGitMetadata(
		ctx, folder, keybase1.RepoID(c.ID.String()),
		keybase1.GitLocalMetadata{
			RepoName:         keybase1.GitRepoName(c.Name),
			Refs:             gitRefMetadata,
			PushType:         pushType,
			PreviousRepoName: keybase1.GitRepoName(oldRepoName),
		})
	if err != nil {
		// Just log the put error, it shouldn't block the success of
		// the overall git operation.
		log.CDebugf(ctx, "Failed to put git metadata: %+v", err)
	}
	return nil
}

// NormalizeRepoName lowercases repoName and removes any trailing .git suffix.
func NormalizeRepoName(repoName string) string {
	return strings.TrimSuffix(strings.ToLower(repoName), GitSuffixToIgnore)
}

func takeConfigLock(
	fs *libfs.FS, tlfHandle *tlfhandle.Handle, repoName string) (
	closer io.Closer, err error) {
	// Double-check that the namespace of the FS matches the
	// normalized repo name, so that we're locking only the config
	// file within the actual repo we care about. This is appended to
	// the default locknamespace for a libfs.FS instance.
	normalizedRepoName := NormalizeRepoName(repoName)
	nsPath := path.Join(
		"/keybase", tlfHandle.Type().String(), kbfsRepoDir, normalizedRepoName)
	expectedNamespace := make([]byte, len(nsPath))
	copy(expectedNamespace, nsPath)
	if !bytes.Equal(expectedNamespace, fs.GetLockNamespace()) {
		return nil, errors.Errorf("Unexpected FS namespace for repo %s: %s",
			repoName, string(fs.GetLockNamespace()))
	}

	// Lock a temp file to avoid a duplicate create of the actual
	// file. TODO: clean up this file at some point?
	f, err := fs.Create(kbfsConfigNameTemp)
	if err != nil && !os.IsExist(err) {
		return nil, err
	} else if os.IsExist(err) {
		f, err = fs.Open(kbfsConfigNameTemp)
	}
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			f.Close()
		}
	}()

	// Take the lock
	err = f.Lock()
	if err != nil {
		return nil, err
	}
	return f, nil
}

func makeExistingRepoError(
	ctx context.Context, config libkbfs.Config, repoFS billy.Filesystem,
	repoName string) error {
	config.MakeLogger("").CDebugf(
		ctx, "Config file for repo %s already exists", repoName)
	f, err := repoFS.Open(kbfsConfigName)
	if err != nil {
		return err
	}
	defer f.Close()
	buf, err := io.ReadAll(f)
	if err != nil {
		return err
	}
	existingConfig, err := configFromBytes(buf)
	if err != nil {
		return err
	}
	return errors.WithStack(libkb.RepoAlreadyExistsError{
		DesiredName:  repoName,
		ExistingName: existingConfig.Name,
		ExistingID:   existingConfig.ID.String(),
	})
}

func createNewRepoAndID(
	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
	repoName string, fs *libfs.FS) (repoID ID, err error) {
	// TODO: take a global repo lock here to make sure only one
	// client generates the repo ID.
	repoID, err = makeRandomID()
	if err != nil {
		return NullID, err
	}
	config.MakeLogger("").CDebugf(ctx,
		"Creating a new repo %s in %s: repoID=%s",
		repoName, tlfHandle.GetCanonicalPath(), repoID)

	lockFile, err := takeConfigLock(fs, tlfHandle, repoName)
	if err != nil {
		return NullID, err
	}
	defer func() {
		closeErr := lockFile.Close()
		if err == nil {
			err = closeErr
		}
	}()

	_, err = fs.Stat(kbfsConfigName)
	if err == nil {
		// The config file already exists, so someone else already
		// initialized the repo.
		return NullID, makeExistingRepoError(ctx, config, fs, repoName)
	} else if !os.IsNotExist(err) {
		return NullID, err
	}

	f, err := fs.Create(kbfsConfigName)
	if err != nil {
		return NullID, err
	}
	defer f.Close()

	session, err := config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return NullID, err
	}
	c := &Config{
		ID:         repoID,
		Name:       repoName,
		CreatorUID: session.UID.String(),
		Ctime:      config.Clock().Now().UnixNano(),
	}
	buf, err := c.toBytes()
	if err != nil {
		return NullID, err
	}
	_, err = f.Write(buf)
	if err != nil {
		return NullID, err
	}

	err = UpdateRepoMD(
		ctx, config, tlfHandle, fs, keybase1.GitPushType_CREATEREPO, "", nil)
	if err != nil {
		return NullID, err
	}

	return repoID, nil
}

func lookupOrCreateDir(ctx context.Context, config libkbfs.Config,
	n libkbfs.Node, name string) (libkbfs.Node, error) {
	newNode, _, err := config.KBFSOps().Lookup(ctx, n, n.ChildName(name))
	switch errors.Cause(err).(type) {
	case idutil.NoSuchNameError:
		newNode, _, err = config.KBFSOps().CreateDir(ctx, n, n.ChildName(name))
		if err != nil {
			return nil, err
		}
	case nil:
	default:
		return nil, err
	}
	return newNode, nil
}

type repoOpType int

const (
	getOrCreate repoOpType = iota
	createOnly
	getOnly
)

func getOrCreateRepoAndID(
	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
	repoName string, uniqID string, op repoOpType) (
	fs *libfs.FS, id ID, err error) {
	if !checkValidRepoName(repoName, config) {
		return nil, NullID,
			errors.WithStack(libkb.InvalidRepoNameError{Name: repoName})
	}

	rootNode, _, err := config.KBFSOps().GetOrCreateRootNode(
		ctx, tlfHandle, data.MasterBranch)
	if err != nil {
		return nil, NullID, err
	}
	normalizedRepoName := NormalizeRepoName(repoName)

	// If the user doesn't have write access, but the repo doesn't
	// exist, give them a nice error message.
	repoExists := false
	defer func() {
		_, isWriteAccessErr := errors.Cause(err).(tlfhandle.WriteAccessError)
		if !repoExists && isWriteAccessErr {
			err = libkb.RepoDoesntExistError{Name: repoName}
		}
	}()

	repoDir, err := lookupOrCreateDir(ctx, config, rootNode, kbfsRepoDir)
	if err != nil {
		return nil, NullID, err
	}

	// No need to obfuscate the repo name.
	repoNamePPS := data.NewPathPartString(normalizedRepoName, nil)
	_, repoEI, err := config.KBFSOps().Lookup(ctx, repoDir, repoNamePPS)
	switch errors.Cause(err).(type) {
	case idutil.NoSuchNameError:
		if op == getOnly {
			return nil, NullID,
				errors.WithStack(libkb.RepoDoesntExistError{Name: repoName})
		}
		_, err = lookupOrCreateDir(ctx, config, repoDir, normalizedRepoName)
		if err != nil {
			return nil, NullID, err
		}
	case nil:
		// If the repo was renamed to something else, we should
		// override it with a new repo if we're in create-only mode.
		if op == createOnly && repoEI.Type == data.Sym {
			config.MakeLogger("").CDebugf(
				ctx, "Overwriting symlink for repo %s with a new repo",
				normalizedRepoName)
			err = config.KBFSOps().RemoveEntry(ctx, repoDir, repoNamePPS)
			if err != nil {
				return nil, NullID, err
			}
			_, err = lookupOrCreateDir(ctx, config, repoDir, normalizedRepoName)
			if err != nil {
				return nil, NullID, err
			}
		}
	default:
		return nil, NullID, err
	}

	repoExists = true

	fs, err = libfs.NewFS(
		ctx, config, tlfHandle, data.MasterBranch,
		path.Join(kbfsRepoDir, normalizedRepoName),
		uniqID, keybase1.MDPriorityGit)
	if err != nil {
		return nil, NullID, err
	}

	f, err := fs.Open(kbfsConfigName)
	if err != nil && !os.IsNotExist(err) {
		return nil, NullID, err
	} else if os.IsNotExist(err) {
		if op == getOnly {
			return nil, NullID, errors.WithStack(libkb.RepoDoesntExistError{Name: repoName})
		}

		// Create a new repo ID.
		repoID, err := createNewRepoAndID(ctx, config, tlfHandle, repoName, fs)
		if err != nil {
			return nil, NullID, err
		}
		fs.SetLockNamespace(repoID.Bytes())
		return fs, repoID, nil
	}
	defer f.Close()

	buf, err := io.ReadAll(f)
	if err != nil {
		return nil, NullID, err
	}
	c, err := configFromBytes(buf)
	if err != nil {
		return nil, NullID, err
	}

	if op == createOnly {
		// If this was already created, but we were expected to create
		// it, then send back an error.
		return nil, NullID, libkb.RepoAlreadyExistsError{
			DesiredName:  repoName,
			ExistingName: c.Name,
			ExistingID:   c.ID.String(),
		}
	}

	fs.SetLockNamespace(c.ID.Bytes())

	return fs, c.ID, nil
}

// GetOrCreateRepoAndID returns a filesystem object rooted at the
// specified repo, along with the stable repo ID. If the repo hasn't
// been created yet, it generates a new ID and creates the repo. The
// caller is responsible for syncing the FS and flushing the journal,
// if desired.
func GetOrCreateRepoAndID(
	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
	repoName string, uniqID string) (*libfs.FS, ID, error) {
	return getOrCreateRepoAndID(
		ctx, config, tlfHandle, repoName, uniqID, getOrCreate)
}

// GetRepoAndID returns a filesystem object rooted at the
// specified repo, along with the stable repo ID, if it already
// exists.
func GetRepoAndID(
	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
	repoName string, uniqID string) (*libfs.FS, ID, error) {
	return getOrCreateRepoAndID(
		ctx, config, tlfHandle, repoName, uniqID, getOnly)
}

func makeUniqueID(ctx context.Context, config libkbfs.Config) (string, error) {
	// Create a unique ID using the verifying key and the `config`
	// object, which should be unique to each call in practice.
	session, err := config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s-%p", session.VerifyingKey.String(), config), nil
}
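// The sketch below is illustrative and not part of the original source: it
// shows one plausible way a caller could combine CreateRepoAndID (below) and
// GetOrCreateRepoAndID, treating RepoAlreadyExistsError as "open the existing
// repo instead". The function name exampleEnsureRepo, the repo name, and the
// unique ID string are hypothetical.
func exampleEnsureRepo(
	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle) (
	ID, error) {
	id, err := CreateRepoAndID(ctx, config, tlfHandle, "example-repo")
	if _, alreadyExists := errors.Cause(err).(libkb.RepoAlreadyExistsError); alreadyExists {
		// Someone already created the repo; open it instead of failing.
		_, id, err = GetOrCreateRepoAndID(
			ctx, config, tlfHandle, "example-repo", "example-uniq-id")
	}
	return id, err
}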
// CreateRepoAndID returns a new stable repo ID for the provided
// repoName in the given TLF. If the repo has already been created,
// it returns a `RepoAlreadyExistsError`. If `repoName` already
// exists, but is a symlink to another renamed directory, the symlink
// will be removed in favor of the new repo. The caller is
// responsible for syncing the FS and flushing the journal, if
// desired. It expects the `config` object to be unique during the
// lifetime of this call.
func CreateRepoAndID(
	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
	repoName string) (ID, error) {
	uniqID, err := makeUniqueID(ctx, config)
	if err != nil {
		return NullID, err
	}

	fs, id, err := getOrCreateRepoAndID(
		ctx, config, tlfHandle, repoName, uniqID, createOnly)
	if err != nil {
		return NullID, err
	}
	err = fs.SyncAll()
	if err != nil {
		return NullID, err
	}
	return id, err
}

// DeleteRepo "deletes" the given repo in the given TLF. Right now it
// simply moves the repo out of the way to a special directory, to
// allow any concurrent writers to finish their pushes without
// triggering conflict resolution. The caller is responsible for
// syncing the FS and flushing the journal, if desired. It expects
// the `config` object to be unique during the lifetime of this call.
func DeleteRepo(
	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
	repoName string) error {
	// Create a unique ID using the verifying key and the `config`
	// object, which should be unique to each call in practice.
	session, err := config.KBPKI().GetCurrentSession(ctx)
	if err != nil {
		return err
	}

	kbfsOps := config.KBFSOps()
	rootNode, _, err := kbfsOps.GetOrCreateRootNode(
		ctx, tlfHandle, data.MasterBranch)
	if err != nil {
		return err
	}
	normalizedRepoName := NormalizeRepoName(repoName)

	repoNode, _, err := kbfsOps.Lookup(
		ctx, rootNode, rootNode.ChildName(kbfsRepoDir))
	if err != nil {
		return castNoSuchNameError(err, repoName)
	}

	// No need to obfuscate the repo name.
	repoNamePPS := data.NewPathPartString(normalizedRepoName, nil)
	_, _, err = kbfsOps.Lookup(ctx, repoNode, repoNamePPS)
	if err != nil {
		return castNoSuchNameError(err, repoName)
	}

	ctx = context.WithValue(ctx, libkbfs.CtxAllowNameKey, kbfsDeletedReposDir)
	deletedReposNode, err := lookupOrCreateDir(
		ctx, config, repoNode, kbfsDeletedReposDir)
	if err != nil {
		return err
	}

	// For now, just rename the repo out of the way, using the device
	// ID and the current time in nanoseconds to make uniqueness
	// probable.
	dirSuffix := fmt.Sprintf(
		"%s-%d", session.VerifyingKey.String(), config.Clock().Now().UnixNano())
	return kbfsOps.Rename(
		ctx, repoNode, repoNamePPS, deletedReposNode,
		deletedReposNode.ChildName(normalizedRepoName+dirSuffix))
}
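// The sketch below is illustrative and not part of the original source: it
// shows how a caller might pair DeleteRepo with a best-effort, time-limited
// cleanup of repos that were deleted long enough ago. The function name
// exampleDeleteAndClean is hypothetical.
func exampleDeleteAndClean(
	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
	repoName string) error {
	if err := DeleteRepo(ctx, config, tlfHandle, repoName); err != nil {
		return err
	}
	// Opportunistically remove repos deleted more than
	// minDeletedAgeForCleaning ago, bounded by cleaningTimeLimit.
	return CleanOldDeletedReposTimeLimited(ctx, config, tlfHandle)
}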
func renameRepoInConfigFile(
	ctx context.Context, repoFS billy.Filesystem, newRepoName string) error {
	// Assume lock file is already taken for both the old repo and the
	// new one.
	f, err := repoFS.OpenFile(kbfsConfigName, os.O_RDWR, 0600)
	if err != nil {
		return err
	}
	defer f.Close()
	buf, err := io.ReadAll(f)
	if err != nil {
		return err
	}
	c, err := configFromBytes(buf)
	if err != nil {
		return err
	}
	c.Name = newRepoName
	buf, err = c.toBytes()
	if err != nil {
		return err
	}
	_, err = f.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	err = f.Truncate(0)
	if err != nil {
		return err
	}
	_, err = f.Write(buf)
	if err != nil {
		return err
	}
	return nil
}

// RenameRepo renames the repo from an old name to a new name. It
// leaves a symlink behind so that old remotes will continue to work.
// The caller is responsible for syncing the FS and flushing the
// journal, if desired.
func RenameRepo(
	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
	oldRepoName, newRepoName string) (err error) {
	if !checkValidRepoName(newRepoName, config) {
		return errors.WithStack(libkb.InvalidRepoNameError{Name: newRepoName})
	}

	kbfsOps := config.KBFSOps()
	rootNode, _, err := kbfsOps.GetOrCreateRootNode(
		ctx, tlfHandle, data.MasterBranch)
	if err != nil {
		return err
	}
	normalizedOldRepoName := NormalizeRepoName(oldRepoName)
	normalizedNewRepoName := NormalizeRepoName(newRepoName)

	repoNode, _, err := kbfsOps.Lookup(
		ctx, rootNode, rootNode.ChildName(kbfsRepoDir))
	if err != nil {
		return err
	}

	// Does the old repo definitely exist?
	_, _, err = kbfsOps.Lookup(
		ctx, repoNode, repoNode.ChildName(normalizedOldRepoName))
	if err != nil {
		return err
	}

	if oldRepoName == newRepoName {
		// The names are the same, nothing else to do.
		return nil
	}

	fs, err := libfs.NewFS(
		ctx, config, tlfHandle, data.MasterBranch, path.Join(kbfsRepoDir),
		"", keybase1.MDPriorityGit)
	if err != nil {
		return err
	}

	oldRepoFS, err := fs.Chroot(normalizedOldRepoName)
	if err != nil {
		return err
	}

	// Take locks in both repos during rename (same lock that's taken
	// for new repo creation).
	oldLockFile, err := takeConfigLock(
		oldRepoFS.(*libfs.FS), tlfHandle, oldRepoName)
	if err != nil {
		return err
	}
	defer func() {
		closeErr := oldLockFile.Close()
		if err == nil {
			err = closeErr
		}
	}()

	if normalizedOldRepoName == normalizedNewRepoName {
		// All we need to do is update the name in the config file,
		// and the MD.
		err = renameRepoInConfigFile(ctx, oldRepoFS, newRepoName)
		if err != nil {
			return err
		}
		// We pass in `oldRepoFS`, which now has the new repo name in its
		// config.
		return UpdateRepoMD(ctx, config, tlfHandle, oldRepoFS,
			keybase1.GitPushType_RENAMEREPO, oldRepoName, nil)
	}

	// Does the new repo not exist yet? No need to obfuscate the repo name.
	repoNamePPS := data.NewPathPartString(normalizedNewRepoName, nil)
	_, ei, err := kbfsOps.Lookup(ctx, repoNode, repoNamePPS)
	switch errors.Cause(err).(type) {
	case idutil.NoSuchNameError:
		// The happy path.
	case nil:
		if ei.Type == data.Sym {
			config.MakeLogger("").CDebugf(
				ctx, "Overwriting symlink for repo %s with a new repo",
				normalizedNewRepoName)
			err = config.KBFSOps().RemoveEntry(ctx, repoNode, repoNamePPS)
			if err != nil {
				return err
			}
		} else {
			newRepoFS, err := fs.Chroot(normalizedNewRepoName)
			if err != nil {
				return err
			}
			// Someone else already created and initialized the repo.
			return makeExistingRepoError(ctx, config, newRepoFS, newRepoName)
		}
	default:
		return err
	}

	// Make the new repo subdir just so we can take the lock inside
	// the new repo. (We'll delete the new dir before the rename.)
	err = fs.MkdirAll(normalizedNewRepoName, 0777)
	if err != nil {
		return err
	}
	newRepoFS, err := fs.Chroot(normalizedNewRepoName)
	if err != nil {
		return err
	}
	newLockFile, err := takeConfigLock(
		newRepoFS.(*libfs.FS), tlfHandle, newRepoName)
	if err != nil {
		return err
	}
	defer func() {
		closeErr := newLockFile.Close()
		if err == nil {
			err = closeErr
		}
	}()

	// Delete this new dir out of the way before we rename.
	fi, err := fs.Stat(normalizedNewRepoName)
	if err != nil {
		return err
	}
	err = libfs.RecursiveDelete(ctx, fs, fi)
	if err != nil {
		return err
	}

	// Now update the old config file and rename, and leave a symlink
	// behind. TODO: if any of the modifying steps below fail, we
	// should technically clean up any modifications before return, so
	// they don't get flushed. However, with journaling on these are
	// all local operations and all very unlikely to fail.
	err = renameRepoInConfigFile(ctx, oldRepoFS, newRepoName)
	if err != nil {
		return err
	}
	err = fs.Rename(normalizedOldRepoName, normalizedNewRepoName)
	if err != nil {
		return err
	}
	err = fs.Symlink(normalizedNewRepoName, normalizedOldRepoName)
	if err != nil {
		return err
	}
	newRepoFS, err = fs.Chroot(normalizedNewRepoName)
	if err != nil {
		return err
	}
	return UpdateRepoMD(ctx, config, tlfHandle, newRepoFS,
		keybase1.GitPushType_RENAMEREPO, oldRepoName, nil)
}

// GCOptions describes options for garbage collection.
type GCOptions struct {
	// The maximum number of loose refs we will tolerate; if there are
	// more loose refs, we should pack them.
	MaxLooseRefs int
	// The minimum number of potentially-expired loose objects we need
	// to start the pruning process. If < 0, pruning will not be done.
	PruneMinLooseObjects int
	// Any unreachable objects older than this time are subject to
	// pruning.
	PruneExpireTime time.Time
	// The maximum number of object packs we will tolerate; if there
	// are more object packs, we should re-pack all the objects. If
	// < 0, re-packing will not be done.
	MaxObjectPacks int
}
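// The sketch below is illustrative and not part of the original source: it
// shows how a caller might fill in GCOptions and consult NeedsGC (below)
// before deciding to run GCRepo. The threshold values and the function name
// exampleCheckGC are assumptions, not recommendations from this package.
func exampleCheckGC(s storage.Storer) (gcNeeded bool, err error) {
	opts := GCOptions{
		MaxLooseRefs:         50,
		PruneMinLooseObjects: 100,
		PruneExpireTime:      time.Now().Add(-14 * 24 * time.Hour),
		MaxObjectPacks:       20,
	}
	doPackRefs, _, doPruneLoose, doObjectRepack, _, err := NeedsGC(s, opts)
	if err != nil {
		return false, err
	}
	return doPackRefs || doPruneLoose || doObjectRepack, nil
}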
// NeedsGC checks the given repo storage layer against the given
// options to see what kinds of GC are needed on the repo.
func NeedsGC(storage storage.Storer, options GCOptions) (
	doPackRefs bool, numLooseRefs int, doPruneLoose, doObjectRepack bool,
	numObjectPacks int, err error) {
	numLooseRefs, err = storage.CountLooseRefs()
	if err != nil {
		return false, 0, false, false, 0, err
	}

	doPackRefs = numLooseRefs > options.MaxLooseRefs

	if options.PruneMinLooseObjects >= 0 {
		los, ok := storage.(storer.LooseObjectStorer)
		if !ok {
			panic("storage is unexpectedly not a LooseObjectStorer")
		}

		// Count the number of loose objects that are older than the
		// expire time, to see if pruning is needed.
		numLooseMaybePrune := 0
		err = los.ForEachObjectHash(func(h plumbing.Hash) error {
			t, err := los.LooseObjectTime(h)
			if err != nil {
				return err
			}
			if t.Before(options.PruneExpireTime) {
				numLooseMaybePrune++
				if numLooseMaybePrune >= options.PruneMinLooseObjects {
					doPruneLoose = true
					return storer.ErrStop
				}
			}
			return nil
		})
		if err != nil {
			return false, 0, false, false, 0, err
		}
	}

	pos, ok := storage.(storer.PackedObjectStorer)
	if !ok {
		panic("storage is unexpectedly not a PackedObjectStorer")
	}

	packs, err := pos.ObjectPacks()
	if err != nil {
		return false, 0, false, false, 0, err
	}
	numObjectPacks = len(packs)
	doObjectRepack = options.MaxObjectPacks >= 0 &&
		numObjectPacks > options.MaxObjectPacks

	return doPackRefs, numLooseRefs, doPruneLoose,
		doObjectRepack, numObjectPacks, nil
}

func markSuccessfulGC(
	ctx context.Context, config libkbfs.Config, fs billy.Filesystem) (
	err error) {
	changer, ok := fs.(billy.Change)
	if !ok {
		return errors.New("FS does not handle changing mtimes")
	}

	f, err := fs.Create(repoGCLockFileName)
	if err != nil {
		return err
	}
	err = f.Close()
	if err != nil {
		return err
	}
	return changer.Chtimes(
		repoGCLockFileName, time.Time{}, config.Clock().Now())
}

func canDoGC(
	ctx context.Context, config libkbfs.Config, fs *libfs.FS,
	log logger.Logger) (bool, error) {
	log.CDebugf(ctx, "Locking for GC")
	f, err := fs.Create(repoGCLockFileName)
	if err != nil {
		return false, err
	}
	defer func() {
		closeErr := f.Close()
		if err == nil {
			err = closeErr
		}
	}()
	err = f.Lock()
	if err != nil {
		return false, err
	}

	return canDoWork(
		ctx, config.MDServer(), config.Clock(), fs,
		repoGCInProgressFileName, gcTimeLimit, log)
}

// GCRepo runs garbage collection on the specified repo, if it exceeds
// any of the thresholds provided in `options`.
func GCRepo(
	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
	repoName string, options GCOptions) (err error) {
	log := config.MakeLogger("")
	log.CDebugf(ctx, "Checking whether GC is needed for %s/%s",
		tlfHandle.GetCanonicalName(), repoName)

	uniqID, err := makeUniqueID(ctx, config)
	if err != nil {
		return err
	}

	fs, _, err := getOrCreateRepoAndID(
		ctx, config, tlfHandle, repoName, uniqID, getOnly)
	if err != nil {
		return err
	}
	defer func() {
		if err == nil {
			err = markSuccessfulGC(ctx, config, fs)
		}
	}()

	fsStorer, err := filesystem.NewStorage(fs)
	if err != nil {
		return err
	}
	var fsStorage storage.Storer = fsStorer

	// Wrap it in an on-demand storer, so we don't try to read all the
	// objects of big repos into memory at once.
	var storage storage.Storer
	storage, err = NewOnDemandStorer(fsStorage)
	if err != nil {
		return err
	}

	// Wrap it in an "ephemeral" config with a fixed pack window, so
	// we create packs with delta compression, but don't persist the
	// pack window setting to disk.
	storage = &ephemeralGitConfigWithFixedPackWindow{
		storage,
		fsStorage.(storer.Initializer),
		fsStorage.(storer.PackfileWriter),
		fsStorage.(storer.LooseObjectStorer),
		fsStorage.(storer.PackedObjectStorer),
		10,
	}

	doPackRefs, _, doPruneLoose, doObjectRepack, _, err := NeedsGC(
		storage, options)
	if err != nil {
		return err
	}
	if !doPackRefs && !doPruneLoose && !doObjectRepack {
		log.CDebugf(ctx, "Skipping GC")
		return nil
	}

	doGC, err := canDoGC(ctx, config, fs, log)
	if err != nil {
		return err
	}
	if !doGC {
		log.CDebugf(ctx, "Skipping GC due to other worker")
		return nil
	}

	defer func() {
		removeErr := fs.Remove(repoGCInProgressFileName)
		if err == nil {
			err = removeErr
		}
	}()

	// Check the GC thresholds again since they might have changed
	// while getting the lock.
	doPackRefs, numLooseRefs, doPruneLoose, doObjectRepack,
		numObjectPacks, err := NeedsGC(storage, options)
	if err != nil {
		return err
	}
	if !doPackRefs && !doPruneLoose && !doObjectRepack {
		log.CDebugf(ctx, "GC no longer needed")
		return nil
	}

	if doPackRefs {
		log.CDebugf(ctx, "Packing %d loose refs", numLooseRefs)
		err = storage.PackRefs()
		if err != nil {
			return err
		}
	}

	if doPruneLoose {
		repo, err := gogit.Open(storage, nil)
		if err != nil {
			return err
		}
		err = repo.Prune(gogit.PruneOptions{
			OnlyObjectsOlderThan: options.PruneExpireTime,
			Handler:              repo.DeleteObject,
		})
		if err != nil {
			return err
		}
	}

	if doObjectRepack {
		log.CDebugf(ctx, "Re-packing %d object packs", numObjectPacks)
		repo, err := gogit.Open(storage, nil)
		if err != nil {
			return err
		}
		err = repo.RepackObjects(&gogit.RepackConfig{
			OnlyDeletePacksOlderThan: options.PruneExpireTime,
		})
		if err != nil {
			return err
		}
	}

	// TODO: add object re-packing.
	return nil
}

// LastGCTime returns the last time the repo was successfully
// garbage-collected.
func LastGCTime(ctx context.Context, fs billy.Filesystem) (
	time.Time, error) {
	fi, err := fs.Stat(repoGCLockFileName)
	if os.IsNotExist(err) {
		return time.Time{}, nil
	} else if err != nil {
		return time.Time{}, err
	}

	return fi.ModTime(), nil
}
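// The sketch below is illustrative and not part of the original source: it
// shows how a periodic worker might use LastGCTime to rate-limit calls to
// GCRepo. The one-week interval and the function name exampleMaybeGC are
// assumptions, not policy from this package.
func exampleMaybeGC(
	ctx context.Context, config libkbfs.Config, tlfHandle *tlfhandle.Handle,
	repoName string, repoFS billy.Filesystem, options GCOptions) error {
	last, err := LastGCTime(ctx, repoFS)
	if err != nil {
		return err
	}
	if config.Clock().Now().Sub(last) < 7*24*time.Hour {
		// GC ran recently enough; skip this round.
		return nil
	}
	return GCRepo(ctx, config, tlfHandle, repoName, options)
}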