github.com/dolthub/dolt/go@v0.40.5-0.20240520175717-68db7794bea6/libraries/doltcore/env/actions/remotes.go

// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package actions

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/dolthub/dolt/go/cmd/dolt/cli"
	eventsapi "github.com/dolthub/dolt/go/gen/proto/dolt/services/eventsapi/v1alpha1"
	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
	"github.com/dolthub/dolt/go/libraries/doltcore/env"
	"github.com/dolthub/dolt/go/libraries/doltcore/ref"
	"github.com/dolthub/dolt/go/libraries/events"
	"github.com/dolthub/dolt/go/libraries/utils/earl"
	"github.com/dolthub/dolt/go/libraries/utils/iohelp"
	"github.com/dolthub/dolt/go/store/datas"
	"github.com/dolthub/dolt/go/store/datas/pull"
	"github.com/dolthub/dolt/go/store/hash"
)

var ErrCantFF = errors.New("can't fast forward merge")
var ErrInvalidPullArgs = errors.New("dolt pull takes at most two args")
var ErrCannotPushRef = errors.New("cannot push ref")
var ErrFailedToDeleteRemote = errors.New("failed to delete remote")
var ErrFailedToGetRemoteDb = errors.New("failed to get remote db")
var ErrUnknownPushErr = errors.New("unknown push error")

type ProgStarter func(ctx context.Context) (*sync.WaitGroup, chan pull.Stats)
type ProgStopper func(cancel context.CancelFunc, wg *sync.WaitGroup, statsCh chan pull.Stats)

// Push will update a destination branch in a given destination database if it can be done as a fast forward merge.
// This is accomplished first by verifying that the remote tracking reference for the source database can be updated to
// the given commit via a fast forward merge. If this is the case, an attempt will be made to update the branch in the
// destination db to the given commit via a fast forward move. If that succeeds, the tracking branch is updated in the
// source db.
func Push(ctx context.Context, tempTableDir string, mode ref.UpdateMode, destRef ref.BranchRef, remoteRef ref.RemoteRef, srcDB, destDB *doltdb.DoltDB, commit *doltdb.Commit, statsCh chan pull.Stats) error {
	var err error
	if mode == ref.FastForwardOnly {
		canFF, err := destDB.CanFastForward(ctx, destRef, commit)

		if err != nil {
			return err
		} else if !canFF {
			return ErrCantFF
		}
	}

	h, err := commit.HashOf()
	if err != nil {
		return err
	}

	err = destDB.PullChunks(ctx, tempTableDir, srcDB, []hash.Hash{h}, statsCh, nil)

	if err != nil {
		return err
	}

	switch mode {
	case ref.ForceUpdate:
		err = destDB.SetHeadAndWorkingSetToCommit(ctx, destRef, commit)
		if err != nil {
			return err
		}
		err = srcDB.SetHeadToCommit(ctx, remoteRef, commit)
	case ref.FastForwardOnly:
		err = destDB.FastForwardWithWorkspaceCheck(ctx, destRef, commit)
		if err != nil {
			return err
		}
		// We set the remote ref to the commit here, regardless of its
		// previous value.
		// It does not need to be a FastForward update
		// of the local ref for this operation to succeed.
		err = srcDB.SetHeadToCommit(ctx, remoteRef, commit)
	}

	return err
}

// DoPush returns a message about whether the push was successful for each branch or tag.
// This includes whether a new remote branch was created, an upstream was set, or the push was rejected for a branch.
func DoPush(ctx context.Context, pushMeta *env.PushOptions, progStarter ProgStarter, progStopper ProgStopper) (returnMsg string, err error) {
	var successPush, setUpstreamPush, failedPush []string
	for _, targets := range pushMeta.Targets {
		err = push(ctx, pushMeta.Rsr, pushMeta.TmpDir, pushMeta.SrcDb, pushMeta.DestDb, pushMeta.Remote, targets, progStarter, progStopper)
		if err == nil {
			// TODO: we don't have sufficient information here to know what actually happened in the push. Supporting
			// git behavior of printing the commit ids updated (e.g. 74476cf38..080b073e7 branch1 -> branch1) isn't
			// currently possible. We need to plumb through results in the return from Push(). Having just an error
			// response is not sufficient, as there are many "success" cases that are not errors.
			if targets.SrcRef == ref.EmptyBranchRef {
				successPush = append(successPush, fmt.Sprintf(" - [deleted] %s", targets.DestRef.GetPath()))
			} else {
				successPush = append(successPush, fmt.Sprintf(" * [new branch] %s -> %s", targets.SrcRef.GetPath(), targets.DestRef.GetPath()))
			}

		} else if errors.Is(err, doltdb.ErrIsAhead) || errors.Is(err, ErrCantFF) || errors.Is(err, datas.ErrMergeNeeded) {
			failedPush = append(failedPush, fmt.Sprintf(" ! [rejected] %s -> %s (non-fast-forward)", targets.SrcRef.GetPath(), targets.DestRef.GetPath()))
			continue
		} else if !errors.Is(err, doltdb.ErrUpToDate) {
			// breaking here still lets us return the successful push messages along with the error from the current push
			break
		}
		if targets.SetUpstream {
			err = pushMeta.Rsw.UpdateBranch(targets.SrcRef.GetPath(), env.BranchConfig{
				Merge: ref.MarshalableRef{
					Ref: targets.DestRef,
				},
				Remote: pushMeta.Remote.Name,
			})
			if err != nil {
				return "", err
			}
			setUpstreamPush = append(setUpstreamPush, fmt.Sprintf("branch '%s' set up to track '%s'.", targets.SrcRef.GetPath(), targets.RemoteRef.GetPath()))
		}
	}

	returnMsg, err = buildReturnMsg(successPush, setUpstreamPush, failedPush, pushMeta.Remote.Url, err)
	return
}

// push performs a push of a branch or a tag.
func push(ctx context.Context, rsr env.RepoStateReader, tmpDir string, src, dest *doltdb.DoltDB, remote *env.Remote, opts *env.PushTarget, progStarter ProgStarter, progStopper ProgStopper) error {
	switch opts.SrcRef.GetType() {
	case ref.BranchRefType:
		if opts.SrcRef == ref.EmptyBranchRef {
			return deleteRemoteBranch(ctx, opts.DestRef, opts.RemoteRef, src, dest, *remote, opts.Mode.Force)
		} else {
			return PushToRemoteBranch(ctx, rsr, tmpDir, opts.Mode, opts.SrcRef, opts.DestRef, opts.RemoteRef, src, dest, *remote, progStarter, progStopper)
		}
	case ref.TagRefType:
		return pushTagToRemote(ctx, tmpDir, opts.SrcRef, opts.DestRef, src, dest, progStarter, progStopper)
	default:
		return fmt.Errorf("%w: %s of type %s", ErrCannotPushRef, opts.SrcRef.String(), opts.SrcRef.GetType())
	}
}

// buildReturnMsg combines the push progress information for created branches, rejected branches,
// and newly set upstream branches, in that order.
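//
// Illustrative sketch of the assembled message (the remote URL and branch names
// below are hypothetical, not taken from this file); the individual lines come
// from the format strings used in DoPush:
//
//	To https://doltremoteapi.dolthub.com/org/repo
//	 * [new branch] feature -> feature
//	 ! [rejected] main -> main (non-fast-forward)
//	branch 'feature' set up to track 'origin/feature'.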
// TODO: updated branches info is missing
func buildReturnMsg(success, setUpstream, failed []string, remoteUrl string, err error) (string, error) {
	var retMsg string
	if len(success) == 0 && len(failed) == 0 {
		return "", err
	} else if len(failed) > 0 {
		err = env.ErrFailedToPush.New(remoteUrl)
	} else if errors.Is(err, doltdb.ErrUpToDate) {
		// at least one branch was pushed successfully, so clear the up-to-date error
		err = nil
	}

	retMsg = fmt.Sprintf("To %s", remoteUrl)
	for _, sMsg := range success {
		retMsg = fmt.Sprintf("%s\n%s", retMsg, sMsg)
	}
	for _, fMsg := range failed {
		retMsg = fmt.Sprintf("%s\n%s", retMsg, fMsg)
	}
	for _, uMsg := range setUpstream {
		retMsg = fmt.Sprintf("%s\n%s", retMsg, uMsg)
	}
	return retMsg, err
}

// PushTag pushes a commit tag and all underlying data from a local source database to a remote destination database.
func PushTag(ctx context.Context, tempTableDir string, destRef ref.TagRef, srcDB, destDB *doltdb.DoltDB, tag *doltdb.Tag, statsCh chan pull.Stats) error {
	var err error

	addr, err := tag.GetAddr()
	if err != nil {
		return err
	}

	err = destDB.PullChunks(ctx, tempTableDir, srcDB, []hash.Hash{addr}, statsCh, nil)

	if err != nil {
		return err
	}

	return destDB.SetHead(ctx, destRef, addr)
}

func deleteRemoteBranch(ctx context.Context, toDelete, remoteRef ref.DoltRef, localDB, remoteDB *doltdb.DoltDB, remote env.Remote, force bool) error {
	err := DeleteRemoteBranch(ctx, toDelete.(ref.BranchRef), remoteRef.(ref.RemoteRef), localDB, remoteDB, force)

	if err != nil {
		return fmt.Errorf("%w; '%s' from remote '%s'; %s", ErrFailedToDeleteRemote, toDelete.String(), remote.Name, err)
	}

	return nil
}

func PushToRemoteBranch(ctx context.Context, rsr env.RepoStateReader, tempTableDir string, mode ref.UpdateMode, srcRef, destRef, remoteRef ref.DoltRef, localDB, remoteDB *doltdb.DoltDB, remote env.Remote, progStarter ProgStarter, progStopper ProgStopper) error {
	evt := events.GetEventFromContext(ctx)

	u, err := earl.Parse(remote.Url)

	// TODO: why is evt nil sometimes?
	if err == nil && evt != nil {
		if u.Scheme != "" {
			evt.SetAttribute(eventsapi.AttributeID_REMOTE_URL_SCHEME, u.Scheme)
		}
	}

	cs, _ := doltdb.NewCommitSpec(srcRef.GetPath())
	headRef, err := rsr.CWBHeadRef()
	if err != nil {
		return err
	}
	optCmt, err := localDB.Resolve(ctx, cs, headRef)
	if err != nil {
		return fmt.Errorf("%w; refspec not found: '%s'; %s", ref.ErrInvalidRefSpec, srcRef.GetPath(), err.Error())
	}
	cm, ok := optCmt.ToCommit()
	if !ok {
		return doltdb.ErrGhostCommitEncountered
	}

	newCtx, cancelFunc := context.WithCancel(ctx)
	wg, statsCh := progStarter(newCtx)
	err = Push(ctx, tempTableDir, mode, destRef.(ref.BranchRef), remoteRef.(ref.RemoteRef), localDB, remoteDB, cm, statsCh)
	progStopper(cancelFunc, wg, statsCh)

	switch err {
	case nil:
		cli.Println()
		return nil
	case doltdb.ErrUpToDate, doltdb.ErrIsAhead, ErrCantFF, datas.ErrMergeNeeded, datas.ErrDirtyWorkspace:
		return err
	default:
		return fmt.Errorf("%w; %s", ErrUnknownPushErr, err.Error())
	}
}

func pushTagToRemote(ctx context.Context, tempTableDir string, srcRef, destRef ref.DoltRef, localDB, remoteDB *doltdb.DoltDB, progStarter ProgStarter, progStopper ProgStopper) error {
	tg, err := localDB.ResolveTag(ctx, srcRef.(ref.TagRef))

	if err != nil {
		return err
	}

	newCtx, cancelFunc := context.WithCancel(ctx)
	wg, statsCh := progStarter(newCtx)
	err = PushTag(ctx, tempTableDir, destRef.(ref.TagRef), localDB, remoteDB, tg, statsCh)
	progStopper(cancelFunc, wg, statsCh)

	if err != nil {
		return err
	}

	cli.Println()
	return nil
}

// DeleteRemoteBranch validates that targetRef is a branch on the remote database and deletes it, then deletes the
// remote tracking branch from the local database.
func DeleteRemoteBranch(ctx context.Context, targetRef ref.BranchRef, remoteRef ref.RemoteRef, localDB, remoteDB *doltdb.DoltDB, force bool) error {
	hasRef, err := remoteDB.HasRef(ctx, targetRef)

	if err != nil {
		return err
	}

	wsRefStr := ""
	if !force {
		wsRef, err := ref.WorkingSetRefForHead(targetRef)
		if err != nil {
			return err
		}
		wsRefStr = wsRef.String()
	}

	if hasRef {
		err = remoteDB.DeleteBranchWithWorkspaceCheck(ctx, targetRef, nil, wsRefStr)
	}

	if err != nil {
		return err
	}

	err = localDB.DeleteBranch(ctx, remoteRef, nil)

	if err != nil {
		return err
	}

	return nil
}

// FetchCommit fetches a commit and all underlying data from a remote source database to the local destination database.
func FetchCommit(ctx context.Context, tempTablesDir string, srcDB, destDB *doltdb.DoltDB, srcDBCommit *doltdb.Commit, statsCh chan pull.Stats) error {
	h, err := srcDBCommit.HashOf()
	if err != nil {
		return err
	}

	return destDB.PullChunks(ctx, tempTablesDir, srcDB, []hash.Hash{h}, statsCh, nil)
}

// FetchTag fetches a commit tag and all underlying data from a remote source database to the local destination database.
func FetchTag(ctx context.Context, tempTableDir string, srcDB, destDB *doltdb.DoltDB, srcDBTag *doltdb.Tag, statsCh chan pull.Stats) error {
	addr, err := srcDBTag.GetAddr()
	if err != nil {
		return err
	}

	return destDB.PullChunks(ctx, tempTableDir, srcDB, []hash.Hash{addr}, statsCh, nil)
}

// Clone pulls all data from a remote source database to a local destination database.
func Clone(ctx context.Context, srcDB, destDB *doltdb.DoltDB, eventCh chan<- pull.TableFileEvent) error {
	return srcDB.Clone(ctx, destDB, eventCh)
}

// FetchFollowTags fetches all tags from the source DB whose commits have already
// been fetched into the destination DB.
// todo: potentially too expensive to iterate over all srcDB tags
func FetchFollowTags(ctx context.Context, tempTableDir string, srcDB, destDB *doltdb.DoltDB, progStarter ProgStarter, progStopper ProgStopper) error {
	err := IterResolvedTags(ctx, srcDB, func(tag *doltdb.Tag) (stop bool, err error) {
		tagHash, err := tag.GetAddr()
		if err != nil {
			return true, err
		}

		has, err := destDB.Has(ctx, tagHash)
		if err != nil {
			return true, err
		}
		if has {
			// tag is already fetched
			return false, nil
		}

		cmHash, err := tag.Commit.HashOf()
		if err != nil {
			return true, err
		}

		has, err = destDB.Has(ctx, cmHash)
		if err != nil {
			return true, err
		}
		if has {
			// We _might_ have it. We need to check if it's a ghost, in which case we'll skip this commit.
			optCmt, err := destDB.ReadCommit(ctx, cmHash)
			if err != nil {
				return true, err
			}
			_, ok := optCmt.ToCommit()
			if !ok {
				return false, nil
			}
		} else {
			return false, nil
		}

		newCtx, cancelFunc := context.WithCancel(ctx)
		wg, statsCh := progStarter(newCtx)
		err = FetchTag(ctx, tempTableDir, srcDB, destDB, tag, statsCh)
		progStopper(cancelFunc, wg, statsCh)
		if err == nil {
			cli.Println()
		} else if err == pull.ErrDBUpToDate {
			err = nil
		}

		if err != nil {
			return true, err
		}

		err = destDB.SetHead(ctx, tag.GetDoltRef(), tagHash)

		return false, err
	})

	if err != nil {
		return err
	}

	return nil
}

// FetchRemoteBranch fetches and returns the |Commit| corresponding to the remote ref given. Returns an error if the
// remote reference doesn't exist or can't be fetched. Blocks until the fetch is complete.
func FetchRemoteBranch(
	ctx context.Context,
	tempTablesDir string,
	rem env.Remote,
	srcDB, destDB *doltdb.DoltDB,
	srcRef ref.DoltRef,
	progStarter ProgStarter,
	progStopper ProgStopper,
) (*doltdb.Commit, error) {
	evt := events.GetEventFromContext(ctx)

	u, err := earl.Parse(rem.Url)

	if err == nil && evt != nil {
		if u.Scheme != "" {
			evt.SetAttribute(eventsapi.AttributeID_REMOTE_URL_SCHEME, u.Scheme)
		}
	}

	cs, _ := doltdb.NewCommitSpec(srcRef.String())
	optCmt, err := srcDB.Resolve(ctx, cs, nil)
	if err != nil {
		return nil, fmt.Errorf("unable to find '%s' on '%s'; %w", srcRef.GetPath(), rem.Name, err)
	}
	srcDBCommit, ok := optCmt.ToCommit()
	if !ok {
		// This really should never happen. The source db is always expected to have everything.
		return nil, doltdb.ErrGhostCommitRuntimeFailure
	}

	// The code is structured this way (different paths for progress chan v.
	// not) so that the linter can understand there
	// isn't a context leak happening on one path.
	if progStarter != nil && progStopper != nil {
		newCtx, cancelFunc := context.WithCancel(ctx)
		wg, statsCh := progStarter(newCtx)
		defer progStopper(cancelFunc, wg, statsCh)

		err = FetchCommit(ctx, tempTablesDir, srcDB, destDB, srcDBCommit, statsCh)

		if err == pull.ErrDBUpToDate {
			err = nil
		}

		if err != nil {
			return nil, err
		}

		return srcDBCommit, nil
	}

	err = FetchCommit(ctx, tempTablesDir, srcDB, destDB, srcDBCommit, nil)

	if err == pull.ErrDBUpToDate {
		err = nil
	}

	if err != nil {
		return nil, err
	}

	return srcDBCommit, nil
}

// ShallowFetchRefSpec fetches the remote refSpec from the source database to the destination database. Currently it is only
// used for shallow clones.
func ShallowFetchRefSpec(
	ctx context.Context,
	dbData env.DbData,
	srcDB *doltdb.DoltDB,
	refSpecs ref.RemoteRefSpec,
	remote *env.Remote,
	depth int,
) error {

	if depth < 1 {
		return fmt.Errorf("invalid depth: %d", depth)
	}

	return fetchRefSpecsWithDepth(ctx, dbData, srcDB, []ref.RemoteRefSpec{refSpecs}, remote, ref.ForceUpdate, depth, nil, nil)
}

// FetchRefSpecs is the common SQL and CLI entrypoint for fetching branches, tags, and heads from a remote.
// This function takes dbData, which is an env.DbData object for handling repoState reads and writes, and srcDB,
// which is a remote *doltdb.DoltDB object that remote branches are fetched from.
func FetchRefSpecs(
	ctx context.Context,
	dbData env.DbData,
	srcDB *doltdb.DoltDB,
	refSpecs []ref.RemoteRefSpec,
	remote *env.Remote,
	mode ref.UpdateMode,
	progStarter ProgStarter,
	progStopper ProgStopper,
) error {
	return fetchRefSpecsWithDepth(ctx, dbData, srcDB, refSpecs, remote, mode, -1, progStarter, progStopper)
}

func fetchRefSpecsWithDepth(
	ctx context.Context,
	dbData env.DbData,
	srcDB *doltdb.DoltDB,
	refSpecs []ref.RemoteRefSpec,
	remote *env.Remote,
	mode ref.UpdateMode,
	depth int,
	progStarter ProgStarter,
	progStopper ProgStopper,
) error {
	var branchRefs []doltdb.RefWithHash
	err := srcDB.VisitRefsOfType(ctx, ref.HeadRefTypes, func(r ref.DoltRef, addr hash.Hash) error {
		branchRefs = append(branchRefs, doltdb.RefWithHash{Ref: r, Hash: addr})
		return nil
	})
	if err != nil {
		return fmt.Errorf("%w: %s", env.ErrFailedToReadDb, err.Error())
	}

	// We build up two structures:
	// 1) The list of chunk addresses to fetch, representing the remote branch heads.
	// 2) A mapping from branch HEAD to the remote tracking ref we're going to update.
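	//
	// For example (illustrative values, not taken from this file): with a remote
	// named "origin" and a refspec like refs/heads/*:refs/remotes/origin/*, a
	// remote branch head at refs/heads/feature maps to the local tracking ref
	// refs/remotes/origin/feature, and that branch head's hash is appended to
	// the fetch list.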

	var toFetch []hash.Hash
	var newHeads []doltdb.RefWithHash

	for _, rs := range refSpecs {
		rsSeen := false

		for _, branchRef := range branchRefs {
			remoteTrackRef := rs.DestRef(branchRef.Ref)

			if remoteTrackRef != nil {
				rsSeen = true

				toFetch = append(toFetch, branchRef.Hash)
				newHeads = append(newHeads, doltdb.RefWithHash{Ref: remoteTrackRef, Hash: branchRef.Hash})
			}
		}
		if !rsSeen {
			return fmt.Errorf("%w: '%s'", ref.ErrInvalidRefSpec, rs.GetRemRefToLocal())
		}
	}

	shallowClone := depth > 0
	skipCmts := hash.NewHashSet()
	allToFetch := toFetch
	if shallowClone {
		skipCmts, err = buildInitialSkipList(ctx, srcDB, toFetch)
		if err != nil {
			return err
		}
		curToFetch := toFetch
		var newToFetch []hash.Hash
		depth--
		for skipCmts.Size() > 0 && depth > 0 {
			newToFetch, skipCmts, err = updateSkipList(ctx, srcDB, curToFetch, skipCmts)
			if err != nil {
				return err
			}

			allToFetch = append(allToFetch, newToFetch...)
			curToFetch = newToFetch
			depth--
		}
	}
	toFetch = allToFetch

	// Now we fetch all the new HEADs we need.
	tmpDir, err := dbData.Rsw.TempTableFilesDir()
	if err != nil {
		return err
	}

	if skipCmts.Size() > 0 {
		err = dbData.Ddb.PersistGhostCommits(ctx, skipCmts)
		if err != nil {
			return err
		}
	}

	err = func() error {
		newCtx := ctx
		var statsCh chan pull.Stats

		if progStarter != nil && progStopper != nil {
			var cancelFunc func()
			newCtx, cancelFunc = context.WithCancel(ctx)
			var wg *sync.WaitGroup
			wg, statsCh = progStarter(newCtx)
			defer progStopper(cancelFunc, wg, statsCh)
		}

		err = dbData.Ddb.PullChunks(ctx, tmpDir, srcDB, toFetch, statsCh, skipCmts)
		if err == pull.ErrDBUpToDate {
			err = nil
		}
		return err
	}()
	if err != nil {
		return err
	}

	for _, newHead := range newHeads {
		optCmt, err := dbData.Ddb.ReadCommit(ctx, newHead.Hash)
		if err != nil {
			return err
		}
		commit, ok := optCmt.ToCommit()
		if !ok {
			// Dest DB should have each hash in `newHeads` now. If we can't read a commit, something is wrong.
			return doltdb.ErrGhostCommitRuntimeFailure
		}

		remoteTrackRef := newHead.Ref

		if mode.Force {
			// TODO: can't be used safely in a SQL context
			err := dbData.Ddb.SetHeadToCommit(ctx, remoteTrackRef, commit)
			if err != nil {
				return err
			}
		} else {
			ok, err := dbData.Ddb.CanFastForward(ctx, remoteTrackRef, commit)
			if err != nil && !errors.Is(err, doltdb.ErrUpToDate) {
				return fmt.Errorf("%w: %s", ErrCantFF, err.Error())
			}
			if !ok {
				return ErrCantFF
			}

			switch err {
			case doltdb.ErrUpToDate:
			case doltdb.ErrIsAhead, nil:
				// TODO: can't be used safely in a SQL context
				err = dbData.Ddb.FastForward(ctx, remoteTrackRef, commit)
				if err != nil && !errors.Is(err, doltdb.ErrUpToDate) {
					return fmt.Errorf("%w: %s", ErrCantFF, err.Error())
				}
			default:
				return fmt.Errorf("%w: %s", ErrCantFF, err.Error())
			}
		}
	}

	if mode.Prune {
		err = pruneBranches(ctx, dbData, *remote, newHeads)
		if err != nil {
			return err
		}
	}

	if !shallowClone {
		// TODO: Currently shallow clones don't pull any tags, but they could. We need to make FetchFollowTags wise
		// to the skipped commits list, and then we can remove this conditional.
		// Also, FetchFollowTags assumes that progStarter and progStopper are always non-nil, which we don't
		// assume elsewhere. Shallow clone has no progress reporting, and as a result they are nil.
		err = FetchFollowTags(ctx, tmpDir, srcDB, dbData.Ddb, progStarter, progStopper)
		if err != nil {
			return err
		}
	}

	return nil
}

func buildInitialSkipList(ctx context.Context, srcDB *doltdb.DoltDB, toFetch []hash.Hash) (hash.HashSet, error) {
	if len(toFetch) > 1 {
		return hash.HashSet{}, fmt.Errorf("runtime error: multiple refspecs not supported in shallow clone")
	}

	cs, err := doltdb.NewCommitSpec(toFetch[0].String())
	if err != nil {
		return hash.HashSet{}, err
	}

	allCommits, err := srcDB.BootstrapShallowResolve(ctx, cs)
	if err != nil {
		return hash.HashSet{}, err
	}

	return allCommits.AsHashSet(ctx)
}

func updateSkipList(ctx context.Context, srcDB *doltdb.DoltDB, toFetch []hash.Hash, skipCmts hash.HashSet) ([]hash.Hash, hash.HashSet, error) {
	newSkipList := skipCmts.Copy()
	newFetchList := []hash.Hash{}
	for _, h := range toFetch {
		optCmt, err := srcDB.ReadCommit(ctx, h)
		if err != nil {
			return nil, nil, err
		}

		// srcDB should always be fully populated, so if there is a ghost commit here, someone is calling this
		// function incorrectly.
		commit, ok := optCmt.ToCommit()
		if !ok {
			return nil, nil, doltdb.ErrGhostCommitEncountered
		}

		for i := 0; i < commit.NumParents(); i++ {
			parent, err := commit.GetParent(ctx, i)
			if err != nil {
				return nil, nil, err
			}
			if newSkipList.Has(parent.Addr) {
				newSkipList.Remove(parent.Addr)
				newFetchList = append(newFetchList, parent.Addr)
			}
		}
	}

	return newFetchList, newSkipList, nil
}

func pruneBranches(ctx context.Context, dbData env.DbData, remote env.Remote, remoteRefs []doltdb.RefWithHash) error {
	remoteRefTypes := map[ref.RefType]struct{}{
		ref.RemoteRefType: {},
	}

	var localRemoteRefs []ref.RemoteRef
	err := dbData.Ddb.VisitRefsOfType(ctx, remoteRefTypes, func(r ref.DoltRef, addr hash.Hash) error {
		rref := r.(ref.RemoteRef)
		localRemoteRefs = append(localRemoteRefs, rref)
		return nil
	})
	if err != nil {
		return err
	}

	// Delete any local remote ref not present in remoteRefs, but only for this remote.
	for _, localRemoteRef := range localRemoteRefs {
		if localRemoteRef.GetRemote() != remote.Name {
			continue
		}

		found := false
		for _, remoteRef := range remoteRefs {
			if remoteRef.Ref == localRemoteRef {
				found = true
				break
			}
		}

		if !found {
			// TODO: this isn't thread-safe in a SQL context
			err = dbData.Ddb.DeleteBranch(ctx, localRemoteRef, nil)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// SyncRoots copies the root hash of the database from srcDb to destDb.
// We can do this via |Clone| if (1) destDb is empty, (2) destDb and srcDb are both
// |TableFileStore|s, and (3) srcDb does *not* have a journal file. The most
// common scenario where this occurs is when we are restoring a backup.
//
// The journal's interaction with TableFileStore is not great currently ---
// when accessing a journal file through TableFileStore, the Reader() should in
// reality return something which is going to result in reading an actual table
// file. For now, we avoid the |Clone| path when the journal file is present.
func canSyncRootsWithClone(ctx context.Context, srcDb, destDb *doltdb.DoltDB, destDbRoot hash.Hash) (bool, error) {
	if !destDbRoot.IsEmpty() {
		return false, nil
	}
	if !srcDb.IsTableFileStore() {
		return false, nil
	}
	if !destDb.IsTableFileStore() {
		return false, nil
	}
	srcHasJournal, err := srcDb.TableFileStoreHasJournal(ctx)
	if err != nil {
		return false, err
	}
	if srcHasJournal {
		return false, nil
	}
	return true, nil
}

// SyncRoots copies the entire chunkstore from srcDb to destDb and rewrites the remote manifest. Used to
// streamline database backups and restores.
// TODO: this should read/write a backup lock file specific to the client who created the backup
// TODO to prevent "restoring a remote", "cloning a backup", "syncing a remote" and "pushing
// TODO a backup." SyncRoots has more destructive potential than push right now.
func SyncRoots(ctx context.Context, srcDb, destDb *doltdb.DoltDB, tempTableDir string, progStarter ProgStarter, progStopper ProgStopper) error {
	srcRoot, err := srcDb.NomsRoot(ctx)
	if err != nil {
		return err
	}

	destRoot, err := destDb.NomsRoot(ctx)
	if err != nil {
		return err
	}

	if srcRoot == destRoot {
		return pull.ErrDBUpToDate
	}

	newCtx, cancelFunc := context.WithCancel(ctx)
	wg, statsCh := progStarter(newCtx)
	defer func() {
		progStopper(cancelFunc, wg, statsCh)
		if err == nil {
			cli.Println()
		}
	}()

	canClone, err := canSyncRootsWithClone(ctx, srcDb, destDb, destRoot)
	if err != nil {
		return err
	}

	if canClone {
		tfCh := make(chan pull.TableFileEvent)
		go func() {
			start := time.Now()
			stats := make(map[string]iohelp.ReadStats)
			for {
				select {
				case tfe, ok := <-tfCh:
					if !ok {
						return
					}
					if tfe.EventType == pull.DownloadStats {
						stats[tfe.TableFiles[0].FileID()] = tfe.Stats[0]

						totalSentBytes := uint64(0)
						totalBytes := uint64(0)

						for _, v := range stats {
							if v.Percent > 0.001 {
								totalSentBytes += v.Read
								totalBytes += uint64(float64(v.Read) / v.Percent)
							}
						}

						// We fake some of these values.
						toEmit := pull.Stats{
							FinishedSendBytes: totalSentBytes,
							BufferedSendBytes: totalSentBytes,
							SendBytesPerSec:   float64(totalSentBytes) / (time.Since(start).Seconds()),

							// estimate the number of chunks based on an average chunk size of 4096.
							TotalSourceChunks:   totalBytes / 4096,
							FetchedSourceChunks: totalSentBytes / 4096,

							FetchedSourceBytes:       totalSentBytes,
							FetchedSourceBytesPerSec: float64(totalSentBytes) / (time.Since(start).Seconds()),
						}

						// TODO: This looks wrong without a ctx.Done() select, but Puller does not conditionally send here...
						select {
						case statsCh <- toEmit:
						}
					}
				}
			}
		}()

		err := srcDb.Clone(ctx, destDb, tfCh)
		close(tfCh)
		if err == nil {
			return nil
		}
		if !errors.Is(err, pull.ErrCloneUnsupported) {
			return err
		}

		// If clone is unsupported, we can fall back to pull.
	}

	err = destDb.PullChunks(ctx, tempTableDir, srcDb, []hash.Hash{srcRoot}, statsCh, nil)
	if err != nil {
		return err
	}

	var numRetries int
	var success bool
	for err == nil && !success && numRetries < 10 {
		success, err = destDb.CommitRoot(ctx, srcRoot, destRoot)
		if err == nil && !success {
			destRoot, err = destDb.NomsRoot(ctx)
			numRetries += 1
		}
	}
	if err != nil {
		return err
	}

	if !success {
		return errors.New("could not set destination root to the same value as this database's root. the destination database received too many writes while we were pushing and we exhausted our retries.")
	}

	return nil
}

func HandleInitRemoteStorageClientErr(name, url string, err error) error {
	var detail = fmt.Sprintf("the remote: %s '%s' could not be accessed", name, url)
	return fmt.Errorf("%w; %s; %s", ErrFailedToGetRemoteDb, detail, err.Error())
}

// ParseRemoteBranchName takes a remote branch ref name, parses it, and returns the remote name and the branch name.
// For example, given the input string 'origin/john/mybranch' it returns remote name 'origin' and branch name 'john/mybranch'.
func ParseRemoteBranchName(startPt string) (string, string) {
	startPt = strings.TrimPrefix(startPt, "remotes/")
	names := strings.SplitN(startPt, "/", 2)
	if len(names) < 2 {
		return "", ""
	}
	return names[0], names[1]
}

// GetRemoteBranchRef returns, for each remote, the remote tracking ref whose branch name matches the given name.
func GetRemoteBranchRef(ctx context.Context, ddb *doltdb.DoltDB, name string) ([]ref.RemoteRef, error) {
	remoteRefFilter := map[ref.RefType]struct{}{ref.RemoteRefType: {}}
	refs, err := ddb.GetRefsOfType(ctx, remoteRefFilter)
	if err != nil {
		return nil, err
	}

	var remoteRef []ref.RemoteRef
	for _, rf := range refs {
		if remRef, ok := rf.(ref.RemoteRef); ok && remRef.GetBranch() == name {
			remoteRef = append(remoteRef, remRef)
		}
	}

	return remoteRef, nil
}
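
// Illustrative sketch (not part of the original file): a minimal no-op
// ProgStarter/ProgStopper pair matching the types declared at the top of this
// file. It shows the contract the push/fetch helpers above rely on: the starter
// launches a goroutine that drains the stats channel so senders never block,
// and the stopper cancels the context, waits for that goroutine, and closes
// the channel once no more stats will be sent. The names noopProgStarter and
// noopProgStopper are hypothetical.
func noopProgStarter(ctx context.Context) (*sync.WaitGroup, chan pull.Stats) {
	wg := &sync.WaitGroup{}
	statsCh := make(chan pull.Stats, 16)
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-ctx.Done():
				return
			case _, ok := <-statsCh:
				if !ok {
					return
				}
				// Discard the stats; a real starter would render progress for the user.
			}
		}
	}()
	return wg, statsCh
}

func noopProgStopper(cancel context.CancelFunc, wg *sync.WaitGroup, statsCh chan pull.Stats) {
	cancel()
	wg.Wait()
	close(statsCh)
}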