github.com/dolthub/dolt/go@v0.40.5-0.20240520175717-68db7794bea6/libraries/doltcore/doltdb/doltdb.go (about) 1 // Copyright 2019 Dolthub, Inc. 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package doltdb 16 17 import ( 18 "context" 19 "errors" 20 "fmt" 21 "io" 22 "os" 23 "path/filepath" 24 "strings" 25 "time" 26 27 "github.com/dolthub/go-mysql-server/sql" 28 29 "github.com/dolthub/dolt/go/libraries/doltcore/dbfactory" 30 "github.com/dolthub/dolt/go/libraries/doltcore/ref" 31 "github.com/dolthub/dolt/go/libraries/utils/earl" 32 "github.com/dolthub/dolt/go/libraries/utils/filesys" 33 "github.com/dolthub/dolt/go/store/chunks" 34 "github.com/dolthub/dolt/go/store/datas" 35 "github.com/dolthub/dolt/go/store/datas/pull" 36 "github.com/dolthub/dolt/go/store/hash" 37 "github.com/dolthub/dolt/go/store/nbs" 38 "github.com/dolthub/dolt/go/store/prolly" 39 "github.com/dolthub/dolt/go/store/prolly/tree" 40 "github.com/dolthub/dolt/go/store/types" 41 "github.com/dolthub/dolt/go/store/types/edits" 42 ) 43 44 func init() { 45 types.CreateEditAccForMapEdits = edits.NewAsyncSortedEditsWithDefaults 46 } 47 48 // WORKING and STAGED identifiers refer to the working and staged roots in special circumstances where 49 // we expect to resolve a commit spec, but need working or staged 50 const ( 51 Working = "WORKING" 52 Staged = "STAGED" 53 ) 54 55 const ( 56 CreationBranch = "create" 57 58 defaultChunksPerTF = 256 * 1024 59 ) 60 61 var ErrMissingDoltDataDir = errors.New("missing dolt data directory") 62 63 // LocalDirDoltDB stores the db in the current directory 64 var LocalDirDoltDB = "file://./" + dbfactory.DoltDataDir 65 var LocalDirStatsDB = "file://./" + dbfactory.DoltStatsDir 66 67 // InMemDoltDB stores the DoltDB db in memory and is primarily used for testing 68 var InMemDoltDB = "mem://" 69 70 var ErrNoRootValAtHash = errors.New("there is no dolt root value at that hash") 71 var ErrCannotDeleteLastBranch = errors.New("cannot delete the last branch") 72 73 // DoltDB wraps access to the underlying noms database and hides some of the details of the underlying storage. 74 // Additionally the noms codebase uses panics in a way that is non idiomatic and We've opted to recover and return 75 // errors in many cases. 76 type DoltDB struct { 77 db hooksDatabase 78 vrw types.ValueReadWriter 79 ns tree.NodeStore 80 } 81 82 // DoltDBFromCS creates a DoltDB from a noms chunks.ChunkStore 83 func DoltDBFromCS(cs chunks.ChunkStore) *DoltDB { 84 vrw := types.NewValueStore(cs) 85 ns := tree.NewNodeStore(cs) 86 db := datas.NewTypesDatabase(vrw, ns) 87 88 return &DoltDB{hooksDatabase{Database: db}, vrw, ns} 89 } 90 91 // HackDatasDatabaseFromDoltDB unwraps a DoltDB to a datas.Database. 92 // Deprecated: only for use in dolt migrate. 93 func HackDatasDatabaseFromDoltDB(ddb *DoltDB) datas.Database { 94 return ddb.db 95 } 96 97 // LoadDoltDB will acquire a reference to the underlying noms db. If the Location is InMemDoltDB then a reference 98 // to a newly created in memory database will be used. 
// If the location is LocalDirDoltDB, the directory must exist or this returns nil.
func LoadDoltDB(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, fs filesys.Filesys) (*DoltDB, error) {
	return LoadDoltDBWithParams(ctx, nbf, urlStr, fs, nil)
}

func LoadDoltDBWithParams(ctx context.Context, nbf *types.NomsBinFormat, urlStr string, fs filesys.Filesys, params map[string]interface{}) (*DoltDB, error) {
	if urlStr == LocalDirDoltDB {
		exists, isDir := fs.Exists(dbfactory.DoltDataDir)
		if !exists {
			return nil, ErrMissingDoltDataDir
		} else if !isDir {
			return nil, errors.New("file exists where the dolt data directory should be")
		}

		absPath, err := fs.Abs(dbfactory.DoltDataDir)
		if err != nil {
			return nil, err
		}

		urlStr = earl.FileUrlFromPath(filepath.ToSlash(absPath), os.PathSeparator)

		if params == nil {
			params = make(map[string]any)
		}
		params[dbfactory.ChunkJournalParam] = struct{}{}
	}

	db, vrw, ns, err := dbfactory.CreateDB(ctx, nbf, urlStr, params)
	if err != nil {
		return nil, err
	}
	return &DoltDB{hooksDatabase{Database: db}, vrw, ns}, nil
}

// NomsRoot returns the hash of the noms dataset map
func (ddb *DoltDB) NomsRoot(ctx context.Context) (hash.Hash, error) {
	return datas.ChunkStoreFromDatabase(ddb.db).Root(ctx)
}

func (ddb *DoltDB) AccessMode() chunks.ExclusiveAccessMode {
	return datas.ChunkStoreFromDatabase(ddb.db).AccessMode()
}

// CommitRoot executes a chunkStore commit, atomically swapping the root hash of the database manifest
func (ddb *DoltDB) CommitRoot(ctx context.Context, current, last hash.Hash) (bool, error) {
	return datas.ChunkStoreFromDatabase(ddb.db).Commit(ctx, current, last)
}

func (ddb *DoltDB) Has(ctx context.Context, h hash.Hash) (bool, error) {
	return datas.ChunkStoreFromDatabase(ddb.db).Has(ctx, h)
}

func (ddb *DoltDB) CSMetricsSummary() string {
	return datas.GetCSStatSummaryForDB(ddb.db)
}

// WriteEmptyRepo will create and initialize the given db with an initial branch which points to a commit which has
// valid metadata for the creation commit, and an empty RootValue.
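// A minimal usage sketch (illustrative only, not part of the DoltDB API) of opening a local database
// with LoadDoltDB and initializing it with WriteEmptyRepo. The function name exampleInitRepo, the use of
// types.Format_Default and filesys.LocalFS, and the branch/author values are assumptions.
func exampleInitRepo(ctx context.Context) error {
	ddb, err := LoadDoltDB(ctx, types.Format_Default, LocalDirDoltDB, filesys.LocalFS)
	if err != nil {
		return err
	}
	// Creates the first commit (empty RootValue plus creation metadata) and points "main" at it.
	return ddb.WriteEmptyRepo(ctx, "main", "Jane Doe", "jane@example.com")
}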
157 func (ddb *DoltDB) WriteEmptyRepo(ctx context.Context, initBranch, name, email string) error { 158 return ddb.WriteEmptyRepoWithCommitMetaGenerator(ctx, initBranch, datas.MakeCommitMetaGenerator(name, email, datas.CommitterDate())) 159 } 160 161 func (ddb *DoltDB) WriteEmptyRepoWithCommitMetaGenerator(ctx context.Context, initBranch string, commitMeta datas.CommitMetaGenerator) error { 162 return ddb.WriteEmptyRepoWithCommitMetaGeneratorAndDefaultBranch(ctx, commitMeta, ref.NewBranchRef(initBranch)) 163 } 164 165 func (ddb *DoltDB) WriteEmptyRepoWithCommitTimeAndDefaultBranch( 166 ctx context.Context, 167 name, email string, 168 t time.Time, 169 init ref.BranchRef, 170 ) error { 171 return ddb.WriteEmptyRepoWithCommitMetaGeneratorAndDefaultBranch(ctx, datas.MakeCommitMetaGenerator(name, email, t), init) 172 } 173 174 func (ddb *DoltDB) WriteEmptyRepoWithCommitMetaGeneratorAndDefaultBranch( 175 ctx context.Context, 176 commitMetaGenerator datas.CommitMetaGenerator, 177 init ref.BranchRef, 178 ) error { 179 ds, err := ddb.db.GetDataset(ctx, CreationBranch) 180 181 if err != nil { 182 return err 183 } 184 185 if ds.HasHead() { 186 return errors.New("database already exists") 187 } 188 189 rv, err := EmptyRootValue(ctx, ddb.vrw, ddb.ns) 190 191 if err != nil { 192 return err 193 } 194 195 rv, _, err = ddb.WriteRootValue(ctx, rv) 196 197 if err != nil { 198 return err 199 } 200 201 var firstCommit *datas.Commit 202 for { 203 cm, err := commitMetaGenerator.Next() 204 if err != nil { 205 return err 206 } 207 208 commitOpts := datas.CommitOptions{Meta: cm} 209 210 cb := ref.NewInternalRef(CreationBranch) 211 ds, err = ddb.db.GetDataset(ctx, cb.String()) 212 if err != nil { 213 return err 214 } 215 216 firstCommit, err = ddb.db.BuildNewCommit(ctx, ds, rv.NomsValue(), commitOpts) 217 if err != nil { 218 return err 219 } 220 221 if commitMetaGenerator.IsGoodCommit(firstCommit) { 222 break 223 } 224 } 225 226 firstCommitDs, err := ddb.db.WriteCommit(ctx, ds, firstCommit) 227 228 if err != nil { 229 return err 230 } 231 232 ds, err = ddb.db.GetDataset(ctx, init.String()) 233 234 if err != nil { 235 return err 236 } 237 238 headAddr, ok := firstCommitDs.MaybeHeadAddr() 239 if !ok { 240 return errors.New("commit without head") 241 } 242 243 _, err = ddb.db.SetHead(ctx, ds, headAddr, "") 244 return err 245 } 246 247 func (ddb *DoltDB) Close() error { 248 return ddb.db.Close() 249 } 250 251 // GetHashForRefStr resolves a ref string (such as a branch name or tag) and resolves it to a hash.Hash. 
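// A minimal sketch (illustrative only; the ref name is an assumption) of resolving a fully qualified
// ref string to its commit hash with GetHashForRefStr, which is implemented just below.
func exampleHashForRef(ctx context.Context, ddb *DoltDB) (*hash.Hash, error) {
	return ddb.GetHashForRefStr(ctx, "refs/heads/main")
}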
252 func (ddb *DoltDB) GetHashForRefStr(ctx context.Context, ref string) (*hash.Hash, error) { 253 if err := datas.ValidateDatasetId(ref); err != nil { 254 return nil, fmt.Errorf("invalid ref format: %s", ref) 255 } 256 257 ds, err := ddb.db.GetDataset(ctx, ref) 258 259 if err != nil { 260 return nil, err 261 } 262 263 return hashOfCommit(ds, ref) 264 } 265 266 func (ddb *DoltDB) GetHashForRefStrByNomsRoot(ctx context.Context, ref string, nomsRoot hash.Hash) (*hash.Hash, error) { 267 if err := datas.ValidateDatasetId(ref); err != nil { 268 return nil, fmt.Errorf("invalid ref format: %s", ref) 269 } 270 271 ds, err := ddb.db.GetDatasetByRootHash(ctx, ref, nomsRoot) 272 if err != nil { 273 return nil, err 274 } 275 276 return hashOfCommit(ds, ref) 277 } 278 279 // hashOfCommit returns the hash of the commit at the head of the dataset provided 280 func hashOfCommit(ds datas.Dataset, ref string) (*hash.Hash, error) { 281 if !ds.HasHead() { 282 return nil, ErrBranchNotFound 283 } 284 285 if ds.IsTag() { 286 _, commitHash, err := ds.HeadTag() 287 if err != nil { 288 return nil, err 289 } 290 return &commitHash, nil 291 } else { 292 commitHash, ok := ds.MaybeHeadAddr() 293 if !ok { 294 return nil, fmt.Errorf("Unable to load head for %s", ref) 295 } 296 return &commitHash, nil 297 } 298 } 299 300 func getCommitValForRefStr(ctx context.Context, ddb *DoltDB, ref string) (*datas.Commit, error) { 301 commitHash, err := ddb.GetHashForRefStr(ctx, ref) 302 303 if err != nil { 304 return nil, err 305 } 306 307 return datas.LoadCommitAddr(ctx, ddb.vrw, *commitHash) 308 } 309 310 func getCommitValForRefStrByNomsRoot(ctx context.Context, ddb *DoltDB, ref string, nomsRoot hash.Hash) (*datas.Commit, error) { 311 commitHash, err := ddb.GetHashForRefStrByNomsRoot(ctx, ref, nomsRoot) 312 313 if err != nil { 314 return nil, err 315 } 316 317 return datas.LoadCommitAddr(ctx, ddb.vrw, *commitHash) 318 } 319 320 // Roots is a convenience struct to package up the three roots that most library functions will need to inspect and 321 // modify the working set. This struct is designed to be passed by value always: functions should take a Roots as a 322 // param and return a modified one. 323 // 324 // It contains three root values: 325 // Head: The root of the head of the current working branch 326 // Working: The root of the current working set 327 // Staged: The root of the staged value 328 // 329 // See doltEnvironment.Roots(context.Context) 330 type Roots struct { 331 Head RootValue 332 Working RootValue 333 Staged RootValue 334 } 335 336 func (ddb *DoltDB) getHashFromCommitSpec(ctx context.Context, cs *CommitSpec, cwb ref.DoltRef, nomsRoot hash.Hash) (*hash.Hash, error) { 337 switch cs.csType { 338 case hashCommitSpec: 339 parsedHash, ok := hash.MaybeParse(cs.baseSpec) 340 if !ok { 341 return nil, errors.New("invalid hash: " + cs.baseSpec) 342 } 343 return &parsedHash, nil 344 case refCommitSpec: 345 // For a ref in a CommitSpec, we have the following behavior. 346 // If it starts with `refs/`, we look for an exact match before 347 // we try any suffix matches. After that, we try a match on the 348 // user supplied input, with the following four prefixes, in 349 // order: `refs/`, `refs/heads/`, `refs/tags/`, `refs/remotes/`. 
		candidates := []string{
			"refs/" + cs.baseSpec,
			"refs/heads/" + cs.baseSpec,
			"refs/tags/" + cs.baseSpec,
			"refs/remotes/" + cs.baseSpec,
		}
		if strings.HasPrefix(cs.baseSpec, "refs/") {
			candidates = []string{
				cs.baseSpec,
				"refs/" + cs.baseSpec,
				"refs/heads/" + cs.baseSpec,
				"refs/tags/" + cs.baseSpec,
				"refs/remotes/" + cs.baseSpec,
			}
		}
		for _, candidate := range candidates {
			var valueHash *hash.Hash
			var err error
			if nomsRoot.IsEmpty() {
				valueHash, err = ddb.GetHashForRefStr(ctx, candidate)
			} else {
				valueHash, err = ddb.GetHashForRefStrByNomsRoot(ctx, candidate, nomsRoot)
			}
			if err == nil {
				return valueHash, nil
			}
			if err != ErrBranchNotFound {
				return nil, err
			}
		}
		return nil, fmt.Errorf("%w: %s", ErrBranchNotFound, cs.baseSpec)
	case headCommitSpec:
		if cwb == nil {
			return nil, fmt.Errorf("cannot use a nil current working branch with a HEAD commit spec")
		}
		if nomsRoot.IsEmpty() {
			return ddb.GetHashForRefStr(ctx, cwb.String())
		} else {
			return ddb.GetHashForRefStrByNomsRoot(ctx, cwb.String(), nomsRoot)
		}
	default:
		panic("unrecognized commit spec csType: " + cs.csType)
	}
}

// Resolve takes a CommitSpec and returns a Commit, or an error if the commit cannot be found.
// If the CommitSpec is HEAD, Resolve also needs the DoltRef of the current working branch.
func (ddb *DoltDB) Resolve(ctx context.Context, cs *CommitSpec, cwb ref.DoltRef) (*OptionalCommit, error) {
	if cs == nil {
		panic("nil commit spec")
	}

	hash, err := ddb.getHashFromCommitSpec(ctx, cs, cwb, hash.Hash{})
	if err != nil {
		return nil, err
	}

	commitValue, err := datas.LoadCommitAddr(ctx, ddb.vrw, *hash)
	if err != nil {
		return nil, err
	}

	if commitValue.IsGhost() {
		return &OptionalCommit{nil, *hash}, nil
	}

	commit, err := NewCommit(ctx, ddb.vrw, ddb.ns, commitValue)
	if err != nil {
		return nil, err
	}

	return commit.GetAncestor(ctx, cs.aSpec)
}

// BootstrapShallowResolve is a special case of Resolve that is used to resolve a commit prior to pulling its history
// in a shallow clone. In general, application code should call Resolve and get an OptionalCommit. This is a special case
// where we need to get the head commit for the commit closure used to determine which commits should be skipped.
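// An illustrative sketch of the Resolve flow described above: parse a commit spec, resolve it against
// the current working branch, and unwrap the OptionalCommit. NewCommitSpec is defined elsewhere in this
// package; the spec string and branch name are assumptions.
func exampleResolveSpec(ctx context.Context, ddb *DoltDB) (*Commit, error) {
	cs, err := NewCommitSpec("HEAD~1")
	if err != nil {
		return nil, err
	}
	optCmt, err := ddb.Resolve(ctx, cs, ref.NewBranchRef("main"))
	if err != nil {
		return nil, err
	}
	commit, ok := optCmt.ToCommit()
	if !ok {
		// The commit is a ghost, i.e. not materialized locally (e.g. in a shallow clone).
		return nil, ErrGhostCommitEncountered
	}
	return commit, nil
}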
427 func (ddb *DoltDB) BootstrapShallowResolve(ctx context.Context, cs *CommitSpec) (prolly.CommitClosure, error) { 428 if cs == nil { 429 panic("nil commit spec") 430 } 431 432 hash, err := ddb.getHashFromCommitSpec(ctx, cs, nil, hash.Hash{}) 433 if err != nil { 434 return prolly.CommitClosure{}, err 435 } 436 437 commitValue, err := datas.LoadCommitAddr(ctx, ddb.vrw, *hash) 438 if err != nil { 439 return prolly.CommitClosure{}, err 440 } 441 442 if commitValue.IsGhost() { 443 return prolly.CommitClosure{}, ErrGhostCommitEncountered 444 } 445 446 return getCommitClosure(ctx, commitValue, ddb.vrw, ddb.ns) 447 } 448 449 func (ddb *DoltDB) ResolveByNomsRoot(ctx *sql.Context, cs *CommitSpec, cwb ref.DoltRef, root hash.Hash) (*OptionalCommit, error) { 450 if cs == nil { 451 panic("nil commit spec") 452 } 453 454 hash, err := ddb.getHashFromCommitSpec(ctx, cs, cwb, root) 455 if err != nil { 456 return nil, err 457 } 458 459 commitValue, err := datas.LoadCommitAddr(ctx, ddb.vrw, *hash) 460 if err != nil { 461 return nil, err 462 } 463 464 if commitValue.IsGhost() { 465 return &OptionalCommit{nil, *hash}, nil 466 } 467 468 commit, err := NewCommit(ctx, ddb.vrw, ddb.ns, commitValue) 469 if err != nil { 470 return nil, err 471 } 472 return commit.GetAncestor(ctx, cs.aSpec) 473 } 474 475 // ResolveCommitRef takes a DoltRef and returns a Commit, or an error if the commit cannot be found. The ref given must 476 // point to a Commit. 477 func (ddb *DoltDB) ResolveCommitRef(ctx context.Context, ref ref.DoltRef) (*Commit, error) { 478 commitVal, err := getCommitValForRefStr(ctx, ddb, ref.String()) 479 if err != nil { 480 return nil, err 481 } 482 483 if commitVal.IsGhost() { 484 return nil, ErrGhostCommitEncountered 485 } 486 487 return NewCommit(ctx, ddb.vrw, ddb.ns, commitVal) 488 } 489 490 // ResolveCommitRefAtRoot takes a DoltRef and returns a Commit, or an error if the commit cannot be found. The ref given must 491 // point to a Commit. 492 func (ddb *DoltDB) ResolveCommitRefAtRoot(ctx context.Context, ref ref.DoltRef, nomsRoot hash.Hash) (*Commit, error) { 493 commitVal, err := getCommitValForRefStrByNomsRoot(ctx, ddb, ref.String(), nomsRoot) 494 if err != nil { 495 return nil, err 496 } 497 498 if commitVal.IsGhost() { 499 return nil, ErrGhostCommitEncountered 500 } 501 502 return NewCommit(ctx, ddb.vrw, ddb.ns, commitVal) 503 } 504 505 // ResolveBranchRoots returns the Roots for the branch given 506 func (ddb *DoltDB) ResolveBranchRoots(ctx context.Context, branch ref.BranchRef) (Roots, error) { 507 commitRef, err := ddb.ResolveCommitRef(ctx, branch) 508 if err != nil { 509 return Roots{}, err 510 } 511 512 headRoot, err := commitRef.GetRootValue(ctx) 513 if err != nil { 514 return Roots{}, err 515 } 516 517 wsRef, err := ref.WorkingSetRefForHead(branch) 518 if err != nil { 519 return Roots{}, err 520 } 521 522 ws, err := ddb.ResolveWorkingSet(ctx, wsRef) 523 if err != nil { 524 return Roots{}, err 525 } 526 527 return Roots{ 528 Head: headRoot, 529 Working: ws.WorkingRoot(), 530 Staged: ws.StagedRoot(), 531 }, nil 532 } 533 534 // ResolveTag takes a TagRef and returns the corresponding Tag object. 
535 func (ddb *DoltDB) ResolveTag(ctx context.Context, tagRef ref.TagRef) (*Tag, error) { 536 ds, err := ddb.db.GetDataset(ctx, tagRef.String()) 537 if err != nil { 538 return nil, ErrTagNotFound 539 } 540 541 if !ds.HasHead() { 542 return nil, ErrTagNotFound 543 } 544 545 if !ds.IsTag() { 546 return nil, fmt.Errorf("tagRef head is not a tag") 547 } 548 549 return NewTag(ctx, tagRef.GetPath(), ds, ddb.vrw, ddb.ns) 550 } 551 552 // ResolveWorkingSet takes a WorkingSetRef and returns the corresponding WorkingSet object. 553 func (ddb *DoltDB) ResolveWorkingSet(ctx context.Context, workingSetRef ref.WorkingSetRef) (*WorkingSet, error) { 554 ds, err := ddb.db.GetDataset(ctx, workingSetRef.String()) 555 556 if err != nil { 557 return nil, ErrWorkingSetNotFound 558 } 559 560 return ddb.workingSetFromDataset(ctx, workingSetRef, ds) 561 } 562 563 // ResolveWorkingSetAtRoot returns the working set object as it existed at the given root hash. 564 func (ddb *DoltDB) ResolveWorkingSetAtRoot(ctx context.Context, workingSetRef ref.WorkingSetRef, nomsRoot hash.Hash) (*WorkingSet, error) { 565 ds, err := ddb.db.GetDatasetByRootHash(ctx, workingSetRef.String(), nomsRoot) 566 567 if err != nil { 568 return nil, ErrWorkingSetNotFound 569 } 570 571 return ddb.workingSetFromDataset(ctx, workingSetRef, ds) 572 } 573 574 func (ddb *DoltDB) workingSetFromDataset(ctx context.Context, workingSetRef ref.WorkingSetRef, ds datas.Dataset) (*WorkingSet, error) { 575 if !ds.HasHead() { 576 return nil, ErrWorkingSetNotFound 577 } 578 579 if !ds.IsWorkingSet() { 580 return nil, fmt.Errorf("workingSetRef head is not a workingSetRef") 581 } 582 583 return newWorkingSet(ctx, workingSetRef.GetPath(), ddb.vrw, ddb.ns, ds) 584 } 585 586 // TODO: convenience method to resolve the head commit of a branch. 587 588 // WriteRootValue will write a doltdb.RootValue instance to the database. This 589 // value will not be associated with a commit and can be committed by hash at a 590 // later time. Returns an updated root value and the hash of the value 591 // written. This method is the primary place in doltcore that handles setting 592 // the FeatureVersion of root values to the current value, so all writes of 593 // RootValues should happen here. 594 func (ddb *DoltDB) WriteRootValue(ctx context.Context, rv RootValue) (RootValue, hash.Hash, error) { 595 nrv, ref, err := ddb.writeRootValue(ctx, rv) 596 if err != nil { 597 return nil, hash.Hash{}, err 598 } 599 return nrv, ref.TargetHash(), nil 600 } 601 602 func (ddb *DoltDB) writeRootValue(ctx context.Context, rv RootValue) (RootValue, types.Ref, error) { 603 rv, err := rv.SetFeatureVersion(DoltFeatureVersion) 604 if err != nil { 605 return nil, types.Ref{}, err 606 } 607 ref, err := ddb.vrw.WriteValue(ctx, rv.NomsValue()) 608 if err != nil { 609 return nil, types.Ref{}, err 610 } 611 return rv, ref, nil 612 } 613 614 // ReadRootValue reads the RootValue associated with the hash given and returns it. Returns an error if the value cannot 615 // be read, or if the hash given doesn't represent a dolt RootValue. 616 func (ddb *DoltDB) ReadRootValue(ctx context.Context, h hash.Hash) (RootValue, error) { 617 val, err := ddb.vrw.ReadValue(ctx, h) 618 if err != nil { 619 return nil, err 620 } 621 return decodeRootNomsValue(ctx, ddb.vrw, ddb.ns, val) 622 } 623 624 // ReadCommit reads the Commit whose hash is |h|, if one exists. 
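// An illustrative sketch of ReadCommit (implemented below): load an OptionalCommit by hash and check
// whether it is a ghost before using it. The surrounding wiring is assumed.
func exampleReadCommit(ctx context.Context, ddb *DoltDB, h hash.Hash) (*Commit, error) {
	optCmt, err := ddb.ReadCommit(ctx, h)
	if err != nil {
		return nil, err
	}
	if commit, ok := optCmt.ToCommit(); ok {
		return commit, nil
	}
	return nil, ErrGhostCommitEncountered
}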
625 func (ddb *DoltDB) ReadCommit(ctx context.Context, h hash.Hash) (*OptionalCommit, error) { 626 c, err := datas.LoadCommitAddr(ctx, ddb.vrw, h) 627 if err != nil { 628 return nil, err 629 } 630 631 if c.IsGhost() { 632 return &OptionalCommit{nil, h}, nil 633 } 634 635 newC, err := NewCommit(ctx, ddb.vrw, ddb.ns, c) 636 if err != nil { 637 return nil, err 638 } 639 return &OptionalCommit{newC, h}, nil 640 } 641 642 // Commit will update a branch's head value to be that of a previously committed root value hash 643 func (ddb *DoltDB) Commit(ctx context.Context, valHash hash.Hash, dref ref.DoltRef, cm *datas.CommitMeta) (*Commit, error) { 644 return ddb.CommitWithParentSpecs(ctx, valHash, dref, nil, cm) 645 } 646 647 // FastForwardWithWorkspaceCheck will perform a fast forward update of the branch given to the commit given, but only 648 // if the working set is in sync with the head of the branch given. This is used in the course of pushing to a remote. 649 // If the target doesn't currently have the working set ref, then no working set change will be made. 650 func (ddb *DoltDB) FastForwardWithWorkspaceCheck(ctx context.Context, branch ref.DoltRef, commit *Commit) error { 651 ds, err := ddb.db.GetDataset(ctx, branch.String()) 652 if err != nil { 653 return err 654 } 655 656 addr, err := commit.HashOf() 657 if err != nil { 658 return err 659 } 660 661 ws := "" 662 pushConcurrencyControl := chunks.GetPushConcurrencyControl(datas.ChunkStoreFromDatabase(ddb.db)) 663 if pushConcurrencyControl == chunks.PushConcurrencyControl_AssertWorkingSet { 664 wsRef, err := ref.WorkingSetRefForHead(branch) 665 if err != nil { 666 return err 667 } 668 ws = wsRef.String() 669 } 670 671 _, err = ddb.db.FastForward(ctx, ds, addr, ws) 672 673 return err 674 } 675 676 // FastForward fast-forwards the branch given to the commit given. 677 func (ddb *DoltDB) FastForward(ctx context.Context, branch ref.DoltRef, commit *Commit) error { 678 addr, err := commit.HashOf() 679 if err != nil { 680 return err 681 } 682 683 return ddb.FastForwardToHash(ctx, branch, addr) 684 } 685 686 // FastForwardToHash fast-forwards the branch given to the commit hash given. 687 func (ddb *DoltDB) FastForwardToHash(ctx context.Context, branch ref.DoltRef, hash hash.Hash) error { 688 ds, err := ddb.db.GetDataset(ctx, branch.String()) 689 if err != nil { 690 return err 691 } 692 693 _, err = ddb.db.FastForward(ctx, ds, hash, "") 694 695 return err 696 } 697 698 // CanFastForward returns whether the given branch can be fast-forwarded to the commit given. 699 func (ddb *DoltDB) CanFastForward(ctx context.Context, branch ref.DoltRef, new *Commit) (bool, error) { 700 current, err := ddb.ResolveCommitRef(ctx, branch) 701 702 if err != nil { 703 if err == ErrBranchNotFound { 704 return true, nil 705 } 706 707 return false, err 708 } 709 710 return current.CanFastForwardTo(ctx, new) 711 } 712 713 // SetHeadToCommit sets the given ref to point at the given commit. It is used in the course of 'force' updates. 714 func (ddb *DoltDB) SetHeadToCommit(ctx context.Context, ref ref.DoltRef, cm *Commit) error { 715 addr, err := cm.HashOf() 716 if err != nil { 717 return err 718 } 719 720 return ddb.SetHead(ctx, ref, addr) 721 } 722 723 // SetHeadAndWorkingSetToCommit sets the given ref to the given commit, and ensures that working is in sync 724 // with the head. Used for 'force' pushes. 
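// Related to the fast-forward vs 'force' distinction above, an illustrative sketch that uses
// CanFastForward, FastForward and SetHeadToCommit (all defined above). Whether a forced head move is
// acceptable is application policy and is assumed here.
func exampleAdvanceBranch(ctx context.Context, ddb *DoltDB, branch ref.DoltRef, cm *Commit) error {
	canFF, err := ddb.CanFastForward(ctx, branch, cm)
	if err != nil {
		return err
	}
	if canFF {
		return ddb.FastForward(ctx, branch, cm)
	}
	// 'force' update: move the ref without requiring ancestry.
	return ddb.SetHeadToCommit(ctx, branch, cm)
}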
725 func (ddb *DoltDB) SetHeadAndWorkingSetToCommit(ctx context.Context, rf ref.DoltRef, cm *Commit) error { 726 addr, err := cm.HashOf() 727 if err != nil { 728 return err 729 } 730 731 wsRef, err := ref.WorkingSetRefForHead(rf) 732 if err != nil { 733 return err 734 } 735 736 ds, err := ddb.db.GetDataset(ctx, rf.String()) 737 if err != nil { 738 return err 739 } 740 741 _, err = ddb.db.SetHead(ctx, ds, addr, wsRef.String()) 742 return err 743 } 744 745 func (ddb *DoltDB) SetHead(ctx context.Context, ref ref.DoltRef, addr hash.Hash) error { 746 ds, err := ddb.db.GetDataset(ctx, ref.String()) 747 748 if err != nil { 749 return err 750 } 751 752 _, err = ddb.db.SetHead(ctx, ds, addr, "") 753 return err 754 } 755 756 // CommitWithParentSpecs commits the value hash given to the branch given, using the list of parent hashes given. Returns an 757 // error if the value or any parents can't be resolved, or if anything goes wrong accessing the underlying storage. 758 func (ddb *DoltDB) CommitWithParentSpecs(ctx context.Context, valHash hash.Hash, dref ref.DoltRef, parentCmSpecs []*CommitSpec, cm *datas.CommitMeta) (*Commit, error) { 759 var parentCommits []*Commit 760 for _, parentCmSpec := range parentCmSpecs { 761 cm, err := ddb.Resolve(ctx, parentCmSpec, nil) 762 if err != nil { 763 return nil, err 764 } 765 766 hardCommit, ok := cm.ToCommit() 767 if !ok { 768 return nil, ErrGhostCommitEncountered 769 } 770 771 parentCommits = append(parentCommits, hardCommit) 772 } 773 return ddb.CommitWithParentCommits(ctx, valHash, dref, parentCommits, cm) 774 } 775 776 func (ddb *DoltDB) CommitWithParentCommits(ctx context.Context, valHash hash.Hash, dref ref.DoltRef, parentCommits []*Commit, cm *datas.CommitMeta) (*Commit, error) { 777 val, err := ddb.vrw.ReadValue(ctx, valHash) 778 779 if err != nil { 780 return nil, err 781 } 782 783 if !isRootValue(ddb.vrw.Format(), val) { 784 return nil, errors.New("can't commit a value that is not a valid root value") 785 } 786 787 ds, err := ddb.db.GetDataset(ctx, dref.String()) 788 789 if err != nil { 790 return nil, err 791 } 792 793 var parents []hash.Hash 794 headAddr, hasHead := ds.MaybeHeadAddr() 795 if err != nil { 796 return nil, err 797 } 798 if hasHead { 799 parents = append(parents, headAddr) 800 } 801 802 for _, cm := range parentCommits { 803 addr, err := cm.HashOf() 804 if err != nil { 805 return nil, err 806 } 807 if addr != headAddr { 808 parents = append(parents, addr) 809 } 810 } 811 commitOpts := datas.CommitOptions{Parents: parents, Meta: cm} 812 813 return ddb.CommitValue(ctx, dref, val, commitOpts) 814 } 815 816 func (ddb *DoltDB) CommitValue(ctx context.Context, dref ref.DoltRef, val types.Value, commitOpts datas.CommitOptions) (*Commit, error) { 817 ds, err := ddb.db.GetDataset(ctx, dref.String()) 818 if err != nil { 819 return nil, err 820 } 821 822 ds, err = ddb.db.Commit(ctx, ds, val, commitOpts) 823 if err != nil { 824 return nil, err 825 } 826 827 r, ok, err := ds.MaybeHeadRef() 828 if err != nil { 829 return nil, err 830 } 831 if !ok { 832 return nil, errors.New("Commit has no head but commit succeeded. This is a bug.") 833 } 834 835 dc, err := datas.LoadCommitRef(ctx, ddb.vrw, r) 836 if err != nil { 837 return nil, err 838 } 839 840 if dc.IsGhost() { 841 return nil, ErrGhostCommitEncountered 842 } 843 844 return NewCommit(ctx, ddb.vrw, ddb.ns, dc) 845 } 846 847 // dangling commits are unreferenced by any branch or ref. They are created in the course of programmatic updates 848 // such as rebase. 
// You must create a ref to a dangling commit for it to be reachable.
func (ddb *DoltDB) CommitDanglingWithParentCommits(ctx context.Context, valHash hash.Hash, parentCommits []*Commit, cm *datas.CommitMeta) (*Commit, error) {
	val, err := ddb.vrw.ReadValue(ctx, valHash)
	if err != nil {
		return nil, err
	}
	if !isRootValue(ddb.vrw.Format(), val) {
		return nil, errors.New("can't commit a value that is not a valid root value")
	}

	var parents []hash.Hash
	for _, cm := range parentCommits {
		addr, err := cm.HashOf()
		if err != nil {
			return nil, err
		}
		parents = append(parents, addr)
	}
	commitOpts := datas.CommitOptions{Parents: parents, Meta: cm}

	return ddb.CommitDangling(ctx, val, commitOpts)
}

// CommitDangling creates a new Commit for |val| that is not referenced by any DoltRef.
func (ddb *DoltDB) CommitDangling(ctx context.Context, val types.Value, opts datas.CommitOptions) (*Commit, error) {
	cs := datas.ChunkStoreFromDatabase(ddb.db)

	dcommit, err := datas.NewCommitForValue(ctx, cs, ddb.vrw, ddb.ns, val, opts)
	if err != nil {
		return nil, err
	}

	_, err = ddb.vrw.WriteValue(ctx, dcommit.NomsValue())
	if err != nil {
		return nil, err
	}

	return NewCommit(ctx, ddb.vrw, ddb.ns, dcommit)
}

// ValueReadWriter returns the underlying noms database as a types.ValueReadWriter.
func (ddb *DoltDB) ValueReadWriter() types.ValueReadWriter {
	return ddb.vrw
}

func (ddb *DoltDB) NodeStore() tree.NodeStore {
	return ddb.ns
}

func (ddb *DoltDB) Format() *types.NomsBinFormat {
	return ddb.vrw.Format()
}

// ResolveParent returns the n-th ancestor of a given commit (direct parent is index 0). The error return value will be
// non-nil in the case that the commit cannot be resolved, there aren't as many ancestors as requested, or the
// underlying storage cannot be accessed.
func (ddb *DoltDB) ResolveParent(ctx context.Context, commit *Commit, parentIdx int) (*OptionalCommit, error) {
	return commit.GetParent(ctx, parentIdx)
}

func (ddb *DoltDB) ResolveAllParents(ctx context.Context, commit *Commit) ([]*OptionalCommit, error) {
	num := commit.NumParents()
	resolved := make([]*OptionalCommit, num)
	for i := 0; i < num; i++ {
		parent, err := ddb.ResolveParent(ctx, commit, i)
		if err != nil {
			return nil, err
		}
		resolved[i] = parent
	}
	return resolved, nil
}

// HasRef returns whether the branch given exists in this database.
func (ddb *DoltDB) HasRef(ctx context.Context, doltRef ref.DoltRef) (bool, error) {
	ds, err := ddb.db.GetDataset(ctx, doltRef.String())
	if err != nil {
		if errors.Is(err, datas.ErrInvalidDatasetID) {
			return false, nil
		}
		return false, err
	}
	return ds.HasHead(), nil
}

var branchRefFilter = map[ref.RefType]struct{}{ref.BranchRefType: {}}

// GetBranches returns a list of all branches in the database.
func (ddb *DoltDB) GetBranches(ctx context.Context) ([]ref.DoltRef, error) {
	return ddb.GetRefsOfType(ctx, branchRefFilter)
}

// GetBranchesByNomsRoot returns a list of all branches in the database as of the noms root hash given.
func (ddb *DoltDB) GetBranchesByNomsRoot(ctx context.Context, nomsRoot hash.Hash) ([]ref.DoltRef, error) {
	return ddb.GetRefsOfTypeByNomsRoot(ctx, branchRefFilter, nomsRoot)
}

// HasBranch returns whether the DB has a branch with the name given, case-insensitive.
// Returns the case-sensitive matching branch if found, as well as a bool indicating if there was a case-insensitive match, and any error.
func (ddb *DoltDB) HasBranch(ctx context.Context, branchName string) (string, bool, error) {
	branchName = strings.ToLower(branchName)
	branches, err := ddb.GetRefsOfType(ctx, branchRefFilter)
	if err != nil {
		return "", false, err
	}

	for _, b := range branches {
		if strings.ToLower(b.GetPath()) == branchName {
			return b.GetPath(), true, nil
		}
	}

	return "", false, nil
}

// HasRemoteTrackingBranch returns whether the DB has a remote tracking branch with the name given, case-insensitive.
// Returns the case-sensitive matching branch name if found, a bool indicating if there was a case-insensitive match,
// the remote tracking branchRef that is the only match for the branchName, and any error.
func (ddb *DoltDB) HasRemoteTrackingBranch(ctx context.Context, branchName string) (string, bool, ref.RemoteRef, error) {
	remoteRefFound := false
	var remoteRef ref.RemoteRef

	remoteRefs, err := ddb.GetRemoteRefs(ctx)
	if err != nil {
		return "", false, ref.RemoteRef{}, err
	}

	for _, rf := range remoteRefs {
		if remRef, ok := rf.(ref.RemoteRef); ok && remRef.GetBranch() == branchName {
			if remoteRefFound {
				// if multiple remote tracking branches match the branch name, the result is ambiguous, so error out
				return "", false, ref.RemoteRef{}, fmt.Errorf("'%s' matched multiple remote tracking branches", branchName)
			}
			remoteRefFound = true
			remoteRef = remRef
		}
	}

	if remoteRefFound {
		return branchName, true, remoteRef, nil
	}

	return "", false, ref.RemoteRef{}, nil
}

type RefWithHash struct {
	Ref  ref.DoltRef
	Hash hash.Hash
}

// GetBranchesWithHashes returns all the branches in the database with their hashes
func (ddb *DoltDB) GetBranchesWithHashes(ctx context.Context) ([]RefWithHash, error) {
	var refs []RefWithHash
	err := ddb.VisitRefsOfType(ctx, branchRefFilter, func(r ref.DoltRef, addr hash.Hash) error {
		refs = append(refs, RefWithHash{r, addr})
		return nil
	})
	return refs, err
}

var allRefsFilter = map[ref.RefType]struct{}{
	ref.BranchRefType:    {},
	ref.TagRefType:       {},
	ref.WorkspaceRefType: {},
}

// GetRefsWithHashes returns the list of all commit refs in the database: tags, branches, and workspaces.
func (ddb *DoltDB) GetRefsWithHashes(ctx context.Context) ([]RefWithHash, error) {
	var refs []RefWithHash
	err := ddb.VisitRefsOfType(ctx, allRefsFilter, func(r ref.DoltRef, addr hash.Hash) error {
		refs = append(refs, RefWithHash{r, addr})
		return nil
	})
	return refs, err
}

var tagsRefFilter = map[ref.RefType]struct{}{ref.TagRefType: {}}

// GetTags returns a list of all tags in the database.
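// An illustrative sketch of enumerating refs: GetBranches is defined above, GetTags and HasTag just
// below. The tag name is an assumption.
func exampleListRefs(ctx context.Context, ddb *DoltDB) error {
	branches, err := ddb.GetBranches(ctx)
	if err != nil {
		return err
	}
	tags, err := ddb.GetTags(ctx)
	if err != nil {
		return err
	}
	hasV1, err := ddb.HasTag(ctx, "v1.0.0")
	if err != nil {
		return err
	}
	fmt.Printf("%d branches, %d tags, v1.0.0 present: %v\n", len(branches), len(tags), hasV1)
	return nil
}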
1027 func (ddb *DoltDB) GetTags(ctx context.Context) ([]ref.DoltRef, error) { 1028 return ddb.GetRefsOfType(ctx, tagsRefFilter) 1029 } 1030 1031 // HasTag returns whether the DB has a tag with the name given 1032 func (ddb *DoltDB) HasTag(ctx context.Context, tagName string) (bool, error) { 1033 tags, err := ddb.GetRefsOfType(ctx, tagsRefFilter) 1034 if err != nil { 1035 return false, err 1036 } 1037 1038 for _, t := range tags { 1039 if t.GetPath() == tagName { 1040 return true, nil 1041 } 1042 } 1043 1044 return false, nil 1045 } 1046 1047 type TagWithHash struct { 1048 Tag *Tag 1049 Hash hash.Hash 1050 } 1051 1052 // GetTagsWithHashes returns a list of objects containing Tags with their associated Commit's hashes 1053 func (ddb *DoltDB) GetTagsWithHashes(ctx context.Context) ([]TagWithHash, error) { 1054 var refs []TagWithHash 1055 err := ddb.VisitRefsOfType(ctx, tagsRefFilter, func(r ref.DoltRef, _ hash.Hash) error { 1056 if tr, ok := r.(ref.TagRef); ok { 1057 tag, err := ddb.ResolveTag(ctx, tr) 1058 if err != nil { 1059 return err 1060 } 1061 h, err := tag.Commit.HashOf() 1062 if err != nil { 1063 return err 1064 } 1065 refs = append(refs, TagWithHash{tag, h}) 1066 } 1067 return nil 1068 }) 1069 return refs, err 1070 } 1071 1072 var workspacesRefFilter = map[ref.RefType]struct{}{ref.WorkspaceRefType: {}} 1073 1074 // GetWorkspaces returns a list of all workspaces in the database. 1075 func (ddb *DoltDB) GetWorkspaces(ctx context.Context) ([]ref.DoltRef, error) { 1076 return ddb.GetRefsOfType(ctx, workspacesRefFilter) 1077 } 1078 1079 var remotesRefFilter = map[ref.RefType]struct{}{ref.RemoteRefType: {}} 1080 1081 // GetRemoteRefs returns a list of all remotes in the database. 1082 func (ddb *DoltDB) GetRemoteRefs(ctx context.Context) ([]ref.DoltRef, error) { 1083 return ddb.GetRefsOfType(ctx, remotesRefFilter) 1084 } 1085 1086 type RemoteWithHash struct { 1087 Ref ref.DoltRef 1088 Hash hash.Hash 1089 } 1090 1091 func (ddb *DoltDB) GetRemotesWithHashes(ctx context.Context) ([]RemoteWithHash, error) { 1092 var refs []RemoteWithHash 1093 err := ddb.VisitRefsOfType(ctx, remotesRefFilter, func(r ref.DoltRef, addr hash.Hash) error { 1094 refs = append(refs, RemoteWithHash{r, addr}) 1095 return nil 1096 }) 1097 return refs, err 1098 } 1099 1100 // GetHeadRefs returns a list of all refs that point to a Commit 1101 func (ddb *DoltDB) GetHeadRefs(ctx context.Context) ([]ref.DoltRef, error) { 1102 return ddb.GetRefsOfType(ctx, ref.HeadRefTypes) 1103 } 1104 1105 func (ddb *DoltDB) VisitRefsOfType(ctx context.Context, refTypeFilter map[ref.RefType]struct{}, visit func(r ref.DoltRef, addr hash.Hash) error) error { 1106 dss, err := ddb.db.Datasets(ctx) 1107 if err != nil { 1108 return err 1109 } 1110 1111 return visitDatasets(ctx, refTypeFilter, visit, dss) 1112 } 1113 1114 func (ddb *DoltDB) VisitRefsOfTypeByNomsRoot(ctx context.Context, refTypeFilter map[ref.RefType]struct{}, nomsRoot hash.Hash, visit func(r ref.DoltRef, addr hash.Hash) error) error { 1115 dss, err := ddb.db.DatasetsByRootHash(ctx, nomsRoot) 1116 if err != nil { 1117 return err 1118 } 1119 1120 return visitDatasets(ctx, refTypeFilter, visit, dss) 1121 } 1122 1123 func visitDatasets(ctx context.Context, refTypeFilter map[ref.RefType]struct{}, visit func(r ref.DoltRef, addr hash.Hash) error, dss datas.DatasetsMap) error { 1124 return dss.IterAll(ctx, func(key string, addr hash.Hash) error { 1125 keyStr := key 1126 1127 if ref.IsRef(keyStr) { 1128 dref, err := ref.Parse(keyStr) 1129 if err != nil { 1130 return err 1131 } 1132 1133 if 
_, ok := refTypeFilter[dref.GetType()]; ok { 1134 err = visit(dref, addr) 1135 if err != nil { 1136 return err 1137 } 1138 } 1139 } 1140 1141 return nil 1142 }) 1143 } 1144 1145 // GetRefByNameInsensitive searches this Dolt database's branch, tag, and head refs for a case-insensitive 1146 // match of the specified ref name. If a matching DoltRef is found, it is returned; otherwise an error is returned. 1147 func (ddb *DoltDB) GetRefByNameInsensitive(ctx context.Context, refName string) (ref.DoltRef, error) { 1148 branchRefs, err := ddb.GetBranches(ctx) 1149 if err != nil { 1150 return nil, err 1151 } 1152 for _, branchRef := range branchRefs { 1153 if strings.ToLower(branchRef.GetPath()) == strings.ToLower(refName) { 1154 return branchRef, nil 1155 } 1156 } 1157 1158 headRefs, err := ddb.GetHeadRefs(ctx) 1159 if err != nil { 1160 return nil, err 1161 } 1162 for _, headRef := range headRefs { 1163 if strings.ToLower(headRef.GetPath()) == strings.ToLower(refName) { 1164 return headRef, nil 1165 } 1166 } 1167 1168 tagRefs, err := ddb.GetTags(ctx) 1169 if err != nil { 1170 return nil, err 1171 } 1172 for _, tagRef := range tagRefs { 1173 if strings.ToLower(tagRef.GetPath()) == strings.ToLower(refName) { 1174 return tagRef, nil 1175 } 1176 } 1177 1178 return nil, ref.ErrInvalidRefSpec 1179 } 1180 1181 func (ddb *DoltDB) GetRefsOfType(ctx context.Context, refTypeFilter map[ref.RefType]struct{}) ([]ref.DoltRef, error) { 1182 var refs []ref.DoltRef 1183 err := ddb.VisitRefsOfType(ctx, refTypeFilter, func(r ref.DoltRef, _ hash.Hash) error { 1184 refs = append(refs, r) 1185 return nil 1186 }) 1187 return refs, err 1188 } 1189 1190 func (ddb *DoltDB) GetRefsOfTypeByNomsRoot(ctx context.Context, refTypeFilter map[ref.RefType]struct{}, nomsRoot hash.Hash) ([]ref.DoltRef, error) { 1191 var refs []ref.DoltRef 1192 err := ddb.VisitRefsOfTypeByNomsRoot(ctx, refTypeFilter, nomsRoot, func(r ref.DoltRef, _ hash.Hash) error { 1193 refs = append(refs, r) 1194 return nil 1195 }) 1196 return refs, err 1197 } 1198 1199 // NewBranchAtCommit creates a new branch with HEAD at the commit given. Branch names must pass IsValidUserBranchName. 1200 // Silently overwrites any existing branch with the same name given, if one exists. 1201 func (ddb *DoltDB) NewBranchAtCommit(ctx context.Context, branchRef ref.DoltRef, commit *Commit, replicationStatus *ReplicationStatusController) error { 1202 if !IsValidBranchRef(branchRef) { 1203 panic(fmt.Sprintf("invalid branch name %s, use IsValidUserBranchName check", branchRef.String())) 1204 } 1205 1206 ds, err := ddb.db.GetDataset(ctx, branchRef.String()) 1207 if err != nil { 1208 return err 1209 } 1210 1211 addr, err := commit.HashOf() 1212 if err != nil { 1213 return err 1214 } 1215 1216 _, err = ddb.db.SetHead(ctx, ds, addr, "") 1217 if err != nil { 1218 return err 1219 } 1220 1221 // Update the corresponding working set at the same time, either by updating it or creating a new one 1222 // TODO: find all the places HEAD can change, update working set too. This is only necessary when we don't already 1223 // update the working set when the head changes. 
	commitRoot, err := commit.GetRootValue(ctx)
	if err != nil {
		return err
	}

	wsRef, _ := ref.WorkingSetRefForHead(branchRef)

	var ws *WorkingSet
	var currWsHash hash.Hash
	ws, err = ddb.ResolveWorkingSet(ctx, wsRef)
	if err == ErrWorkingSetNotFound {
		ws = EmptyWorkingSet(wsRef)
	} else if err != nil {
		return err
	} else {
		currWsHash, err = ws.HashOf()
		if err != nil {
			return err
		}
	}

	ws = ws.WithWorkingRoot(commitRoot).WithStagedRoot(commitRoot)
	return ddb.UpdateWorkingSet(ctx, wsRef, ws, currWsHash, TodoWorkingSetMeta(), replicationStatus)
}

// CopyWorkingSet copies a WorkingSetRef from one ref to another. If `force` is
// true, will overwrite any existing value in the destination ref. Otherwise
// will fail if the destination ref exists.
//
// If fromWSRef does not exist, this method does not return an error, but
// returns `nil`. In that case, the destination ref is left alone.
func (ddb *DoltDB) CopyWorkingSet(ctx context.Context, fromWSRef ref.WorkingSetRef, toWSRef ref.WorkingSetRef, force bool) error {
	ws, err := ddb.ResolveWorkingSet(ctx, fromWSRef)
	if err == ErrWorkingSetNotFound {
		return nil
	} else if err != nil {
		return err
	}

	var currWsHash hash.Hash
	toWS, err := ddb.ResolveWorkingSet(ctx, toWSRef)
	if err != nil && err != ErrWorkingSetNotFound {
		return err
	}
	if !force && err != ErrWorkingSetNotFound {
		return errors.New("cannot overwrite existing working set " + toWSRef.String() + " without force.")
	} else if err == nil {
		currWsHash, err = toWS.HashOf()
		if err != nil {
			return err
		}
	}

	return ddb.UpdateWorkingSet(ctx, toWSRef, ws, currWsHash, TodoWorkingSetMeta(), nil)
}

func (ddb *DoltDB) DeleteBranchWithWorkspaceCheck(ctx context.Context, branch ref.DoltRef, replicationStatus *ReplicationStatusController, wsPath string) error {
	return ddb.deleteRef(ctx, branch, replicationStatus, wsPath)
}

// DeleteBranch deletes the branch given, returning an error if it doesn't exist.
func (ddb *DoltDB) DeleteBranch(ctx context.Context, branch ref.DoltRef, replicationStatus *ReplicationStatusController) error {
	return ddb.deleteRef(ctx, branch, replicationStatus, "")
}

func (ddb *DoltDB) deleteRef(ctx context.Context, dref ref.DoltRef, replicationStatus *ReplicationStatusController, wsPath string) error {
	ds, err := ddb.db.GetDataset(ctx, dref.String())

	if err != nil {
		return err
	}

	if !ds.HasHead() {
		return ErrBranchNotFound
	}

	if dref.GetType() == ref.BranchRefType {
		branches, err := ddb.GetBranches(ctx)
		if err != nil {
			return err
		}
		if len(branches) == 1 {
			return ErrCannotDeleteLastBranch
		}
	}

	_, err = ddb.db.withReplicationStatusController(replicationStatus).Delete(ctx, ds, wsPath)
	return err
}

// DeleteAllRefs is very destructive; use with caution. Not only does this drop all data, Dolt assumes there is always
// a reference in the DB, so do not call this and walk away. The only use case for this method is the
// `dolt clone` command, which strips everything from the remote's root object. The dolt_clone stored procedure doesn't
// currently use this code path (TODO).
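// (The DeleteAllRefs implementation follows below.) First, an illustrative sketch of the more common
// branch lifecycle using NewBranchAtCommit and DeleteBranch (defined above); the branch name and the
// nil replication status controller are assumptions.
func exampleBranchLifecycle(ctx context.Context, ddb *DoltDB, cm *Commit) error {
	if err := ddb.NewBranchAtCommit(ctx, ref.NewBranchRef("example-branch"), cm, nil); err != nil {
		return err
	}
	return ddb.DeleteBranch(ctx, ref.NewBranchRef("example-branch"), nil)
}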
func (ddb *DoltDB) DeleteAllRefs(ctx context.Context) error {
	dss, err := ddb.db.Datasets(ctx)
	if err != nil {
		return err
	}
	err = dss.IterAll(ctx, func(key string, addr hash.Hash) error {
		ds, e := ddb.db.GetDataset(ctx, key)
		if e != nil {
			return e
		}

		_, e = ddb.db.Delete(ctx, ds, "")
		return e
	})
	return err
}

// NewTagAtCommit creates a new tag at the commit given.
func (ddb *DoltDB) NewTagAtCommit(ctx context.Context, tagRef ref.DoltRef, c *Commit, meta *datas.TagMeta) error {
	if !IsValidTagRef(tagRef) {
		panic(fmt.Sprintf("invalid tag name %s, use IsValidUserTagName check", tagRef.String()))
	}

	ds, err := ddb.db.GetDataset(ctx, tagRef.String())

	if err != nil {
		return err
	}

	if ds.HasHead() {
		return fmt.Errorf("dataset already exists for tag %s", tagRef.String())
	}

	commitAddr, err := c.HashOf()
	if err != nil {
		return err
	}

	tag := datas.TagOptions{Meta: meta}

	ds, err = ddb.db.Tag(ctx, ds, commitAddr, tag)

	return err
}

// This should be used as the cancel cause for the context passed to a
// ReplicationStatusController Wait function when the wait has been canceled
// because it timed out. Seeing this error from a passed in context may be used
// by some agents to open circuit breakers or tune timeouts.
var ErrReplicationWaitFailed = errors.New("replication wait failed")

type ReplicationStatusController struct {
	// A slice of funcs which can be called to wait for the replication
	// associated with a commithook to complete. Must return if the
	// associated Context is canceled.
	Wait []func(ctx context.Context) error

	// There is an entry here for each function in Wait. If a Wait fails,
	// you can notify the corresponding function in this slice. This might
	// control resiliency behaviors like adaptive retry and timeouts,
	// circuit breakers, etc. and might feed into exposed replication
	// metrics.
	NotifyWaitFailed []func()
}

// UpdateWorkingSet updates the working set at the ref given to the WorkingSet value given.
// |prevHash| is the hash of the expected WorkingSet struct stored in the ref, not the hash of the RootValue there.
func (ddb *DoltDB) UpdateWorkingSet(
	ctx context.Context,
	workingSetRef ref.WorkingSetRef,
	workingSet *WorkingSet,
	prevHash hash.Hash,
	meta *datas.WorkingSetMeta,
	replicationStatus *ReplicationStatusController,
) error {
	ds, err := ddb.db.GetDataset(ctx, workingSetRef.String())
	if err != nil {
		return err
	}

	wsSpec, err := workingSet.writeValues(ctx, ddb, meta)
	if err != nil {
		return err
	}

	_, err = ddb.db.withReplicationStatusController(replicationStatus).UpdateWorkingSet(ctx, ds, *wsSpec, prevHash)
	return err
}

// CommitWithWorkingSet combines the functionality of CommitWithParents with UpdateWorkingSet, and takes a combination
// of their parameters. It's a way to update the working set and current HEAD in the same atomic transaction. It commits
// to disk a pending commit value previously created with NewPendingCommit, asserting that the working set hash given
// is still current for that HEAD.
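// An illustrative sketch of the CommitWithWorkingSet flow described above, assuming a *PendingCommit and
// the branch's current *WorkingSet were obtained elsewhere (e.g. via NewPendingCommit and
// ResolveWorkingSet); the nil replication status controller is an assumption.
func exampleCommitWithWorkingSet(ctx context.Context, ddb *DoltDB, headRef ref.BranchRef, pending *PendingCommit, ws *WorkingSet) (*Commit, error) {
	wsRef, err := ref.WorkingSetRefForHead(headRef)
	if err != nil {
		return nil, err
	}
	// prevHash is the hash of the WorkingSet struct currently stored at wsRef, not of its RootValue.
	prevHash, err := ws.HashOf()
	if err != nil {
		return nil, err
	}
	return ddb.CommitWithWorkingSet(ctx, headRef, wsRef, pending, ws, prevHash, TodoWorkingSetMeta(), nil)
}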
func (ddb *DoltDB) CommitWithWorkingSet(
	ctx context.Context,
	headRef ref.DoltRef, workingSetRef ref.WorkingSetRef,
	commit *PendingCommit, workingSet *WorkingSet,
	prevHash hash.Hash,
	meta *datas.WorkingSetMeta,
	replicationStatus *ReplicationStatusController,
) (*Commit, error) {
	wsDs, err := ddb.db.GetDataset(ctx, workingSetRef.String())
	if err != nil {
		return nil, err
	}

	headDs, err := ddb.db.GetDataset(ctx, headRef.String())
	if err != nil {
		return nil, err
	}

	wsSpec, err := workingSet.writeValues(ctx, ddb, meta)
	if err != nil {
		return nil, err
	}

	commitDataset, _, err := ddb.db.withReplicationStatusController(replicationStatus).
		CommitWithWorkingSet(ctx, headDs, wsDs, commit.Roots.Staged.NomsValue(), *wsSpec, prevHash, commit.CommitOptions)
	if err != nil {
		return nil, err
	}

	commitRef, ok, err := commitDataset.MaybeHeadRef()
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, errors.New("Commit has no head but commit succeeded. This is a bug.")
	}

	dc, err := datas.LoadCommitRef(ctx, ddb.vrw, commitRef)
	if err != nil {
		return nil, err
	}

	if dc.IsGhost() {
		return nil, ErrGhostCommitEncountered
	}

	return NewCommit(ctx, ddb.vrw, ddb.ns, dc)
}

// DeleteWorkingSet deletes the working set given
func (ddb *DoltDB) DeleteWorkingSet(ctx context.Context, workingSetRef ref.WorkingSetRef) error {
	ds, err := ddb.db.GetDataset(ctx, workingSetRef.String())
	if err != nil {
		return err
	}

	_, err = ddb.db.Delete(ctx, ds, "")
	return err
}

func (ddb *DoltDB) DeleteTag(ctx context.Context, tag ref.DoltRef) error {
	err := ddb.deleteRef(ctx, tag, nil, "")

	if err == ErrBranchNotFound {
		return ErrTagNotFound
	}

	return err
}

// NewWorkspaceAtCommit creates a new workspace at the commit given.
func (ddb *DoltDB) NewWorkspaceAtCommit(ctx context.Context, workRef ref.DoltRef, c *Commit) error {
	ds, err := ddb.db.GetDataset(ctx, workRef.String())
	if err != nil {
		return err
	}

	addr, err := c.HashOf()
	if err != nil {
		return err
	}

	ds, err = ddb.db.SetHead(ctx, ds, addr, "")

	return err
}

func (ddb *DoltDB) DeleteWorkspace(ctx context.Context, workRef ref.DoltRef) error {
	err := ddb.deleteRef(ctx, workRef, nil, "")

	if err == ErrBranchNotFound {
		return ErrWorkspaceNotFound
	}

	return err
}

// Rebase rebases the underlying db from disk, re-loading the manifest. Useful when another process might have made
// changes to the database we need to read.
func (ddb *DoltDB) Rebase(ctx context.Context) error {
	return datas.ChunkStoreFromDatabase(ddb.db).Rebase(ctx)
}

// GC performs garbage collection on this ddb.
//
// If |safepointF| is non-nil, it will be called at some point after the GC begins
// and before the GC ends. It will be called without
// Database/ValueStore/NomsBlockStore locks held. It should establish
// safepoints in every application-level in-progress read and write workflow
// against this DoltDB. Examples include blocking until no possibly-stale
// ChunkStore state is retained in memory, or failing certain in-progress
// operations which cannot be finalized in a timely manner, etc.
func (ddb *DoltDB) GC(ctx context.Context, safepointF func() error) error {
	collector, ok := ddb.db.Database.(datas.GarbageCollector)
	if !ok {
		return fmt.Errorf("this database does not support garbage collection")
	}

	err := ddb.pruneUnreferencedDatasets(ctx)
	if err != nil {
		return err
	}

	datasets, err := ddb.db.Datasets(ctx)
	if err != nil {
		return err
	}

	newGen := make(hash.HashSet)
	oldGen := make(hash.HashSet)
	err = datasets.IterAll(ctx, func(keyStr string, h hash.Hash) error {
		var isOldGen bool
		switch {
		case ref.IsRef(keyStr):
			parsed, err := ref.Parse(keyStr)
			if err != nil && !errors.Is(err, ref.ErrUnknownRefType) {
				return err
			}

			refType := parsed.GetType()
			isOldGen = refType == ref.BranchRefType || refType == ref.RemoteRefType || refType == ref.InternalRefType
		}

		if isOldGen {
			oldGen.Insert(h)
		} else {
			newGen.Insert(h)
		}

		return nil
	})

	if err != nil {
		return err
	}

	return collector.GC(ctx, oldGen, newGen, safepointF)
}

func (ddb *DoltDB) ShallowGC(ctx context.Context) error {
	return datas.PruneTableFiles(ctx, ddb.db)
}

func (ddb *DoltDB) pruneUnreferencedDatasets(ctx context.Context) error {
	dd, err := ddb.db.Datasets(ctx)
	if err != nil {
		return err
	}

	var deletes []string
	_ = dd.IterAll(ctx, func(dsID string, _ hash.Hash) (err error) {
		if !ref.IsRef(dsID) && !ref.IsWorkingSet(dsID) {
			deletes = append(deletes, dsID)
		}
		return nil
	})

	// e.g. flushes
	for _, dsID := range deletes {
		ds, err := ddb.db.GetDataset(ctx, dsID)
		if err != nil {
			return err
		}

		ds, err = ddb.db.Delete(ctx, ds, "")
		if err != nil {
			return err
		}

		if ds.HasHead() {
			return fmt.Errorf("unsuccessful delete for dataset %s", ds.ID())
		}
	}

	return nil
}

// PullChunks initiates a pull into this database from the source database
// given, pulling all chunks reachable from the given target hashes. Pull progress
// is communicated over the provided channel.
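// An illustrative sketch of PullChunks as described above: pull everything reachable from a single
// commit hash from srcDB into destDB while draining the stats channel. The buffer size, the decision to
// discard progress updates, and leaving statsCh lifecycle to the caller's conventions are assumptions.
func examplePull(ctx context.Context, destDB, srcDB *DoltDB, target hash.Hash, tempDir string) error {
	statsCh := make(chan pull.Stats, 16)
	quit := make(chan struct{})
	go func() {
		for {
			select {
			case <-quit:
				return
			case _, ok := <-statsCh:
				if !ok {
					return
				}
				// A real caller would report progress here.
			}
		}
	}()
	err := destDB.PullChunks(ctx, tempDir, srcDB, []hash.Hash{target}, statsCh, nil)
	close(quit)
	return err
}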
1612 func (ddb *DoltDB) PullChunks( 1613 ctx context.Context, 1614 tempDir string, 1615 srcDB *DoltDB, 1616 targetHashes []hash.Hash, 1617 statsCh chan pull.Stats, 1618 skipHashes hash.HashSet, 1619 ) error { 1620 return pullHash(ctx, ddb.db, srcDB.db, targetHashes, tempDir, statsCh, skipHashes) 1621 } 1622 1623 func pullHash( 1624 ctx context.Context, 1625 destDB, srcDB datas.Database, 1626 targetHashes []hash.Hash, 1627 tempDir string, 1628 statsCh chan pull.Stats, 1629 skipHashes hash.HashSet, 1630 ) error { 1631 srcCS := datas.ChunkStoreFromDatabase(srcDB) 1632 destCS := datas.ChunkStoreFromDatabase(destDB) 1633 waf := types.WalkAddrsForNBF(srcDB.Format(), skipHashes) 1634 1635 if datas.CanUsePuller(srcDB) && datas.CanUsePuller(destDB) { 1636 puller, err := pull.NewPuller(ctx, tempDir, defaultChunksPerTF, srcCS, destCS, waf, targetHashes, statsCh) 1637 if err == pull.ErrDBUpToDate { 1638 return nil 1639 } else if err != nil { 1640 return err 1641 } 1642 1643 return puller.Pull(ctx) 1644 } else { 1645 return errors.New("Puller not supported") 1646 } 1647 } 1648 1649 func (ddb *DoltDB) Clone(ctx context.Context, destDB *DoltDB, eventCh chan<- pull.TableFileEvent) error { 1650 return pull.Clone(ctx, datas.ChunkStoreFromDatabase(ddb.db), datas.ChunkStoreFromDatabase(destDB.db), eventCh) 1651 } 1652 1653 // Returns |true| if the underlying ChunkStore for this DoltDB implements |chunks.TableFileStore|. 1654 func (ddb *DoltDB) IsTableFileStore() bool { 1655 _, ok := datas.ChunkStoreFromDatabase(ddb.db).(chunks.TableFileStore) 1656 return ok 1657 } 1658 1659 // ChunkJournal returns the ChunkJournal for this DoltDB, if one is in use. 1660 func (ddb *DoltDB) ChunkJournal() *nbs.ChunkJournal { 1661 tableFileStore, ok := datas.ChunkStoreFromDatabase(ddb.db).(chunks.TableFileStore) 1662 if !ok { 1663 return nil 1664 } 1665 1666 generationalNbs, ok := tableFileStore.(*nbs.GenerationalNBS) 1667 if !ok { 1668 return nil 1669 } 1670 1671 newGen := generationalNbs.NewGen() 1672 nbs, ok := newGen.(*nbs.NomsBlockStore) 1673 if !ok { 1674 return nil 1675 } 1676 1677 return nbs.ChunkJournal() 1678 } 1679 1680 func (ddb *DoltDB) TableFileStoreHasJournal(ctx context.Context) (bool, error) { 1681 tableFileStore, ok := datas.ChunkStoreFromDatabase(ddb.db).(chunks.TableFileStore) 1682 if !ok { 1683 return false, errors.New("unsupported operation, DoltDB.TableFileStoreHasManifest on non-TableFileStore") 1684 } 1685 _, tableFiles, _, err := tableFileStore.Sources(ctx) 1686 if err != nil { 1687 return false, err 1688 } 1689 for _, tableFile := range tableFiles { 1690 if tableFile.FileID() == chunks.JournalFileID { 1691 return true, nil 1692 } 1693 } 1694 return false, nil 1695 } 1696 1697 // DatasetsByRootHash returns the DatasetsMap for the specified root |hashof|. 
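// An illustrative sketch of reading a consistent snapshot of refs: capture the current noms root, then
// iterate the datasets as of that root via DatasetsByRootHash (below) and DatasetsMap.IterAll.
func exampleSnapshotRefs(ctx context.Context, ddb *DoltDB) (map[string]hash.Hash, error) {
	root, err := ddb.NomsRoot(ctx)
	if err != nil {
		return nil, err
	}
	dss, err := ddb.DatasetsByRootHash(ctx, root)
	if err != nil {
		return nil, err
	}
	snapshot := make(map[string]hash.Hash)
	err = dss.IterAll(ctx, func(key string, addr hash.Hash) error {
		snapshot[key] = addr
		return nil
	})
	return snapshot, err
}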
1698 func (ddb *DoltDB) DatasetsByRootHash(ctx context.Context, hashof hash.Hash) (datas.DatasetsMap, error) { 1699 return ddb.db.DatasetsByRootHash(ctx, hashof) 1700 } 1701 1702 func (ddb *DoltDB) SetCommitHooks(ctx context.Context, postHooks []CommitHook) *DoltDB { 1703 ddb.db = ddb.db.SetCommitHooks(ctx, postHooks) 1704 return ddb 1705 } 1706 1707 func (ddb *DoltDB) PrependCommitHook(ctx context.Context, hook CommitHook) *DoltDB { 1708 ddb.db = ddb.db.SetCommitHooks(ctx, append([]CommitHook{hook}, ddb.db.PostCommitHooks()...)) 1709 return ddb 1710 } 1711 1712 func (ddb *DoltDB) SetCommitHookLogger(ctx context.Context, wr io.Writer) *DoltDB { 1713 if ddb.db.Database != nil { 1714 ddb.db = ddb.db.SetCommitHookLogger(ctx, wr) 1715 } 1716 return ddb 1717 } 1718 1719 func (ddb *DoltDB) ExecuteCommitHooks(ctx context.Context, datasetId string) error { 1720 ds, err := ddb.db.GetDataset(ctx, datasetId) 1721 if err != nil { 1722 return err 1723 } 1724 ddb.db.ExecuteCommitHooks(ctx, ds, false) 1725 return nil 1726 } 1727 1728 func (ddb *DoltDB) GetBranchesByRootHash(ctx context.Context, rootHash hash.Hash) ([]RefWithHash, error) { 1729 dss, err := ddb.db.DatasetsByRootHash(ctx, rootHash) 1730 if err != nil { 1731 return nil, err 1732 } 1733 1734 var refs []RefWithHash 1735 1736 err = dss.IterAll(ctx, func(key string, addr hash.Hash) error { 1737 keyStr := key 1738 1739 var dref ref.DoltRef 1740 if ref.IsRef(keyStr) { 1741 dref, err = ref.Parse(keyStr) 1742 if err != nil { 1743 return err 1744 } 1745 1746 if _, ok := branchRefFilter[dref.GetType()]; ok { 1747 refs = append(refs, RefWithHash{dref, addr}) 1748 } 1749 } 1750 1751 return nil 1752 }) 1753 if err != nil { 1754 return nil, err 1755 } 1756 1757 return refs, nil 1758 } 1759 1760 // AddStash takes current branch head commit, stash root value and stash metadata to create a new stash. 1761 // It stores the new stash object in stash list Dataset, which can be created if it does not exist. 1762 // Otherwise, it updates the stash list Dataset as there can only be one stashes Dataset. 1763 func (ddb *DoltDB) AddStash(ctx context.Context, head *Commit, stash RootValue, meta *datas.StashMeta) error { 1764 stashesDS, err := ddb.db.GetDataset(ctx, ref.NewStashRef().String()) 1765 if err != nil { 1766 return err 1767 } 1768 1769 headCommitAddr, err := head.HashOf() 1770 if err != nil { 1771 return err 1772 } 1773 1774 _, stashVal, err := ddb.writeRootValue(ctx, stash) 1775 if err != nil { 1776 return err 1777 } 1778 1779 nbf := ddb.Format() 1780 vrw := ddb.ValueReadWriter() 1781 stashAddr, _, err := datas.NewStash(ctx, nbf, vrw, stashVal, headCommitAddr, meta) 1782 if err != nil { 1783 return err 1784 } 1785 1786 // this either creates new stash list dataset or loads current stash list dataset if exists. 
// AddStash creates a new stash from the current branch head commit, the stash root value, and the
// stash metadata. The new stash object is stored in the stash list Dataset, which is created if it
// does not exist; otherwise the existing stash list Dataset is updated, since there can only be one
// stashes Dataset.
func (ddb *DoltDB) AddStash(ctx context.Context, head *Commit, stash RootValue, meta *datas.StashMeta) error {
    stashesDS, err := ddb.db.GetDataset(ctx, ref.NewStashRef().String())
    if err != nil {
        return err
    }

    headCommitAddr, err := head.HashOf()
    if err != nil {
        return err
    }

    _, stashVal, err := ddb.writeRootValue(ctx, stash)
    if err != nil {
        return err
    }

    nbf := ddb.Format()
    vrw := ddb.ValueReadWriter()
    stashAddr, _, err := datas.NewStash(ctx, nbf, vrw, stashVal, headCommitAddr, meta)
    if err != nil {
        return err
    }

    // this either creates a new stash list dataset or loads the current one if it exists.
    stashList, err := datas.LoadStashList(ctx, nbf, ddb.NodeStore(), vrw, stashesDS)
    if err != nil {
        return err
    }

    stashListAddr, err := stashList.AddStash(ctx, vrw, stashAddr)
    if err != nil {
        return err
    }

    _, err = ddb.db.UpdateStashList(ctx, stashesDS, stashListAddr)
    return err
}

// SetStatisics points the statistics ref for |branch| at the statistics stored at |addr|.
func (ddb *DoltDB) SetStatisics(ctx context.Context, branch string, addr hash.Hash) error {
    statsDs, err := ddb.db.GetDataset(ctx, ref.NewStatsRef(branch).String())
    if err != nil {
        return err
    }
    _, err = ddb.db.SetStatsRef(ctx, statsDs, addr)
    return err
}

// DropStatisics deletes the statistics dataset for |branch|.
func (ddb *DoltDB) DropStatisics(ctx context.Context, branch string) error {
    statsDs, err := ddb.db.GetDataset(ctx, ref.NewStatsRef(branch).String())
    if err != nil {
        return err
    }

    _, err = ddb.db.Delete(ctx, statsDs, "")
    return err
}

var ErrNoStatistics = errors.New("no statistics found")

// GetStatistics returns the statistics map stored at the stats ref for |branch|,
// or ErrNoStatistics if that ref has no head.
func (ddb *DoltDB) GetStatistics(ctx context.Context, branch string) (prolly.Map, error) {
    ds, err := ddb.db.GetDataset(ctx, ref.NewStatsRef(branch).String())
    if err != nil {
        return prolly.Map{}, err
    }

    if !ds.HasHead() {
        return prolly.Map{}, ErrNoStatistics
    }

    stats, err := datas.LoadStatistics(ctx, ddb.Format(), ddb.NodeStore(), ddb.ValueReadWriter(), ds)
    if err != nil {
        return prolly.Map{}, err
    }

    return stats.Map(), nil
}

// RemoveStashAtIdx takes an index of a stash to remove from the stash list map.
// It removes a single Stash message from the stash list Dataset, which cannot be done with the
// database Delete function. If the removal leaves the stash list empty, the stash list Dataset
// itself is removed from the database.
func (ddb *DoltDB) RemoveStashAtIdx(ctx context.Context, idx int) error {
    stashesDS, err := ddb.db.GetDataset(ctx, ref.NewStashRef().String())
    if err != nil {
        return err
    }

    if !stashesDS.HasHead() {
        return errors.New("No stash entries found.")
    }

    vrw := ddb.ValueReadWriter()
    stashList, err := datas.LoadStashList(ctx, ddb.Format(), ddb.NodeStore(), vrw, stashesDS)
    if err != nil {
        return err
    }

    stashListAddr, err := stashList.RemoveStashAtIdx(ctx, vrw, idx)
    if err != nil {
        return err
    }

    stashListCount, err := stashList.Count()
    if err != nil {
        return err
    }
    // if the stash list is empty, remove the stash list Dataset from the database
    if stashListCount == 0 {
        return ddb.RemoveAllStashes(ctx)
    }

    _, err = ddb.db.UpdateStashList(ctx, stashesDS, stashListAddr)
    return err
}

// RemoveAllStashes removes the stash list Dataset from the database,
// which is equivalent to removing all Stash entries from the stash list.
func (ddb *DoltDB) RemoveAllStashes(ctx context.Context) error {
    err := ddb.deleteRef(ctx, ref.NewStashRef(), nil, "")
    if err == ErrBranchNotFound {
        return nil
    }
    return err
}
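// The sketch below is illustrative only and is not part of the original file. It shows how a
// caller might treat the absence of statistics as a non-error: GetStatistics (defined above)
// returns ErrNoStatistics when the stats ref for the branch has no head, and an empty prolly.Map
// is a reasonable fallback in that case. The helper name is hypothetical.
func exampleLoadStats(ctx context.Context, ddb *DoltDB, branch string) (prolly.Map, bool, error) {
    m, err := ddb.GetStatistics(ctx, branch)
    if errors.Is(err, ErrNoStatistics) {
        // No stats have been collected for this branch yet.
        return prolly.Map{}, false, nil
    }
    if err != nil {
        return prolly.Map{}, false, err
    }
    return m, true, nil
}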
// GetStashes returns an array of Stash objects containing all stash entries in the stash list Dataset.
func (ddb *DoltDB) GetStashes(ctx context.Context) ([]*Stash, error) {
    stashesDS, err := ddb.db.GetDataset(ctx, ref.NewStashRef().String())
    if err != nil {
        return nil, err
    }

    if !stashesDS.HasHead() {
        return []*Stash{}, nil
    }

    return getStashList(ctx, stashesDS, ddb.vrw, ddb.NodeStore())
}

// GetStashHashAtIdx returns only the hash address of the stash at the given index.
func (ddb *DoltDB) GetStashHashAtIdx(ctx context.Context, idx int) (hash.Hash, error) {
    ds, err := ddb.db.GetDataset(ctx, ref.NewStashRef().String())
    if err != nil {
        return hash.Hash{}, err
    }

    if !ds.HasHead() {
        return hash.Hash{}, errors.New("No stash entries found.")
    }

    return getStashHashAtIdx(ctx, ds, ddb.NodeStore(), idx)
}

// GetStashRootAndHeadCommitAtIdx returns, for the stash at the given index, the root value of the
// stash working set, the head commit of the branch the stash was made on, and the stash metadata.
func (ddb *DoltDB) GetStashRootAndHeadCommitAtIdx(ctx context.Context, idx int) (RootValue, *Commit, *datas.StashMeta, error) {
    ds, err := ddb.db.GetDataset(ctx, ref.NewStashRef().String())
    if err != nil {
        return nil, nil, nil, err
    }

    if !ds.HasHead() {
        return nil, nil, nil, errors.New("No stash entries found.")
    }

    return getStashAtIdx(ctx, ds, ddb.vrw, ddb.NodeStore(), idx)
}

// PersistGhostCommits persists the set of ghost commits to the database. This is how the application layer passes
// information about ghost commits to the storage layer. This can be called multiple times over the course of performing
// a shallow clone, but should not be called after the clone is complete.
func (ddb *DoltDB) PersistGhostCommits(ctx context.Context, ghostCommits hash.HashSet) error {
    return ddb.db.Database.PersistGhostCommitIDs(ctx, ghostCommits)
}
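// The sketch below is illustrative only and is not part of the original file. It shows how a
// caller might clear every stash in one step: GetStashes reports the current entries, and
// RemoveAllStashes (defined above) drops the whole stash list Dataset rather than removing
// entries one index at a time. The helper name is hypothetical.
func exampleClearStashes(ctx context.Context, ddb *DoltDB) (int, error) {
    stashes, err := ddb.GetStashes(ctx)
    if err != nil {
        return 0, err
    }
    if len(stashes) == 0 {
        // Nothing to remove; the stash list Dataset may not even exist.
        return 0, nil
    }
    if err := ddb.RemoveAllStashes(ctx); err != nil {
        return 0, err
    }
    return len(stashes), nil
}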